xref: /linux/drivers/nvme/host/core.c (revision 8a8e54625be28a6e675e53d214387fc8ee41fb6e)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * NVM Express device driver
4  * Copyright (c) 2011-2014, Intel Corporation.
5  */
6 
7 #include <linux/blkdev.h>
8 #include <linux/blk-mq.h>
9 #include <linux/compat.h>
10 #include <linux/delay.h>
11 #include <linux/errno.h>
12 #include <linux/hdreg.h>
13 #include <linux/kernel.h>
14 #include <linux/module.h>
15 #include <linux/backing-dev.h>
16 #include <linux/list_sort.h>
17 #include <linux/slab.h>
18 #include <linux/types.h>
19 #include <linux/pr.h>
20 #include <linux/ptrace.h>
21 #include <linux/nvme_ioctl.h>
22 #include <linux/pm_qos.h>
23 #include <asm/unaligned.h>
24 
25 #include "nvme.h"
26 #include "fabrics.h"
27 
28 #define CREATE_TRACE_POINTS
29 #include "trace.h"
30 
31 #define NVME_MINORS		(1U << MINORBITS)
32 
33 unsigned int admin_timeout = 60;
34 module_param(admin_timeout, uint, 0644);
35 MODULE_PARM_DESC(admin_timeout, "timeout in seconds for admin commands");
36 EXPORT_SYMBOL_GPL(admin_timeout);
37 
38 unsigned int nvme_io_timeout = 30;
39 module_param_named(io_timeout, nvme_io_timeout, uint, 0644);
40 MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O");
41 EXPORT_SYMBOL_GPL(nvme_io_timeout);
42 
43 static unsigned char shutdown_timeout = 5;
44 module_param(shutdown_timeout, byte, 0644);
45 MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown");
46 
47 static u8 nvme_max_retries = 5;
48 module_param_named(max_retries, nvme_max_retries, byte, 0644);
49 MODULE_PARM_DESC(max_retries, "max number of retries a command may have");
50 
51 static unsigned long default_ps_max_latency_us = 100000;
52 module_param(default_ps_max_latency_us, ulong, 0644);
53 MODULE_PARM_DESC(default_ps_max_latency_us,
54 		 "max power saving latency for new devices; use PM QOS to change per device");
55 
56 static bool force_apst;
57 module_param(force_apst, bool, 0644);
58 MODULE_PARM_DESC(force_apst, "allow APST for newly enumerated devices even if quirked off");
59 
60 static bool streams;
61 module_param(streams, bool, 0644);
62 MODULE_PARM_DESC(streams, "turn on support for Streams write directives");
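
/*
 * Note: the 0644 permissions above mean these parameters can be given at
 * module load time or changed later through sysfs.  A sketch (assuming
 * the core is built as the nvme_core module):
 *
 *	modprobe nvme_core io_timeout=60 max_retries=3
 *	echo 10 > /sys/module/nvme_core/parameters/shutdown_timeout
 */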
63 
64 /*
65  * nvme_wq - hosts nvme related works that are not reset or delete
66  * nvme_reset_wq - hosts nvme reset works
67  * nvme_delete_wq - hosts nvme delete works
68  *
69  * nvme_wq will host works such as scan, aen handling, fw activation,
70  * keep-alive, periodic reconnects etc. nvme_reset_wq
71  * runs reset works which also flush works hosted on nvme_wq for
72  * serialization purposes. nvme_delete_wq hosts controller deletion
73  * works which flush reset works for serialization.
74  */
75 struct workqueue_struct *nvme_wq;
76 EXPORT_SYMBOL_GPL(nvme_wq);
77 
78 struct workqueue_struct *nvme_reset_wq;
79 EXPORT_SYMBOL_GPL(nvme_reset_wq);
80 
81 struct workqueue_struct *nvme_delete_wq;
82 EXPORT_SYMBOL_GPL(nvme_delete_wq);
83 
84 static LIST_HEAD(nvme_subsystems);
85 static DEFINE_MUTEX(nvme_subsystems_lock);
86 
87 static DEFINE_IDA(nvme_instance_ida);
88 static dev_t nvme_chr_devt;
89 static struct class *nvme_class;
90 static struct class *nvme_subsys_class;
91 
92 static int nvme_revalidate_disk(struct gendisk *disk);
93 static void nvme_put_subsystem(struct nvme_subsystem *subsys);
94 static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
95 					   unsigned nsid);
96 
97 static void nvme_set_queue_dying(struct nvme_ns *ns)
98 {
99 	/*
100 	 * Revalidating a dead namespace sets capacity to 0. This will stop
101 	 * buffered writers from dirtying pages that can't be synced.
102 	 */
103 	if (!ns->disk || test_and_set_bit(NVME_NS_DEAD, &ns->flags))
104 		return;
105 	blk_set_queue_dying(ns->queue);
106 	/* Forcibly unquiesce queues to avoid blocking dispatch */
107 	blk_mq_unquiesce_queue(ns->queue);
108 	/*
109 	 * Revalidate after unblocking dispatchers that may be holding bd_mutex
110 	 */
111 	revalidate_disk(ns->disk);
112 }
113 
114 static void nvme_queue_scan(struct nvme_ctrl *ctrl)
115 {
116 	/*
117 	 * Only queue new scan work when admin and IO queues are both alive
118 	 */
119 	if (ctrl->state == NVME_CTRL_LIVE && ctrl->tagset)
120 		queue_work(nvme_wq, &ctrl->scan_work);
121 }
122 
123 /*
124  * Use this function to proceed with scheduling reset_work for a controller
125  * that had previously been set to the resetting state. This is intended for
126  * code paths that can't be interrupted by other reset attempts. A hot removal
127  * may prevent this from succeeding.
128  */
129 int nvme_try_sched_reset(struct nvme_ctrl *ctrl)
130 {
131 	if (ctrl->state != NVME_CTRL_RESETTING)
132 		return -EBUSY;
133 	if (!queue_work(nvme_reset_wq, &ctrl->reset_work))
134 		return -EBUSY;
135 	return 0;
136 }
137 EXPORT_SYMBOL_GPL(nvme_try_sched_reset);
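
/*
 * A sketch of the intended caller pattern: an error handler first wins
 * the transition to NVME_CTRL_RESETTING (e.g. via nvme_wait_reset()
 * below) and then calls nvme_try_sched_reset() to queue the actual
 * reset work.
 */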
138 
139 int nvme_reset_ctrl(struct nvme_ctrl *ctrl)
140 {
141 	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
142 		return -EBUSY;
143 	if (!queue_work(nvme_reset_wq, &ctrl->reset_work))
144 		return -EBUSY;
145 	return 0;
146 }
147 EXPORT_SYMBOL_GPL(nvme_reset_ctrl);
148 
149 int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
150 {
151 	int ret;
152 
153 	ret = nvme_reset_ctrl(ctrl);
154 	if (!ret) {
155 		flush_work(&ctrl->reset_work);
156 		if (ctrl->state != NVME_CTRL_LIVE)
157 			ret = -ENETRESET;
158 	}
159 
160 	return ret;
161 }
162 EXPORT_SYMBOL_GPL(nvme_reset_ctrl_sync);
163 
164 static void nvme_do_delete_ctrl(struct nvme_ctrl *ctrl)
165 {
166 	dev_info(ctrl->device,
167 		 "Removing ctrl: NQN \"%s\"\n", ctrl->opts->subsysnqn);
168 
169 	flush_work(&ctrl->reset_work);
170 	nvme_stop_ctrl(ctrl);
171 	nvme_remove_namespaces(ctrl);
172 	ctrl->ops->delete_ctrl(ctrl);
173 	nvme_uninit_ctrl(ctrl);
174 }
175 
176 static void nvme_delete_ctrl_work(struct work_struct *work)
177 {
178 	struct nvme_ctrl *ctrl =
179 		container_of(work, struct nvme_ctrl, delete_work);
180 
181 	nvme_do_delete_ctrl(ctrl);
182 }
183 
184 int nvme_delete_ctrl(struct nvme_ctrl *ctrl)
185 {
186 	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING))
187 		return -EBUSY;
188 	if (!queue_work(nvme_delete_wq, &ctrl->delete_work))
189 		return -EBUSY;
190 	return 0;
191 }
192 EXPORT_SYMBOL_GPL(nvme_delete_ctrl);
193 
194 static void nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl)
195 {
196 	/*
197 	 * Keep a reference until nvme_do_delete_ctrl() completes,
198 	 * since ->delete_ctrl can free the controller.
199 	 */
200 	nvme_get_ctrl(ctrl);
201 	if (nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING))
202 		nvme_do_delete_ctrl(ctrl);
203 	nvme_put_ctrl(ctrl);
204 }
205 
206 static blk_status_t nvme_error_status(u16 status)
207 {
208 	switch (status & 0x7ff) {
209 	case NVME_SC_SUCCESS:
210 		return BLK_STS_OK;
211 	case NVME_SC_CAP_EXCEEDED:
212 		return BLK_STS_NOSPC;
213 	case NVME_SC_LBA_RANGE:
214 	case NVME_SC_CMD_INTERRUPTED:
215 	case NVME_SC_NS_NOT_READY:
216 		return BLK_STS_TARGET;
217 	case NVME_SC_BAD_ATTRIBUTES:
218 	case NVME_SC_ONCS_NOT_SUPPORTED:
219 	case NVME_SC_INVALID_OPCODE:
220 	case NVME_SC_INVALID_FIELD:
221 	case NVME_SC_INVALID_NS:
222 		return BLK_STS_NOTSUPP;
223 	case NVME_SC_WRITE_FAULT:
224 	case NVME_SC_READ_ERROR:
225 	case NVME_SC_UNWRITTEN_BLOCK:
226 	case NVME_SC_ACCESS_DENIED:
227 	case NVME_SC_READ_ONLY:
228 	case NVME_SC_COMPARE_FAILED:
229 		return BLK_STS_MEDIUM;
230 	case NVME_SC_GUARD_CHECK:
231 	case NVME_SC_APPTAG_CHECK:
232 	case NVME_SC_REFTAG_CHECK:
233 	case NVME_SC_INVALID_PI:
234 		return BLK_STS_PROTECTION;
235 	case NVME_SC_RESERVATION_CONFLICT:
236 		return BLK_STS_NEXUS;
237 	case NVME_SC_HOST_PATH_ERROR:
238 		return BLK_STS_TRANSPORT;
239 	default:
240 		return BLK_STS_IOERR;
241 	}
242 }
243 
244 static inline bool nvme_req_needs_retry(struct request *req)
245 {
246 	if (blk_noretry_request(req))
247 		return false;
248 	if (nvme_req(req)->status & NVME_SC_DNR)
249 		return false;
250 	if (nvme_req(req)->retries >= nvme_max_retries)
251 		return false;
252 	return true;
253 }
254 
255 static void nvme_retry_req(struct request *req)
256 {
257 	struct nvme_ns *ns = req->q->queuedata;
258 	unsigned long delay = 0;
259 	u16 crd;
260 
261 	/* The mask and shift result must be <= 3 */
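	/* ctrl->crdt[] values are in units of 100 milliseconds */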
262 	crd = (nvme_req(req)->status & NVME_SC_CRD) >> 11;
263 	if (ns && crd)
264 		delay = ns->ctrl->crdt[crd - 1] * 100;
265 
266 	nvme_req(req)->retries++;
267 	blk_mq_requeue_request(req, false);
268 	blk_mq_delay_kick_requeue_list(req->q, delay);
269 }
270 
271 void nvme_complete_rq(struct request *req)
272 {
273 	blk_status_t status = nvme_error_status(nvme_req(req)->status);
274 
275 	trace_nvme_complete_rq(req);
276 
277 	nvme_cleanup_cmd(req);
278 
279 	if (nvme_req(req)->ctrl->kas)
280 		nvme_req(req)->ctrl->comp_seen = true;
281 
282 	if (unlikely(status != BLK_STS_OK && nvme_req_needs_retry(req))) {
283 		if ((req->cmd_flags & REQ_NVME_MPATH) && nvme_failover_req(req))
284 			return;
285 
286 		if (!blk_queue_dying(req->q)) {
287 			nvme_retry_req(req);
288 			return;
289 		}
290 	}
291 
292 	nvme_trace_bio_complete(req, status);
293 	blk_mq_end_request(req, status);
294 }
295 EXPORT_SYMBOL_GPL(nvme_complete_rq);
296 
297 bool nvme_cancel_request(struct request *req, void *data, bool reserved)
298 {
299 	dev_dbg_ratelimited(((struct nvme_ctrl *) data)->device,
300 				"Cancelling I/O %d", req->tag);
301 
302 	/* don't abort an already completed request */
303 	if (blk_mq_request_completed(req))
304 		return true;
305 
306 	nvme_req(req)->status = NVME_SC_HOST_ABORTED_CMD;
307 	blk_mq_force_complete_rq(req);
308 	return true;
309 }
310 EXPORT_SYMBOL_GPL(nvme_cancel_request);
311 
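/*
 * State machine implemented below: NEW, RESETTING and CONNECTING may move
 * to LIVE; NEW and LIVE to RESETTING; NEW and RESETTING to CONNECTING;
 * LIVE, RESETTING and CONNECTING to DELETING; and DELETING to DEAD.
 * Returns true if the transition was performed.
 */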
312 bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
313 		enum nvme_ctrl_state new_state)
314 {
315 	enum nvme_ctrl_state old_state;
316 	unsigned long flags;
317 	bool changed = false;
318 
319 	spin_lock_irqsave(&ctrl->lock, flags);
320 
321 	old_state = ctrl->state;
322 	switch (new_state) {
323 	case NVME_CTRL_LIVE:
324 		switch (old_state) {
325 		case NVME_CTRL_NEW:
326 		case NVME_CTRL_RESETTING:
327 		case NVME_CTRL_CONNECTING:
328 			changed = true;
329 			/* FALLTHRU */
330 		default:
331 			break;
332 		}
333 		break;
334 	case NVME_CTRL_RESETTING:
335 		switch (old_state) {
336 		case NVME_CTRL_NEW:
337 		case NVME_CTRL_LIVE:
338 			changed = true;
339 			/* FALLTHRU */
340 		default:
341 			break;
342 		}
343 		break;
344 	case NVME_CTRL_CONNECTING:
345 		switch (old_state) {
346 		case NVME_CTRL_NEW:
347 		case NVME_CTRL_RESETTING:
348 			changed = true;
349 			/* FALLTHRU */
350 		default:
351 			break;
352 		}
353 		break;
354 	case NVME_CTRL_DELETING:
355 		switch (old_state) {
356 		case NVME_CTRL_LIVE:
357 		case NVME_CTRL_RESETTING:
358 		case NVME_CTRL_CONNECTING:
359 			changed = true;
360 			/* FALLTHRU */
361 		default:
362 			break;
363 		}
364 		break;
365 	case NVME_CTRL_DEAD:
366 		switch (old_state) {
367 		case NVME_CTRL_DELETING:
368 			changed = true;
369 			/* FALLTHRU */
370 		default:
371 			break;
372 		}
373 		break;
374 	default:
375 		break;
376 	}
377 
378 	if (changed) {
379 		ctrl->state = new_state;
380 		wake_up_all(&ctrl->state_wq);
381 	}
382 
383 	spin_unlock_irqrestore(&ctrl->lock, flags);
384 	if (changed && ctrl->state == NVME_CTRL_LIVE)
385 		nvme_kick_requeue_lists(ctrl);
386 	return changed;
387 }
388 EXPORT_SYMBOL_GPL(nvme_change_ctrl_state);
389 
390 /*
391  * Returns true for sink states that can't ever transition back to live.
392  */
393 static bool nvme_state_terminal(struct nvme_ctrl *ctrl)
394 {
395 	switch (ctrl->state) {
396 	case NVME_CTRL_NEW:
397 	case NVME_CTRL_LIVE:
398 	case NVME_CTRL_RESETTING:
399 	case NVME_CTRL_CONNECTING:
400 		return false;
401 	case NVME_CTRL_DELETING:
402 	case NVME_CTRL_DEAD:
403 		return true;
404 	default:
405 		WARN_ONCE(1, "Unhandled ctrl state:%d", ctrl->state);
406 		return true;
407 	}
408 }
409 
410 /*
411  * Waits for the controller state to be resetting, or returns false if it is
412  * not possible to ever transition to that state.
413  */
414 bool nvme_wait_reset(struct nvme_ctrl *ctrl)
415 {
416 	wait_event(ctrl->state_wq,
417 		   nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING) ||
418 		   nvme_state_terminal(ctrl));
419 	return ctrl->state == NVME_CTRL_RESETTING;
420 }
421 EXPORT_SYMBOL_GPL(nvme_wait_reset);
422 
423 static void nvme_free_ns_head(struct kref *ref)
424 {
425 	struct nvme_ns_head *head =
426 		container_of(ref, struct nvme_ns_head, ref);
427 
428 	nvme_mpath_remove_disk(head);
429 	ida_simple_remove(&head->subsys->ns_ida, head->instance);
430 	cleanup_srcu_struct(&head->srcu);
431 	nvme_put_subsystem(head->subsys);
432 	kfree(head);
433 }
434 
435 static void nvme_put_ns_head(struct nvme_ns_head *head)
436 {
437 	kref_put(&head->ref, nvme_free_ns_head);
438 }
439 
440 static void nvme_free_ns(struct kref *kref)
441 {
442 	struct nvme_ns *ns = container_of(kref, struct nvme_ns, kref);
443 
444 	if (ns->ndev)
445 		nvme_nvm_unregister(ns);
446 
447 	put_disk(ns->disk);
448 	nvme_put_ns_head(ns->head);
449 	nvme_put_ctrl(ns->ctrl);
450 	kfree(ns);
451 }
452 
453 static void nvme_put_ns(struct nvme_ns *ns)
454 {
455 	kref_put(&ns->kref, nvme_free_ns);
456 }
457 
458 static inline void nvme_clear_nvme_request(struct request *req)
459 {
460 	if (!(req->rq_flags & RQF_DONTPREP)) {
461 		nvme_req(req)->retries = 0;
462 		nvme_req(req)->flags = 0;
463 		req->rq_flags |= RQF_DONTPREP;
464 	}
465 }
466 
467 struct request *nvme_alloc_request(struct request_queue *q,
468 		struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid)
469 {
470 	unsigned op = nvme_is_write(cmd) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN;
471 	struct request *req;
472 
473 	if (qid == NVME_QID_ANY) {
474 		req = blk_mq_alloc_request(q, op, flags);
475 	} else {
476 		req = blk_mq_alloc_request_hctx(q, op, flags,
477 				qid ? qid - 1 : 0);
478 	}
479 	if (IS_ERR(req))
480 		return req;
481 
482 	req->cmd_flags |= REQ_FAILFAST_DRIVER;
483 	nvme_clear_nvme_request(req);
484 	nvme_req(req)->cmd = cmd;
485 
486 	return req;
487 }
488 EXPORT_SYMBOL_GPL(nvme_alloc_request);
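
/*
 * Note on the qid convention above: NVME_QID_ANY lets blk-mq pick any
 * hardware context, while a specific qid is mapped to hardware context
 * qid - 1 (qid 0 also maps to hardware context 0).
 */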
489 
490 static int nvme_toggle_streams(struct nvme_ctrl *ctrl, bool enable)
491 {
492 	struct nvme_command c;
493 
494 	memset(&c, 0, sizeof(c));
495 
496 	c.directive.opcode = nvme_admin_directive_send;
497 	c.directive.nsid = cpu_to_le32(NVME_NSID_ALL);
498 	c.directive.doper = NVME_DIR_SND_ID_OP_ENABLE;
499 	c.directive.dtype = NVME_DIR_IDENTIFY;
500 	c.directive.tdtype = NVME_DIR_STREAMS;
501 	c.directive.endir = enable ? NVME_DIR_ENDIR : 0;
502 
503 	return nvme_submit_sync_cmd(ctrl->admin_q, &c, NULL, 0);
504 }
505 
506 static int nvme_disable_streams(struct nvme_ctrl *ctrl)
507 {
508 	return nvme_toggle_streams(ctrl, false);
509 }
510 
511 static int nvme_enable_streams(struct nvme_ctrl *ctrl)
512 {
513 	return nvme_toggle_streams(ctrl, true);
514 }
515 
516 static int nvme_get_stream_params(struct nvme_ctrl *ctrl,
517 				  struct streams_directive_params *s, u32 nsid)
518 {
519 	struct nvme_command c;
520 
521 	memset(&c, 0, sizeof(c));
522 	memset(s, 0, sizeof(*s));
523 
524 	c.directive.opcode = nvme_admin_directive_recv;
525 	c.directive.nsid = cpu_to_le32(nsid);
526 	c.directive.numd = cpu_to_le32(nvme_bytes_to_numd(sizeof(*s)));
527 	c.directive.doper = NVME_DIR_RCV_ST_OP_PARAM;
528 	c.directive.dtype = NVME_DIR_STREAMS;
529 
530 	return nvme_submit_sync_cmd(ctrl->admin_q, &c, s, sizeof(*s));
531 }
532 
533 static int nvme_configure_directives(struct nvme_ctrl *ctrl)
534 {
535 	struct streams_directive_params s;
536 	int ret;
537 
538 	if (!(ctrl->oacs & NVME_CTRL_OACS_DIRECTIVES))
539 		return 0;
540 	if (!streams)
541 		return 0;
542 
543 	ret = nvme_enable_streams(ctrl);
544 	if (ret)
545 		return ret;
546 
547 	ret = nvme_get_stream_params(ctrl, &s, NVME_NSID_ALL);
548 	if (ret)
549 		goto out_disable_stream;
550 
551 	ctrl->nssa = le16_to_cpu(s.nssa);
552 	if (ctrl->nssa < BLK_MAX_WRITE_HINTS - 1) {
553 		dev_info(ctrl->device, "too few streams (%u) available\n",
554 					ctrl->nssa);
555 		goto out_disable_stream;
556 	}
557 
558 	ctrl->nr_streams = min_t(unsigned, ctrl->nssa, BLK_MAX_WRITE_HINTS - 1);
559 	dev_info(ctrl->device, "Using %u streams\n", ctrl->nr_streams);
560 	return 0;
561 
562 out_disable_stream:
563 	nvme_disable_streams(ctrl);
564 	return ret;
565 }
566 
567 /*
568  * Check if 'req' has a write hint associated with it. If it does, assign
569  * a valid namespace stream to the write.
570  */
571 static void nvme_assign_write_stream(struct nvme_ctrl *ctrl,
572 				     struct request *req, u16 *control,
573 				     u32 *dsmgmt)
574 {
575 	enum rw_hint streamid = req->write_hint;
576 
577 	if (streamid == WRITE_LIFE_NOT_SET || streamid == WRITE_LIFE_NONE)
578 		streamid = 0;
579 	else {
580 		streamid--;
581 		if (WARN_ON_ONCE(streamid > ctrl->nr_streams))
582 			return;
583 
584 		*control |= NVME_RW_DTYPE_STREAMS;
585 		*dsmgmt |= streamid << 16;
586 	}
587 
588 	if (streamid < ARRAY_SIZE(req->q->write_hints))
589 		req->q->write_hints[streamid] += blk_rq_bytes(req) >> 9;
590 }
591 
592 static inline void nvme_setup_flush(struct nvme_ns *ns,
593 		struct nvme_command *cmnd)
594 {
595 	cmnd->common.opcode = nvme_cmd_flush;
596 	cmnd->common.nsid = cpu_to_le32(ns->head->ns_id);
597 }
598 
599 static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
600 		struct nvme_command *cmnd)
601 {
602 	unsigned short segments = blk_rq_nr_discard_segments(req), n = 0;
603 	struct nvme_dsm_range *range;
604 	struct bio *bio;
605 
606 	/*
607 	 * Some devices do not consider the DSM 'Number of Ranges' field when
608 	 * determining how much data to DMA. Always allocate memory for maximum
609 	 * number of segments to prevent device reading beyond end of buffer.
610 	 */
611 	static const size_t alloc_size = sizeof(*range) * NVME_DSM_MAX_RANGES;
612 
613 	range = kzalloc(alloc_size, GFP_ATOMIC | __GFP_NOWARN);
614 	if (!range) {
615 		/*
616 		 * If allocating our range fails, fall back to the controller
617 		 * discard page. If that's also busy, it's safe to return
618 		 * busy, as we know we can make progress once that's freed.
619 		 */
620 		if (test_and_set_bit_lock(0, &ns->ctrl->discard_page_busy))
621 			return BLK_STS_RESOURCE;
622 
623 		range = page_address(ns->ctrl->discard_page);
624 	}
625 
626 	__rq_for_each_bio(bio, req) {
627 		u64 slba = nvme_sect_to_lba(ns, bio->bi_iter.bi_sector);
628 		u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift;
629 
630 		if (n < segments) {
631 			range[n].cattr = cpu_to_le32(0);
632 			range[n].nlb = cpu_to_le32(nlb);
633 			range[n].slba = cpu_to_le64(slba);
634 		}
635 		n++;
636 	}
637 
638 	if (WARN_ON_ONCE(n != segments)) {
639 		if (virt_to_page(range) == ns->ctrl->discard_page)
640 			clear_bit_unlock(0, &ns->ctrl->discard_page_busy);
641 		else
642 			kfree(range);
643 		return BLK_STS_IOERR;
644 	}
645 
646 	cmnd->dsm.opcode = nvme_cmd_dsm;
647 	cmnd->dsm.nsid = cpu_to_le32(ns->head->ns_id);
648 	cmnd->dsm.nr = cpu_to_le32(segments - 1);
649 	cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);
650 
651 	req->special_vec.bv_page = virt_to_page(range);
652 	req->special_vec.bv_offset = offset_in_page(range);
653 	req->special_vec.bv_len = alloc_size;
654 	req->rq_flags |= RQF_SPECIAL_PAYLOAD;
655 
656 	return BLK_STS_OK;
657 }
658 
659 static inline blk_status_t nvme_setup_write_zeroes(struct nvme_ns *ns,
660 		struct request *req, struct nvme_command *cmnd)
661 {
662 	if (ns->ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
663 		return nvme_setup_discard(ns, req, cmnd);
664 
665 	cmnd->write_zeroes.opcode = nvme_cmd_write_zeroes;
666 	cmnd->write_zeroes.nsid = cpu_to_le32(ns->head->ns_id);
667 	cmnd->write_zeroes.slba =
668 		cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
669 	cmnd->write_zeroes.length =
670 		cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
671 	cmnd->write_zeroes.control = 0;
672 	return BLK_STS_OK;
673 }
674 
675 static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
676 		struct request *req, struct nvme_command *cmnd)
677 {
678 	struct nvme_ctrl *ctrl = ns->ctrl;
679 	u16 control = 0;
680 	u32 dsmgmt = 0;
681 
682 	if (req->cmd_flags & REQ_FUA)
683 		control |= NVME_RW_FUA;
684 	if (req->cmd_flags & (REQ_FAILFAST_DEV | REQ_RAHEAD))
685 		control |= NVME_RW_LR;
686 
687 	if (req->cmd_flags & REQ_RAHEAD)
688 		dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;
689 
690 	cmnd->rw.opcode = (rq_data_dir(req) ? nvme_cmd_write : nvme_cmd_read);
691 	cmnd->rw.nsid = cpu_to_le32(ns->head->ns_id);
692 	cmnd->rw.slba = cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
693 	cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
694 
695 	if (req_op(req) == REQ_OP_WRITE && ctrl->nr_streams)
696 		nvme_assign_write_stream(ctrl, req, &control, &dsmgmt);
697 
698 	if (ns->ms) {
699 		/*
700 		 * If formatted with metadata, the block layer always provides a
701 		 * metadata buffer if CONFIG_BLK_DEV_INTEGRITY is enabled.  Else
702 		 * we enable the PRACT bit for protection information or set the
703 		 * namespace capacity to zero to prevent any I/O.
704 		 */
705 		if (!blk_integrity_rq(req)) {
706 			if (WARN_ON_ONCE(!nvme_ns_has_pi(ns)))
707 				return BLK_STS_NOTSUPP;
708 			control |= NVME_RW_PRINFO_PRACT;
709 		}
710 
711 		switch (ns->pi_type) {
712 		case NVME_NS_DPS_PI_TYPE3:
713 			control |= NVME_RW_PRINFO_PRCHK_GUARD;
714 			break;
715 		case NVME_NS_DPS_PI_TYPE1:
716 		case NVME_NS_DPS_PI_TYPE2:
717 			control |= NVME_RW_PRINFO_PRCHK_GUARD |
718 					NVME_RW_PRINFO_PRCHK_REF;
719 			cmnd->rw.reftag = cpu_to_le32(t10_pi_ref_tag(req));
720 			break;
721 		}
722 	}
723 
724 	cmnd->rw.control = cpu_to_le16(control);
725 	cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
726 	return BLK_STS_OK;
727 }
728 
729 void nvme_cleanup_cmd(struct request *req)
730 {
731 	if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
732 		struct nvme_ns *ns = req->rq_disk->private_data;
733 		struct page *page = req->special_vec.bv_page;
734 
735 		if (page == ns->ctrl->discard_page)
736 			clear_bit_unlock(0, &ns->ctrl->discard_page_busy);
737 		else
738 			kfree(page_address(page) + req->special_vec.bv_offset);
739 	}
740 }
741 EXPORT_SYMBOL_GPL(nvme_cleanup_cmd);
742 
743 blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
744 		struct nvme_command *cmd)
745 {
746 	blk_status_t ret = BLK_STS_OK;
747 
748 	nvme_clear_nvme_request(req);
749 
750 	memset(cmd, 0, sizeof(*cmd));
751 	switch (req_op(req)) {
752 	case REQ_OP_DRV_IN:
753 	case REQ_OP_DRV_OUT:
754 		memcpy(cmd, nvme_req(req)->cmd, sizeof(*cmd));
755 		break;
756 	case REQ_OP_FLUSH:
757 		nvme_setup_flush(ns, cmd);
758 		break;
759 	case REQ_OP_WRITE_ZEROES:
760 		ret = nvme_setup_write_zeroes(ns, req, cmd);
761 		break;
762 	case REQ_OP_DISCARD:
763 		ret = nvme_setup_discard(ns, req, cmd);
764 		break;
765 	case REQ_OP_READ:
766 	case REQ_OP_WRITE:
767 		ret = nvme_setup_rw(ns, req, cmd);
768 		break;
769 	default:
770 		WARN_ON_ONCE(1);
771 		return BLK_STS_IOERR;
772 	}
773 
774 	cmd->common.command_id = req->tag;
775 	trace_nvme_setup_cmd(req, cmd);
776 	return ret;
777 }
778 EXPORT_SYMBOL_GPL(nvme_setup_cmd);
779 
780 static void nvme_end_sync_rq(struct request *rq, blk_status_t error)
781 {
782 	struct completion *waiting = rq->end_io_data;
783 
784 	rq->end_io_data = NULL;
785 	complete(waiting);
786 }
787 
788 static void nvme_execute_rq_polled(struct request_queue *q,
789 		struct gendisk *bd_disk, struct request *rq, int at_head)
790 {
791 	DECLARE_COMPLETION_ONSTACK(wait);
792 
793 	WARN_ON_ONCE(!test_bit(QUEUE_FLAG_POLL, &q->queue_flags));
794 
795 	rq->cmd_flags |= REQ_HIPRI;
796 	rq->end_io_data = &wait;
797 	blk_execute_rq_nowait(q, bd_disk, rq, at_head, nvme_end_sync_rq);
798 
799 	while (!completion_done(&wait)) {
800 		blk_poll(q, request_to_qc_t(rq->mq_hctx, rq), true);
801 		cond_resched();
802 	}
803 }
804 
805 /*
806  * Returns 0 on success.  If the result is negative, it's a Linux error code;
807  * if the result is positive, it's an NVM Express status code.
808  */
809 int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
810 		union nvme_result *result, void *buffer, unsigned bufflen,
811 		unsigned timeout, int qid, int at_head,
812 		blk_mq_req_flags_t flags, bool poll)
813 {
814 	struct request *req;
815 	int ret;
816 
817 	req = nvme_alloc_request(q, cmd, flags, qid);
818 	if (IS_ERR(req))
819 		return PTR_ERR(req);
820 
821 	req->timeout = timeout ? timeout : ADMIN_TIMEOUT;
822 
823 	if (buffer && bufflen) {
824 		ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL);
825 		if (ret)
826 			goto out;
827 	}
828 
829 	if (poll)
830 		nvme_execute_rq_polled(req->q, NULL, req, at_head);
831 	else
832 		blk_execute_rq(req->q, NULL, req, at_head);
833 	if (result)
834 		*result = nvme_req(req)->result;
835 	if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
836 		ret = -EINTR;
837 	else
838 		ret = nvme_req(req)->status;
839  out:
840 	blk_mq_free_request(req);
841 	return ret;
842 }
843 EXPORT_SYMBOL_GPL(__nvme_submit_sync_cmd);
844 
845 int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
846 		void *buffer, unsigned bufflen)
847 {
848 	return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen, 0,
849 			NVME_QID_ANY, 0, 0, false);
850 }
851 EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd);
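
/*
 * A sketch of the caller pattern implied by the return convention above:
 *
 *	ret = nvme_submit_sync_cmd(ctrl->admin_q, &c, buf, len);
 *	if (ret < 0)
 *		return ret;	(a Linux errno, e.g. -EINTR)
 *	else if (ret > 0)
 *		handle the NVMe status code from the completion
 */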
852 
853 static void *nvme_add_user_metadata(struct bio *bio, void __user *ubuf,
854 		unsigned len, u32 seed, bool write)
855 {
856 	struct bio_integrity_payload *bip;
857 	int ret = -ENOMEM;
858 	void *buf;
859 
860 	buf = kmalloc(len, GFP_KERNEL);
861 	if (!buf)
862 		goto out;
863 
864 	ret = -EFAULT;
865 	if (write && copy_from_user(buf, ubuf, len))
866 		goto out_free_meta;
867 
868 	bip = bio_integrity_alloc(bio, GFP_KERNEL, 1);
869 	if (IS_ERR(bip)) {
870 		ret = PTR_ERR(bip);
871 		goto out_free_meta;
872 	}
873 
874 	bip->bip_iter.bi_size = len;
875 	bip->bip_iter.bi_sector = seed;
876 	ret = bio_integrity_add_page(bio, virt_to_page(buf), len,
877 			offset_in_page(buf));
878 	if (ret == len)
879 		return buf;
880 	ret = -ENOMEM;
881 out_free_meta:
882 	kfree(buf);
883 out:
884 	return ERR_PTR(ret);
885 }
886 
887 static int nvme_submit_user_cmd(struct request_queue *q,
888 		struct nvme_command *cmd, void __user *ubuffer,
889 		unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
890 		u32 meta_seed, u64 *result, unsigned timeout)
891 {
892 	bool write = nvme_is_write(cmd);
893 	struct nvme_ns *ns = q->queuedata;
894 	struct gendisk *disk = ns ? ns->disk : NULL;
895 	struct request *req;
896 	struct bio *bio = NULL;
897 	void *meta = NULL;
898 	int ret;
899 
900 	req = nvme_alloc_request(q, cmd, 0, NVME_QID_ANY);
901 	if (IS_ERR(req))
902 		return PTR_ERR(req);
903 
904 	req->timeout = timeout ? timeout : ADMIN_TIMEOUT;
905 	nvme_req(req)->flags |= NVME_REQ_USERCMD;
906 
907 	if (ubuffer && bufflen) {
908 		ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen,
909 				GFP_KERNEL);
910 		if (ret)
911 			goto out;
912 		bio = req->bio;
913 		bio->bi_disk = disk;
914 		if (disk && meta_buffer && meta_len) {
915 			meta = nvme_add_user_metadata(bio, meta_buffer, meta_len,
916 					meta_seed, write);
917 			if (IS_ERR(meta)) {
918 				ret = PTR_ERR(meta);
919 				goto out_unmap;
920 			}
921 			req->cmd_flags |= REQ_INTEGRITY;
922 		}
923 	}
924 
925 	blk_execute_rq(req->q, disk, req, 0);
926 	if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
927 		ret = -EINTR;
928 	else
929 		ret = nvme_req(req)->status;
930 	if (result)
931 		*result = le64_to_cpu(nvme_req(req)->result.u64);
932 	if (meta && !ret && !write) {
933 		if (copy_to_user(meta_buffer, meta, meta_len))
934 			ret = -EFAULT;
935 	}
936 	kfree(meta);
937  out_unmap:
938 	if (bio)
939 		blk_rq_unmap_user(bio);
940  out:
941 	blk_mq_free_request(req);
942 	return ret;
943 }
944 
945 static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status)
946 {
947 	struct nvme_ctrl *ctrl = rq->end_io_data;
948 	unsigned long flags;
949 	bool startka = false;
950 
951 	blk_mq_free_request(rq);
952 
953 	if (status) {
954 		dev_err(ctrl->device,
955 			"failed nvme_keep_alive_end_io error=%d\n",
956 				status);
957 		return;
958 	}
959 
960 	ctrl->comp_seen = false;
961 	spin_lock_irqsave(&ctrl->lock, flags);
962 	if (ctrl->state == NVME_CTRL_LIVE ||
963 	    ctrl->state == NVME_CTRL_CONNECTING)
964 		startka = true;
965 	spin_unlock_irqrestore(&ctrl->lock, flags);
966 	if (startka)
967 		queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ);
968 }
969 
970 static int nvme_keep_alive(struct nvme_ctrl *ctrl)
971 {
972 	struct request *rq;
973 
974 	rq = nvme_alloc_request(ctrl->admin_q, &ctrl->ka_cmd, BLK_MQ_REQ_RESERVED,
975 			NVME_QID_ANY);
976 	if (IS_ERR(rq))
977 		return PTR_ERR(rq);
978 
979 	rq->timeout = ctrl->kato * HZ;
980 	rq->end_io_data = ctrl;
981 
982 	blk_execute_rq_nowait(rq->q, NULL, rq, 0, nvme_keep_alive_end_io);
983 
984 	return 0;
985 }
986 
987 static void nvme_keep_alive_work(struct work_struct *work)
988 {
989 	struct nvme_ctrl *ctrl = container_of(to_delayed_work(work),
990 			struct nvme_ctrl, ka_work);
991 	bool comp_seen = ctrl->comp_seen;
992 
993 	if ((ctrl->ctratt & NVME_CTRL_ATTR_TBKAS) && comp_seen) {
994 		dev_dbg(ctrl->device,
995 			"reschedule traffic based keep-alive timer\n");
996 		ctrl->comp_seen = false;
997 		queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ);
998 		return;
999 	}
1000 
1001 	if (nvme_keep_alive(ctrl)) {
1002 		/* allocation failure, reset the controller */
1003 		dev_err(ctrl->device, "keep-alive failed\n");
1004 		nvme_reset_ctrl(ctrl);
1005 		return;
1006 	}
1007 }
1008 
1009 static void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
1010 {
1011 	if (unlikely(ctrl->kato == 0))
1012 		return;
1013 
1014 	queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ);
1015 }
1016 
1017 void nvme_stop_keep_alive(struct nvme_ctrl *ctrl)
1018 {
1019 	if (unlikely(ctrl->kato == 0))
1020 		return;
1021 
1022 	cancel_delayed_work_sync(&ctrl->ka_work);
1023 }
1024 EXPORT_SYMBOL_GPL(nvme_stop_keep_alive);
1025 
1026 /*
1027  * In NVMe 1.0 the CNS field was just a binary controller or namespace
1028  * flag, thus sending any new CNS opcodes has a big chance of not working.
1029  * Qemu unfortunately had that bug after reporting a 1.1 version compliance
1030  * (but not for any later version).
1031  */
1032 static bool nvme_ctrl_limited_cns(struct nvme_ctrl *ctrl)
1033 {
1034 	if (ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)
1035 		return ctrl->vs < NVME_VS(1, 2, 0);
1036 	return ctrl->vs < NVME_VS(1, 1, 0);
1037 }
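
/*
 * Callers (e.g. the namespace scanning code later in this file) are
 * expected to use the check above to fall back to probing namespaces
 * sequentially instead of relying on newer CNS values.
 */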
1038 
1039 static int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
1040 {
1041 	struct nvme_command c = { };
1042 	int error;
1043 
1044 	/* gcc-4.4.4 (at least) has issues with initializers and anon unions */
1045 	c.identify.opcode = nvme_admin_identify;
1046 	c.identify.cns = NVME_ID_CNS_CTRL;
1047 
1048 	*id = kmalloc(sizeof(struct nvme_id_ctrl), GFP_KERNEL);
1049 	if (!*id)
1050 		return -ENOMEM;
1051 
1052 	error = nvme_submit_sync_cmd(dev->admin_q, &c, *id,
1053 			sizeof(struct nvme_id_ctrl));
1054 	if (error)
1055 		kfree(*id);
1056 	return error;
1057 }
1058 
1059 static int nvme_process_ns_desc(struct nvme_ctrl *ctrl, struct nvme_ns_ids *ids,
1060 		struct nvme_ns_id_desc *cur)
1061 {
1062 	const char *warn_str = "ctrl returned bogus length:";
1063 	void *data = cur;
1064 
1065 	switch (cur->nidt) {
1066 	case NVME_NIDT_EUI64:
1067 		if (cur->nidl != NVME_NIDT_EUI64_LEN) {
1068 			dev_warn(ctrl->device, "%s %d for NVME_NIDT_EUI64\n",
1069 				 warn_str, cur->nidl);
1070 			return -1;
1071 		}
1072 		memcpy(ids->eui64, data + sizeof(*cur), NVME_NIDT_EUI64_LEN);
1073 		return NVME_NIDT_EUI64_LEN;
1074 	case NVME_NIDT_NGUID:
1075 		if (cur->nidl != NVME_NIDT_NGUID_LEN) {
1076 			dev_warn(ctrl->device, "%s %d for NVME_NIDT_NGUID\n",
1077 				 warn_str, cur->nidl);
1078 			return -1;
1079 		}
1080 		memcpy(ids->nguid, data + sizeof(*cur), NVME_NIDT_NGUID_LEN);
1081 		return NVME_NIDT_NGUID_LEN;
1082 	case NVME_NIDT_UUID:
1083 		if (cur->nidl != NVME_NIDT_UUID_LEN) {
1084 			dev_warn(ctrl->device, "%s %d for NVME_NIDT_UUID\n",
1085 				 warn_str, cur->nidl);
1086 			return -1;
1087 		}
1088 		uuid_copy(&ids->uuid, data + sizeof(*cur));
1089 		return NVME_NIDT_UUID_LEN;
1090 	default:
1091 		/* Skip unknown types */
1092 		return cur->nidl;
1093 	}
1094 }
1095 
1096 static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
1097 		struct nvme_ns_ids *ids)
1098 {
1099 	struct nvme_command c = { };
1100 	int status;
1101 	void *data;
1102 	int pos;
1103 	int len;
1104 
1105 	if (ctrl->quirks & NVME_QUIRK_NO_NS_DESC_LIST)
1106 		return 0;
1107 
1108 	c.identify.opcode = nvme_admin_identify;
1109 	c.identify.nsid = cpu_to_le32(nsid);
1110 	c.identify.cns = NVME_ID_CNS_NS_DESC_LIST;
1111 
1112 	data = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
1113 	if (!data)
1114 		return -ENOMEM;
1115 
1116 	status = nvme_submit_sync_cmd(ctrl->admin_q, &c, data,
1117 				      NVME_IDENTIFY_DATA_SIZE);
1118 	if (status) {
1119 		dev_warn(ctrl->device,
1120 			"Identify Descriptors failed (%d)\n", status);
1121 		goto free_data;
1122 	}
1123 
1124 	for (pos = 0; pos < NVME_IDENTIFY_DATA_SIZE; pos += len) {
1125 		struct nvme_ns_id_desc *cur = data + pos;
1126 
1127 		if (cur->nidl == 0)
1128 			break;
1129 
1130 		len = nvme_process_ns_desc(ctrl, ids, cur);
1131 		if (len < 0)
1132 			goto free_data;
1133 
1134 		len += sizeof(*cur);
1135 	}
1136 free_data:
1137 	kfree(data);
1138 	return status;
1139 }
1140 
1141 static int nvme_identify_ns_list(struct nvme_ctrl *dev, unsigned nsid, __le32 *ns_list)
1142 {
1143 	struct nvme_command c = { };
1144 
1145 	c.identify.opcode = nvme_admin_identify;
1146 	c.identify.cns = NVME_ID_CNS_NS_ACTIVE_LIST;
1147 	c.identify.nsid = cpu_to_le32(nsid);
1148 	return nvme_submit_sync_cmd(dev->admin_q, &c, ns_list,
1149 				    NVME_IDENTIFY_DATA_SIZE);
1150 }
1151 
1152 static int nvme_identify_ns(struct nvme_ctrl *ctrl,
1153 		unsigned nsid, struct nvme_id_ns **id)
1154 {
1155 	struct nvme_command c = { };
1156 	int error;
1157 
1158 	/* gcc-4.4.4 (at least) has issues with initializers and anon unions */
1159 	c.identify.opcode = nvme_admin_identify;
1160 	c.identify.nsid = cpu_to_le32(nsid);
1161 	c.identify.cns = NVME_ID_CNS_NS;
1162 
1163 	*id = kmalloc(sizeof(**id), GFP_KERNEL);
1164 	if (!*id)
1165 		return -ENOMEM;
1166 
1167 	error = nvme_submit_sync_cmd(ctrl->admin_q, &c, *id, sizeof(**id));
1168 	if (error) {
1169 		dev_warn(ctrl->device, "Identify namespace failed (%d)\n", error);
1170 		kfree(*id);
1171 	}
1172 
1173 	return error;
1174 }
1175 
1176 static int nvme_features(struct nvme_ctrl *dev, u8 op, unsigned int fid,
1177 		unsigned int dword11, void *buffer, size_t buflen, u32 *result)
1178 {
1179 	union nvme_result res = { 0 };
1180 	struct nvme_command c;
1181 	int ret;
1182 
1183 	memset(&c, 0, sizeof(c));
1184 	c.features.opcode = op;
1185 	c.features.fid = cpu_to_le32(fid);
1186 	c.features.dword11 = cpu_to_le32(dword11);
1187 
1188 	ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res,
1189 			buffer, buflen, 0, NVME_QID_ANY, 0, 0, false);
1190 	if (ret >= 0 && result)
1191 		*result = le32_to_cpu(res.u32);
1192 	return ret;
1193 }
1194 
1195 int nvme_set_features(struct nvme_ctrl *dev, unsigned int fid,
1196 		      unsigned int dword11, void *buffer, size_t buflen,
1197 		      u32 *result)
1198 {
1199 	return nvme_features(dev, nvme_admin_set_features, fid, dword11, buffer,
1200 			     buflen, result);
1201 }
1202 EXPORT_SYMBOL_GPL(nvme_set_features);
1203 
1204 int nvme_get_features(struct nvme_ctrl *dev, unsigned int fid,
1205 		      unsigned int dword11, void *buffer, size_t buflen,
1206 		      u32 *result)
1207 {
1208 	return nvme_features(dev, nvme_admin_get_features, fid, dword11, buffer,
1209 			     buflen, result);
1210 }
1211 EXPORT_SYMBOL_GPL(nvme_get_features);
1212 
1213 int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count)
1214 {
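	/*
	 * Number of Queues feature: dword11 holds the 0's based count of
	 * I/O submission queues requested in the low 16 bits and the
	 * completion queue count in the high 16 bits; the result dword is
	 * encoded the same way.
	 */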
1215 	u32 q_count = (*count - 1) | ((*count - 1) << 16);
1216 	u32 result;
1217 	int status, nr_io_queues;
1218 
1219 	status = nvme_set_features(ctrl, NVME_FEAT_NUM_QUEUES, q_count, NULL, 0,
1220 			&result);
1221 	if (status < 0)
1222 		return status;
1223 
1224 	/*
1225 	 * Degraded controllers might return an error when setting the queue
1226 	 * count.  We still want to be able to bring them online and offer
1227 	 * access to the admin queue, as that might be only way to fix them up.
1228 	 */
1229 	if (status > 0) {
1230 		dev_err(ctrl->device, "Could not set queue count (%d)\n", status);
1231 		*count = 0;
1232 	} else {
1233 		nr_io_queues = min(result & 0xffff, result >> 16) + 1;
1234 		*count = min(*count, nr_io_queues);
1235 	}
1236 
1237 	return 0;
1238 }
1239 EXPORT_SYMBOL_GPL(nvme_set_queue_count);
1240 
1241 #define NVME_AEN_SUPPORTED \
1242 	(NVME_AEN_CFG_NS_ATTR | NVME_AEN_CFG_FW_ACT | \
1243 	 NVME_AEN_CFG_ANA_CHANGE | NVME_AEN_CFG_DISC_CHANGE)
1244 
1245 static void nvme_enable_aen(struct nvme_ctrl *ctrl)
1246 {
1247 	u32 result, supported_aens = ctrl->oaes & NVME_AEN_SUPPORTED;
1248 	int status;
1249 
1250 	if (!supported_aens)
1251 		return;
1252 
1253 	status = nvme_set_features(ctrl, NVME_FEAT_ASYNC_EVENT, supported_aens,
1254 			NULL, 0, &result);
1255 	if (status)
1256 		dev_warn(ctrl->device, "Failed to configure AEN (cfg %x)\n",
1257 			 supported_aens);
1258 
1259 	queue_work(nvme_wq, &ctrl->async_event_work);
1260 }
1261 
1262 /*
1263  * Convert integer values from ioctl structures to user pointers, silently
1264  * ignoring the upper bits in the compat case to match behaviour of 32-bit
1265  * kernels.
1266  */
1267 static void __user *nvme_to_user_ptr(uintptr_t ptrval)
1268 {
1269 	if (in_compat_syscall())
1270 		ptrval = (compat_uptr_t)ptrval;
1271 	return (void __user *)ptrval;
1272 }
1273 
1274 static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
1275 {
1276 	struct nvme_user_io io;
1277 	struct nvme_command c;
1278 	unsigned length, meta_len;
1279 	void __user *metadata;
1280 
1281 	if (copy_from_user(&io, uio, sizeof(io)))
1282 		return -EFAULT;
1283 	if (io.flags)
1284 		return -EINVAL;
1285 
1286 	switch (io.opcode) {
1287 	case nvme_cmd_write:
1288 	case nvme_cmd_read:
1289 	case nvme_cmd_compare:
1290 		break;
1291 	default:
1292 		return -EINVAL;
1293 	}
1294 
1295 	length = (io.nblocks + 1) << ns->lba_shift;
1296 	meta_len = (io.nblocks + 1) * ns->ms;
1297 	metadata = nvme_to_user_ptr(io.metadata);
1298 
1299 	if (ns->features & NVME_NS_EXT_LBAS) {
1300 		length += meta_len;
1301 		meta_len = 0;
1302 	} else if (meta_len) {
1303 		if ((io.metadata & 3) || !io.metadata)
1304 			return -EINVAL;
1305 	}
1306 
1307 	memset(&c, 0, sizeof(c));
1308 	c.rw.opcode = io.opcode;
1309 	c.rw.flags = io.flags;
1310 	c.rw.nsid = cpu_to_le32(ns->head->ns_id);
1311 	c.rw.slba = cpu_to_le64(io.slba);
1312 	c.rw.length = cpu_to_le16(io.nblocks);
1313 	c.rw.control = cpu_to_le16(io.control);
1314 	c.rw.dsmgmt = cpu_to_le32(io.dsmgmt);
1315 	c.rw.reftag = cpu_to_le32(io.reftag);
1316 	c.rw.apptag = cpu_to_le16(io.apptag);
1317 	c.rw.appmask = cpu_to_le16(io.appmask);
1318 
1319 	return nvme_submit_user_cmd(ns->queue, &c,
1320 			nvme_to_user_ptr(io.addr), length,
1321 			metadata, meta_len, lower_32_bits(io.slba), NULL, 0);
1322 }
1323 
1324 static u32 nvme_known_admin_effects(u8 opcode)
1325 {
1326 	switch (opcode) {
1327 	case nvme_admin_format_nvm:
1328 		return NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC |
1329 					NVME_CMD_EFFECTS_CSE_MASK;
1330 	case nvme_admin_sanitize_nvm:
1331 		return NVME_CMD_EFFECTS_CSE_MASK;
1332 	default:
1333 		break;
1334 	}
1335 	return 0;
1336 }
1337 
1338 static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
1339 								u8 opcode)
1340 {
1341 	u32 effects = 0;
1342 
1343 	if (ns) {
1344 		if (ctrl->effects)
1345 			effects = le32_to_cpu(ctrl->effects->iocs[opcode]);
1346 		if (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC))
1347 			dev_warn(ctrl->device,
1348 				 "IO command:%02x has unhandled effects:%08x\n",
1349 				 opcode, effects);
1350 		return 0;
1351 	}
1352 
1353 	if (ctrl->effects)
1354 		effects = le32_to_cpu(ctrl->effects->acs[opcode]);
1355 	effects |= nvme_known_admin_effects(opcode);
1356 
1357 	/*
1358 	 * For simplicity, IO to all namespaces is quiesced even if the command
1359 	 * effects say only one namespace is affected.
1360 	 */
1361 	if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) {
1362 		mutex_lock(&ctrl->scan_lock);
1363 		mutex_lock(&ctrl->subsys->lock);
1364 		nvme_mpath_start_freeze(ctrl->subsys);
1365 		nvme_mpath_wait_freeze(ctrl->subsys);
1366 		nvme_start_freeze(ctrl);
1367 		nvme_wait_freeze(ctrl);
1368 	}
1369 	return effects;
1370 }
1371 
1372 static void nvme_update_formats(struct nvme_ctrl *ctrl)
1373 {
1374 	struct nvme_ns *ns;
1375 
1376 	down_read(&ctrl->namespaces_rwsem);
1377 	list_for_each_entry(ns, &ctrl->namespaces, list)
1378 		if (ns->disk && nvme_revalidate_disk(ns->disk))
1379 			nvme_set_queue_dying(ns);
1380 	up_read(&ctrl->namespaces_rwsem);
1381 }
1382 
1383 static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects)
1384 {
1385 	/*
1386 	 * Revalidate LBA changes prior to unfreezing. This is necessary to
1387 	 * prevent memory corruption if a logical block size was changed by
1388 	 * this command.
1389 	 */
1390 	if (effects & NVME_CMD_EFFECTS_LBCC)
1391 		nvme_update_formats(ctrl);
1392 	if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) {
1393 		nvme_unfreeze(ctrl);
1394 		nvme_mpath_unfreeze(ctrl->subsys);
1395 		mutex_unlock(&ctrl->subsys->lock);
1396 		nvme_remove_invalid_namespaces(ctrl, NVME_NSID_ALL);
1397 		mutex_unlock(&ctrl->scan_lock);
1398 	}
1399 	if (effects & NVME_CMD_EFFECTS_CCC)
1400 		nvme_init_identify(ctrl);
1401 	if (effects & (NVME_CMD_EFFECTS_NIC | NVME_CMD_EFFECTS_NCC)) {
1402 		nvme_queue_scan(ctrl);
1403 		flush_work(&ctrl->scan_work);
1404 	}
1405 }
1406 
1407 static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
1408 			struct nvme_passthru_cmd __user *ucmd)
1409 {
1410 	struct nvme_passthru_cmd cmd;
1411 	struct nvme_command c;
1412 	unsigned timeout = 0;
1413 	u32 effects;
1414 	u64 result;
1415 	int status;
1416 
1417 	if (!capable(CAP_SYS_ADMIN))
1418 		return -EACCES;
1419 	if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
1420 		return -EFAULT;
1421 	if (cmd.flags)
1422 		return -EINVAL;
1423 
1424 	memset(&c, 0, sizeof(c));
1425 	c.common.opcode = cmd.opcode;
1426 	c.common.flags = cmd.flags;
1427 	c.common.nsid = cpu_to_le32(cmd.nsid);
1428 	c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
1429 	c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
1430 	c.common.cdw10 = cpu_to_le32(cmd.cdw10);
1431 	c.common.cdw11 = cpu_to_le32(cmd.cdw11);
1432 	c.common.cdw12 = cpu_to_le32(cmd.cdw12);
1433 	c.common.cdw13 = cpu_to_le32(cmd.cdw13);
1434 	c.common.cdw14 = cpu_to_le32(cmd.cdw14);
1435 	c.common.cdw15 = cpu_to_le32(cmd.cdw15);
1436 
1437 	if (cmd.timeout_ms)
1438 		timeout = msecs_to_jiffies(cmd.timeout_ms);
1439 
1440 	effects = nvme_passthru_start(ctrl, ns, cmd.opcode);
1441 	status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
1442 			nvme_to_user_ptr(cmd.addr), cmd.data_len,
1443 			nvme_to_user_ptr(cmd.metadata), cmd.metadata_len,
1444 			0, &result, timeout);
1445 	nvme_passthru_end(ctrl, effects);
1446 
1447 	if (status >= 0) {
1448 		if (put_user(result, &ucmd->result))
1449 			return -EFAULT;
1450 	}
1451 
1452 	return status;
1453 }
1454 
1455 static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
1456 			struct nvme_passthru_cmd64 __user *ucmd)
1457 {
1458 	struct nvme_passthru_cmd64 cmd;
1459 	struct nvme_command c;
1460 	unsigned timeout = 0;
1461 	u32 effects;
1462 	int status;
1463 
1464 	if (!capable(CAP_SYS_ADMIN))
1465 		return -EACCES;
1466 	if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
1467 		return -EFAULT;
1468 	if (cmd.flags)
1469 		return -EINVAL;
1470 
1471 	memset(&c, 0, sizeof(c));
1472 	c.common.opcode = cmd.opcode;
1473 	c.common.flags = cmd.flags;
1474 	c.common.nsid = cpu_to_le32(cmd.nsid);
1475 	c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
1476 	c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
1477 	c.common.cdw10 = cpu_to_le32(cmd.cdw10);
1478 	c.common.cdw11 = cpu_to_le32(cmd.cdw11);
1479 	c.common.cdw12 = cpu_to_le32(cmd.cdw12);
1480 	c.common.cdw13 = cpu_to_le32(cmd.cdw13);
1481 	c.common.cdw14 = cpu_to_le32(cmd.cdw14);
1482 	c.common.cdw15 = cpu_to_le32(cmd.cdw15);
1483 
1484 	if (cmd.timeout_ms)
1485 		timeout = msecs_to_jiffies(cmd.timeout_ms);
1486 
1487 	effects = nvme_passthru_start(ctrl, ns, cmd.opcode);
1488 	status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
1489 			nvme_to_user_ptr(cmd.addr), cmd.data_len,
1490 			nvme_to_user_ptr(cmd.metadata), cmd.metadata_len,
1491 			0, &cmd.result, timeout);
1492 	nvme_passthru_end(ctrl, effects);
1493 
1494 	if (status >= 0) {
1495 		if (put_user(cmd.result, &ucmd->result))
1496 			return -EFAULT;
1497 	}
1498 
1499 	return status;
1500 }
1501 
1502 /*
1503  * Issue ioctl requests on the first available path.  Note that unlike normal
1504  * block layer requests we will not retry failed requests on another controller.
1505  */
1506 static struct nvme_ns *nvme_get_ns_from_disk(struct gendisk *disk,
1507 		struct nvme_ns_head **head, int *srcu_idx)
1508 {
1509 #ifdef CONFIG_NVME_MULTIPATH
1510 	if (disk->fops == &nvme_ns_head_ops) {
1511 		struct nvme_ns *ns;
1512 
1513 		*head = disk->private_data;
1514 		*srcu_idx = srcu_read_lock(&(*head)->srcu);
1515 		ns = nvme_find_path(*head);
1516 		if (!ns)
1517 			srcu_read_unlock(&(*head)->srcu, *srcu_idx);
1518 		return ns;
1519 	}
1520 #endif
1521 	*head = NULL;
1522 	*srcu_idx = -1;
1523 	return disk->private_data;
1524 }
1525 
1526 static void nvme_put_ns_from_disk(struct nvme_ns_head *head, int idx)
1527 {
1528 	if (head)
1529 		srcu_read_unlock(&head->srcu, idx);
1530 }
1531 
1532 static bool is_ctrl_ioctl(unsigned int cmd)
1533 {
1534 	if (cmd == NVME_IOCTL_ADMIN_CMD || cmd == NVME_IOCTL_ADMIN64_CMD)
1535 		return true;
1536 	if (is_sed_ioctl(cmd))
1537 		return true;
1538 	return false;
1539 }
1540 
1541 static int nvme_handle_ctrl_ioctl(struct nvme_ns *ns, unsigned int cmd,
1542 				  void __user *argp,
1543 				  struct nvme_ns_head *head,
1544 				  int srcu_idx)
1545 {
1546 	struct nvme_ctrl *ctrl = ns->ctrl;
1547 	int ret;
1548 
1549 	nvme_get_ctrl(ns->ctrl);
1550 	nvme_put_ns_from_disk(head, srcu_idx);
1551 
1552 	switch (cmd) {
1553 	case NVME_IOCTL_ADMIN_CMD:
1554 		ret = nvme_user_cmd(ctrl, NULL, argp);
1555 		break;
1556 	case NVME_IOCTL_ADMIN64_CMD:
1557 		ret = nvme_user_cmd64(ctrl, NULL, argp);
1558 		break;
1559 	default:
1560 		ret = sed_ioctl(ctrl->opal_dev, cmd, argp);
1561 		break;
1562 	}
1563 	nvme_put_ctrl(ctrl);
1564 	return ret;
1565 }
1566 
1567 static int nvme_ioctl(struct block_device *bdev, fmode_t mode,
1568 		unsigned int cmd, unsigned long arg)
1569 {
1570 	struct nvme_ns_head *head = NULL;
1571 	void __user *argp = (void __user *)arg;
1572 	struct nvme_ns *ns;
1573 	int srcu_idx, ret;
1574 
1575 	ns = nvme_get_ns_from_disk(bdev->bd_disk, &head, &srcu_idx);
1576 	if (unlikely(!ns))
1577 		return -EWOULDBLOCK;
1578 
1579 	/*
1580 	 * Handle ioctls that apply to the controller instead of the namespace
1581 	 * separately and drop the ns SRCU reference early.  This avoids a
1582 	 * deadlock when deleting namespaces using the passthrough interface.
1583 	 */
1584 	if (is_ctrl_ioctl(cmd))
1585 		return nvme_handle_ctrl_ioctl(ns, cmd, argp, head, srcu_idx);
1586 
1587 	switch (cmd) {
1588 	case NVME_IOCTL_ID:
1589 		force_successful_syscall_return();
1590 		ret = ns->head->ns_id;
1591 		break;
1592 	case NVME_IOCTL_IO_CMD:
1593 		ret = nvme_user_cmd(ns->ctrl, ns, argp);
1594 		break;
1595 	case NVME_IOCTL_SUBMIT_IO:
1596 		ret = nvme_submit_io(ns, argp);
1597 		break;
1598 	case NVME_IOCTL_IO64_CMD:
1599 		ret = nvme_user_cmd64(ns->ctrl, ns, argp);
1600 		break;
1601 	default:
1602 		if (ns->ndev)
1603 			ret = nvme_nvm_ioctl(ns, cmd, arg);
1604 		else
1605 			ret = -ENOTTY;
1606 	}
1607 
1608 	nvme_put_ns_from_disk(head, srcu_idx);
1609 	return ret;
1610 }
1611 
1612 #ifdef CONFIG_COMPAT
1613 struct nvme_user_io32 {
1614 	__u8	opcode;
1615 	__u8	flags;
1616 	__u16	control;
1617 	__u16	nblocks;
1618 	__u16	rsvd;
1619 	__u64	metadata;
1620 	__u64	addr;
1621 	__u64	slba;
1622 	__u32	dsmgmt;
1623 	__u32	reftag;
1624 	__u16	apptag;
1625 	__u16	appmask;
1626 } __attribute__((__packed__));
1627 
1628 #define NVME_IOCTL_SUBMIT_IO32	_IOW('N', 0x42, struct nvme_user_io32)
1629 
1630 static int nvme_compat_ioctl(struct block_device *bdev, fmode_t mode,
1631 		unsigned int cmd, unsigned long arg)
1632 {
1633 	/*
1634 	 * NVME_IOCTL_SUBMIT_IO differs between 32 bit programs and a
1635 	 * 64 bit kernel.  The cause is that sizeof(struct nvme_user_io),
1636 	 * which is used to define NVME_IOCTL_SUBMIT_IO, is not the same
1637 	 * for 32 bit and 64 bit compilers, as the trailing struct
1638 	 * padding differs.
1639 	 * NVME_IOCTL_SUBMIT_IO32 lets a 64 bit kernel handle
1640 	 * NVME_IOCTL_SUBMIT_IO as issued by 32 bit programs.
1641 	 * All other ioctl numbers are identical between 32 bit and
1642 	 * 64 bit builds, so nothing needs to be done for them.
1643 	 */
1644 	if (cmd == NVME_IOCTL_SUBMIT_IO32)
1645 		return nvme_ioctl(bdev, mode, NVME_IOCTL_SUBMIT_IO, arg);
1646 
1647 	return nvme_ioctl(bdev, mode, cmd, arg);
1648 }
1649 #else
1650 #define nvme_compat_ioctl	NULL
1651 #endif /* CONFIG_COMPAT */
1652 
1653 static int nvme_open(struct block_device *bdev, fmode_t mode)
1654 {
1655 	struct nvme_ns *ns = bdev->bd_disk->private_data;
1656 
1657 #ifdef CONFIG_NVME_MULTIPATH
1658 	/* should never be called due to GENHD_FL_HIDDEN */
1659 	if (WARN_ON_ONCE(ns->head->disk))
1660 		goto fail;
1661 #endif
1662 	if (!kref_get_unless_zero(&ns->kref))
1663 		goto fail;
1664 	if (!try_module_get(ns->ctrl->ops->module))
1665 		goto fail_put_ns;
1666 
1667 	return 0;
1668 
1669 fail_put_ns:
1670 	nvme_put_ns(ns);
1671 fail:
1672 	return -ENXIO;
1673 }
1674 
1675 static void nvme_release(struct gendisk *disk, fmode_t mode)
1676 {
1677 	struct nvme_ns *ns = disk->private_data;
1678 
1679 	module_put(ns->ctrl->ops->module);
1680 	nvme_put_ns(ns);
1681 }
1682 
1683 static int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo)
1684 {
1685 	/* some standard values */
1686 	geo->heads = 1 << 6;
1687 	geo->sectors = 1 << 5;
1688 	geo->cylinders = get_capacity(bdev->bd_disk) >> 11;
1689 	return 0;
1690 }
1691 
1692 #ifdef CONFIG_BLK_DEV_INTEGRITY
1693 static void nvme_init_integrity(struct gendisk *disk, u16 ms, u8 pi_type,
1694 				u32 max_integrity_segments)
1695 {
1696 	struct blk_integrity integrity;
1697 
1698 	memset(&integrity, 0, sizeof(integrity));
1699 	switch (pi_type) {
1700 	case NVME_NS_DPS_PI_TYPE3:
1701 		integrity.profile = &t10_pi_type3_crc;
1702 		integrity.tag_size = sizeof(u16) + sizeof(u32);
1703 		integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
1704 		break;
1705 	case NVME_NS_DPS_PI_TYPE1:
1706 	case NVME_NS_DPS_PI_TYPE2:
1707 		integrity.profile = &t10_pi_type1_crc;
1708 		integrity.tag_size = sizeof(u16);
1709 		integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
1710 		break;
1711 	default:
1712 		integrity.profile = NULL;
1713 		break;
1714 	}
1715 	integrity.tuple_size = ms;
1716 	blk_integrity_register(disk, &integrity);
1717 	blk_queue_max_integrity_segments(disk->queue, max_integrity_segments);
1718 }
1719 #else
1720 static void nvme_init_integrity(struct gendisk *disk, u16 ms, u8 pi_type,
1721 				u32 max_integrity_segments)
1722 {
1723 }
1724 #endif /* CONFIG_BLK_DEV_INTEGRITY */
1725 
1726 static void nvme_config_discard(struct gendisk *disk, struct nvme_ns *ns)
1727 {
1728 	struct nvme_ctrl *ctrl = ns->ctrl;
1729 	struct request_queue *queue = disk->queue;
1730 	u32 size = queue_logical_block_size(queue);
1731 
1732 	if (!(ctrl->oncs & NVME_CTRL_ONCS_DSM)) {
1733 		blk_queue_flag_clear(QUEUE_FLAG_DISCARD, queue);
1734 		return;
1735 	}
1736 
1737 	if (ctrl->nr_streams && ns->sws && ns->sgs)
1738 		size *= ns->sws * ns->sgs;
1739 
1740 	BUILD_BUG_ON(PAGE_SIZE / sizeof(struct nvme_dsm_range) <
1741 			NVME_DSM_MAX_RANGES);
1742 
1743 	queue->limits.discard_alignment = 0;
1744 	queue->limits.discard_granularity = size;
1745 
1746 	/* If discard is already enabled, don't reset queue limits */
1747 	if (blk_queue_flag_test_and_set(QUEUE_FLAG_DISCARD, queue))
1748 		return;
1749 
1750 	blk_queue_max_discard_sectors(queue, UINT_MAX);
1751 	blk_queue_max_discard_segments(queue, NVME_DSM_MAX_RANGES);
1752 
1753 	if (ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
1754 		blk_queue_max_write_zeroes_sectors(queue, UINT_MAX);
1755 }
1756 
1757 static void nvme_config_write_zeroes(struct gendisk *disk, struct nvme_ns *ns)
1758 {
1759 	u64 max_blocks;
1760 
1761 	if (!(ns->ctrl->oncs & NVME_CTRL_ONCS_WRITE_ZEROES) ||
1762 	    (ns->ctrl->quirks & NVME_QUIRK_DISABLE_WRITE_ZEROES))
1763 		return;
1764 	/*
1765 	 * Even though the NVMe spec explicitly states that MDTS is not
1766 	 * applicable to Write Zeroes ("The restriction does not apply to
1767 	 * commands that do not transfer data between the host and the
1768 	 * controller (e.g., Write Uncorrectable or Write Zeroes command)."),
1769 	 * be cautious and use the controller's max_hw_sectors value, which
1770 	 * is configured from the controller's MDTS field in
1771 	 * nvme_init_identify() if available, to cap the maximum sectors
1772 	 * for Write Zeroes.
1773 	 */
1774 	if (ns->ctrl->max_hw_sectors == UINT_MAX)
1775 		max_blocks = (u64)USHRT_MAX + 1;
1776 	else
1777 		max_blocks = ns->ctrl->max_hw_sectors + 1;
1778 
1779 	blk_queue_max_write_zeroes_sectors(disk->queue,
1780 					   nvme_lba_to_sect(ns, max_blocks));
1781 }
1782 
1783 static int nvme_report_ns_ids(struct nvme_ctrl *ctrl, unsigned int nsid,
1784 		struct nvme_id_ns *id, struct nvme_ns_ids *ids)
1785 {
1786 	memset(ids, 0, sizeof(*ids));
1787 
1788 	if (ctrl->vs >= NVME_VS(1, 1, 0))
1789 		memcpy(ids->eui64, id->eui64, sizeof(id->eui64));
1790 	if (ctrl->vs >= NVME_VS(1, 2, 0))
1791 		memcpy(ids->nguid, id->nguid, sizeof(id->nguid));
1792 	if (ctrl->vs >= NVME_VS(1, 3, 0))
1793 		return nvme_identify_ns_descs(ctrl, nsid, ids);
1794 	return 0;
1795 }
1796 
1797 static bool nvme_ns_ids_valid(struct nvme_ns_ids *ids)
1798 {
1799 	return !uuid_is_null(&ids->uuid) ||
1800 		memchr_inv(ids->nguid, 0, sizeof(ids->nguid)) ||
1801 		memchr_inv(ids->eui64, 0, sizeof(ids->eui64));
1802 }
1803 
1804 static bool nvme_ns_ids_equal(struct nvme_ns_ids *a, struct nvme_ns_ids *b)
1805 {
1806 	return uuid_equal(&a->uuid, &b->uuid) &&
1807 		memcmp(&a->nguid, &b->nguid, sizeof(a->nguid)) == 0 &&
1808 		memcmp(&a->eui64, &b->eui64, sizeof(a->eui64)) == 0;
1809 }
1810 
1811 static int nvme_setup_streams_ns(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
1812 				 u32 *phys_bs, u32 *io_opt)
1813 {
1814 	struct streams_directive_params s;
1815 	int ret;
1816 
1817 	if (!ctrl->nr_streams)
1818 		return 0;
1819 
1820 	ret = nvme_get_stream_params(ctrl, &s, ns->head->ns_id);
1821 	if (ret)
1822 		return ret;
1823 
1824 	ns->sws = le32_to_cpu(s.sws);
1825 	ns->sgs = le16_to_cpu(s.sgs);
1826 
1827 	if (ns->sws) {
1828 		*phys_bs = ns->sws * (1 << ns->lba_shift);
1829 		if (ns->sgs)
1830 			*io_opt = *phys_bs * ns->sgs;
1831 	}
1832 
1833 	return 0;
1834 }
1835 
1836 static void nvme_update_disk_info(struct gendisk *disk,
1837 		struct nvme_ns *ns, struct nvme_id_ns *id)
1838 {
1839 	sector_t capacity = nvme_lba_to_sect(ns, le64_to_cpu(id->nsze));
1840 	unsigned short bs = 1 << ns->lba_shift;
1841 	u32 atomic_bs, phys_bs, io_opt = 0;
1842 
1843 	if (ns->lba_shift > PAGE_SHIFT) {
1844 		/* unsupported block size, set capacity to 0 later */
1845 		bs = (1 << 9);
1846 	}
1847 	blk_mq_freeze_queue(disk->queue);
1848 	blk_integrity_unregister(disk);
1849 
1850 	atomic_bs = phys_bs = bs;
1851 	nvme_setup_streams_ns(ns->ctrl, ns, &phys_bs, &io_opt);
1852 	if (id->nabo == 0) {
1853 		/*
1854 		 * Bit 1 indicates whether NAWUPF is defined for this namespace
1855 		 * and whether it should be used instead of AWUPF. If NAWUPF ==
1856 		 * 0 then AWUPF must be used instead.
1857 		 */
1858 		if (id->nsfeat & NVME_NS_FEAT_ATOMICS && id->nawupf)
1859 			atomic_bs = (1 + le16_to_cpu(id->nawupf)) * bs;
1860 		else
1861 			atomic_bs = (1 + ns->ctrl->subsys->awupf) * bs;
1862 	}
1863 
1864 	if (id->nsfeat & NVME_NS_FEAT_IO_OPT) {
1865 		/* NPWG = Namespace Preferred Write Granularity */
1866 		phys_bs = bs * (1 + le16_to_cpu(id->npwg));
1867 		/* NOWS = Namespace Optimal Write Size */
1868 		io_opt = bs * (1 + le16_to_cpu(id->nows));
1869 	}
1870 
1871 	blk_queue_logical_block_size(disk->queue, bs);
1872 	/*
1873 	 * Linux filesystems assume writing a single physical block is
1874 	 * an atomic operation. Hence limit the physical block size to the
1875 	 * value of the Atomic Write Unit Power Fail parameter.
1876 	 */
1877 	blk_queue_physical_block_size(disk->queue, min(phys_bs, atomic_bs));
1878 	blk_queue_io_min(disk->queue, phys_bs);
1879 	blk_queue_io_opt(disk->queue, io_opt);
1880 
1881 	/*
1882 	 * The block layer can't support LBA sizes larger than the page size
1883 	 * yet, so catch this early and don't allow block I/O.
1884 	 */
1885 	if (ns->lba_shift > PAGE_SHIFT)
1886 		capacity = 0;
1887 
1888 	/*
1889 	 * Register a metadata profile for PI, or the plain non-integrity NVMe
1890 	 * metadata masquerading as Type 0 if supported, otherwise reject block
1891 	 * I/O to namespaces with metadata except when the namespace supports
1892 	 * PI, as it can strip/insert in that case.
1893 	 */
1894 	if (ns->ms) {
1895 		if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) &&
1896 		    (ns->features & NVME_NS_METADATA_SUPPORTED))
1897 			nvme_init_integrity(disk, ns->ms, ns->pi_type,
1898 					    ns->ctrl->max_integrity_segments);
1899 		else if (!nvme_ns_has_pi(ns))
1900 			capacity = 0;
1901 	}
1902 
1903 	set_capacity_revalidate_and_notify(disk, capacity, false);
1904 
1905 	nvme_config_discard(disk, ns);
1906 	nvme_config_write_zeroes(disk, ns);
1907 
1908 	if (id->nsattr & NVME_NS_ATTR_RO)
1909 		set_disk_ro(disk, true);
1910 	else
1911 		set_disk_ro(disk, false);
1912 
1913 	blk_mq_unfreeze_queue(disk->queue);
1914 }
1915 
1916 static int __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
1917 {
1918 	struct nvme_ns *ns = disk->private_data;
1919 	struct nvme_ctrl *ctrl = ns->ctrl;
1920 	u32 iob;
1921 
1922 	/*
1923 	 * If Identify Namespace failed, use a default 512 byte block size so
1924 	 * the block layer can use the disk before read/write fails for 0 capacity.
1925 	 */
1926 	ns->lba_shift = id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ds;
1927 	if (ns->lba_shift == 0)
1928 		ns->lba_shift = 9;
1929 
1930 	if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) &&
1931 	    is_power_of_2(ctrl->max_hw_sectors))
1932 		iob = ctrl->max_hw_sectors;
1933 	else
1934 		iob = nvme_lba_to_sect(ns, le16_to_cpu(id->noiob));
1935 
1936 	ns->features = 0;
1937 	ns->ms = le16_to_cpu(id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ms);
1938 	/* the PI implementation requires the metadata size to equal the T10 PI tuple size */
1939 	if (ns->ms == sizeof(struct t10_pi_tuple))
1940 		ns->pi_type = id->dps & NVME_NS_DPS_PI_MASK;
1941 	else
1942 		ns->pi_type = 0;
1943 
1944 	if (ns->ms) {
1945 		/*
1946 		 * For PCIe only the separate metadata pointer is supported,
1947 		 * as the block layer supplies metadata in a separate bio_vec
1948 		 * chain. For Fabrics, only metadata as part of extended data
1949 		 * LBA is supported on the wire per the Fabrics specification,
1950 		 * but the HBA/HCA will do the remapping from the separate
1951 		 * metadata buffers for us.
1952 		 */
1953 		if (id->flbas & NVME_NS_FLBAS_META_EXT) {
1954 			ns->features |= NVME_NS_EXT_LBAS;
1955 			if ((ctrl->ops->flags & NVME_F_FABRICS) &&
1956 			    (ctrl->ops->flags & NVME_F_METADATA_SUPPORTED) &&
1957 			    ctrl->max_integrity_segments)
1958 				ns->features |= NVME_NS_METADATA_SUPPORTED;
1959 		} else {
1960 			if (WARN_ON_ONCE(ctrl->ops->flags & NVME_F_FABRICS))
1961 				return -EINVAL;
1962 			if (ctrl->ops->flags & NVME_F_METADATA_SUPPORTED)
1963 				ns->features |= NVME_NS_METADATA_SUPPORTED;
1964 		}
1965 	}
1966 
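	/*
	 * NOIOB (or the stripe-size quirk) describes a boundary that I/O
	 * should not cross; chunk_sectors must be a power of two.
	 */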
1967 	if (iob)
1968 		blk_queue_chunk_sectors(ns->queue, rounddown_pow_of_two(iob));
1969 	nvme_update_disk_info(disk, ns, id);
1970 #ifdef CONFIG_NVME_MULTIPATH
1971 	if (ns->head->disk) {
1972 		nvme_update_disk_info(ns->head->disk, ns, id);
1973 		blk_queue_stack_limits(ns->head->disk->queue, ns->queue);
1974 		nvme_mpath_update_disk_size(ns->head->disk);
1975 	}
1976 #endif
1977 	return 0;
1978 }
1979 
1980 static int nvme_revalidate_disk(struct gendisk *disk)
1981 {
1982 	struct nvme_ns *ns = disk->private_data;
1983 	struct nvme_ctrl *ctrl = ns->ctrl;
1984 	struct nvme_id_ns *id;
1985 	struct nvme_ns_ids ids;
1986 	int ret = 0;
1987 
1988 	if (test_bit(NVME_NS_DEAD, &ns->flags)) {
1989 		set_capacity(disk, 0);
1990 		return -ENODEV;
1991 	}
1992 
1993 	ret = nvme_identify_ns(ctrl, ns->head->ns_id, &id);
1994 	if (ret)
1995 		goto out;
1996 
1997 	if (id->ncap == 0) {
1998 		ret = -ENODEV;
1999 		goto free_id;
2000 	}
2001 
2002 	ret = nvme_report_ns_ids(ctrl, ns->head->ns_id, id, &ids);
2003 	if (ret)
2004 		goto free_id;
2005 
2006 	if (!nvme_ns_ids_equal(&ns->head->ids, &ids)) {
2007 		dev_err(ctrl->device,
2008 			"identifiers changed for nsid %d\n", ns->head->ns_id);
2009 		ret = -ENODEV;
2010 		goto free_id;
2011 	}
2012 
2013 	ret = __nvme_revalidate_disk(disk, id);
2014 free_id:
2015 	kfree(id);
2016 out:
2017 	/*
2018 	 * Only fail the function if we got a fatal error back from the
2019 	 * device, otherwise ignore the error and just move on.
2020 	 */
2021 	if (ret == -ENOMEM || (ret > 0 && !(ret & NVME_SC_DNR)))
2022 		ret = 0;
2023 	else if (ret > 0)
2024 		ret = blk_status_to_errno(nvme_error_status(ret));
2025 	return ret;
2026 }
2027 
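/*
 * Map the block layer's persistent reservation types to the NVMe
 * Reservation Type field values; 0 means the type has no NVMe equivalent.
 */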
2028 static char nvme_pr_type(enum pr_type type)
2029 {
2030 	switch (type) {
2031 	case PR_WRITE_EXCLUSIVE:
2032 		return 1;
2033 	case PR_EXCLUSIVE_ACCESS:
2034 		return 2;
2035 	case PR_WRITE_EXCLUSIVE_REG_ONLY:
2036 		return 3;
2037 	case PR_EXCLUSIVE_ACCESS_REG_ONLY:
2038 		return 4;
2039 	case PR_WRITE_EXCLUSIVE_ALL_REGS:
2040 		return 5;
2041 	case PR_EXCLUSIVE_ACCESS_ALL_REGS:
2042 		return 6;
2043 	default:
2044 		return 0;
2045 	}
2046 }
2047 
2048 static int nvme_pr_command(struct block_device *bdev, u32 cdw10,
2049 				u64 key, u64 sa_key, u8 op)
2050 {
2051 	struct nvme_ns_head *head = NULL;
2052 	struct nvme_ns *ns;
2053 	struct nvme_command c;
2054 	int srcu_idx, ret;
2055 	u8 data[16] = { 0, };
2056 
2057 	ns = nvme_get_ns_from_disk(bdev->bd_disk, &head, &srcu_idx);
2058 	if (unlikely(!ns))
2059 		return -EWOULDBLOCK;
2060 
2061 	put_unaligned_le64(key, &data[0]);
2062 	put_unaligned_le64(sa_key, &data[8]);
2063 
2064 	memset(&c, 0, sizeof(c));
2065 	c.common.opcode = op;
2066 	c.common.nsid = cpu_to_le32(ns->head->ns_id);
2067 	c.common.cdw10 = cpu_to_le32(cdw10);
2068 
2069 	ret = nvme_submit_sync_cmd(ns->queue, &c, data, 16);
2070 	nvme_put_ns_from_disk(head, srcu_idx);
2071 	return ret;
2072 }
2073 
2074 static int nvme_pr_register(struct block_device *bdev, u64 old,
2075 		u64 new, unsigned flags)
2076 {
2077 	u32 cdw10;
2078 
2079 	if (flags & ~PR_FL_IGNORE_KEY)
2080 		return -EOPNOTSUPP;
2081 
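	/*
	 * Reservation Register CDW10: RREGA in bits 2:0 (0 = register,
	 * 2 = replace), IEKEY in bit 3, CPTPL in bits 31:30 (11b =
	 * persist through power loss).
	 */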
2082 	cdw10 = old ? 2 : 0;
2083 	cdw10 |= (flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0;
2084 	cdw10 |= (1 << 30) | (1 << 31); /* PTPL=1 */
2085 	return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_register);
2086 }
2087 
2088 static int nvme_pr_reserve(struct block_device *bdev, u64 key,
2089 		enum pr_type type, unsigned flags)
2090 {
2091 	u32 cdw10;
2092 
2093 	if (flags & ~PR_FL_IGNORE_KEY)
2094 		return -EOPNOTSUPP;
2095 
2096 	cdw10 = nvme_pr_type(type) << 8;
2097 	cdw10 |= ((flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0);
2098 	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_acquire);
2099 }
2100 
2101 static int nvme_pr_preempt(struct block_device *bdev, u64 old, u64 new,
2102 		enum pr_type type, bool abort)
2103 {
2104 	u32 cdw10 = nvme_pr_type(type) << 8 | (abort ? 2 : 1);
2105 	return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_acquire);
2106 }
2107 
2108 static int nvme_pr_clear(struct block_device *bdev, u64 key)
2109 {
2110 	u32 cdw10 = 1 | (key ? 1 << 3 : 0);
2111 	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_register);
2112 }
2113 
2114 static int nvme_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
2115 {
2116 	u32 cdw10 = nvme_pr_type(type) << 8 | (key ? 1 << 3 : 0);
2117 	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release);
2118 }
2119 
2120 static const struct pr_ops nvme_pr_ops = {
2121 	.pr_register	= nvme_pr_register,
2122 	.pr_reserve	= nvme_pr_reserve,
2123 	.pr_release	= nvme_pr_release,
2124 	.pr_preempt	= nvme_pr_preempt,
2125 	.pr_clear	= nvme_pr_clear,
2126 };
2127 
2128 #ifdef CONFIG_BLK_SED_OPAL
2129 int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
2130 		bool send)
2131 {
2132 	struct nvme_ctrl *ctrl = data;
2133 	struct nvme_command cmd;
2134 
2135 	memset(&cmd, 0, sizeof(cmd));
2136 	if (send)
2137 		cmd.common.opcode = nvme_admin_security_send;
2138 	else
2139 		cmd.common.opcode = nvme_admin_security_recv;
2140 	cmd.common.nsid = 0;
2141 	cmd.common.cdw10 = cpu_to_le32(((u32)secp) << 24 | ((u32)spsp) << 8);
2142 	cmd.common.cdw11 = cpu_to_le32(len);
2143 
2144 	return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len,
2145 				      ADMIN_TIMEOUT, NVME_QID_ANY, 1, 0, false);
2146 }
2147 EXPORT_SYMBOL_GPL(nvme_sec_submit);
2148 #endif /* CONFIG_BLK_SED_OPAL */
2149 
2150 static const struct block_device_operations nvme_fops = {
2151 	.owner		= THIS_MODULE,
2152 	.ioctl		= nvme_ioctl,
2153 	.compat_ioctl	= nvme_compat_ioctl,
2154 	.open		= nvme_open,
2155 	.release	= nvme_release,
2156 	.getgeo		= nvme_getgeo,
2157 	.revalidate_disk= nvme_revalidate_disk,
2158 	.pr_ops		= &nvme_pr_ops,
2159 };
2160 
2161 #ifdef CONFIG_NVME_MULTIPATH
2162 static int nvme_ns_head_open(struct block_device *bdev, fmode_t mode)
2163 {
2164 	struct nvme_ns_head *head = bdev->bd_disk->private_data;
2165 
2166 	if (!kref_get_unless_zero(&head->ref))
2167 		return -ENXIO;
2168 	return 0;
2169 }
2170 
2171 static void nvme_ns_head_release(struct gendisk *disk, fmode_t mode)
2172 {
2173 	nvme_put_ns_head(disk->private_data);
2174 }
2175 
2176 const struct block_device_operations nvme_ns_head_ops = {
2177 	.owner		= THIS_MODULE,
2178 	.open		= nvme_ns_head_open,
2179 	.release	= nvme_ns_head_release,
2180 	.ioctl		= nvme_ioctl,
2181 	.compat_ioctl	= nvme_compat_ioctl,
2182 	.getgeo		= nvme_getgeo,
2183 	.pr_ops		= &nvme_pr_ops,
2184 };
2185 #endif /* CONFIG_NVME_MULTIPATH */
2186 
2187 static int nvme_wait_ready(struct nvme_ctrl *ctrl, u64 cap, bool enabled)
2188 {
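	/* CAP.TO is in units of 500 milliseconds, hence the "* HZ / 2". */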
2189 	unsigned long timeout =
2190 		((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies;
2191 	u32 csts, bit = enabled ? NVME_CSTS_RDY : 0;
2192 	int ret;
2193 
2194 	while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
2195 		if (csts == ~0)
2196 			return -ENODEV;
2197 		if ((csts & NVME_CSTS_RDY) == bit)
2198 			break;
2199 
2200 		usleep_range(1000, 2000);
2201 		if (fatal_signal_pending(current))
2202 			return -EINTR;
2203 		if (time_after(jiffies, timeout)) {
2204 			dev_err(ctrl->device,
2205 				"Device not ready; aborting %s, CSTS=0x%x\n",
2206 				enabled ? "initialisation" : "reset", csts);
2207 			return -ENODEV;
2208 		}
2209 	}
2210 
2211 	return ret;
2212 }
2213 
2214 /*
2215  * If the device has been passed off to us in an enabled state, just clear
2216  * the enabled bit.  The spec says we should set the 'shutdown notification
2217  * bits', but doing so may cause the device to complete commands to the
2218  * admin queue ... and we don't know what memory that might be pointing at!
2219  */
2220 int nvme_disable_ctrl(struct nvme_ctrl *ctrl)
2221 {
2222 	int ret;
2223 
2224 	ctrl->ctrl_config &= ~NVME_CC_SHN_MASK;
2225 	ctrl->ctrl_config &= ~NVME_CC_ENABLE;
2226 
2227 	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
2228 	if (ret)
2229 		return ret;
2230 
2231 	if (ctrl->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY)
2232 		msleep(NVME_QUIRK_DELAY_AMOUNT);
2233 
2234 	return nvme_wait_ready(ctrl, ctrl->cap, false);
2235 }
2236 EXPORT_SYMBOL_GPL(nvme_disable_ctrl);
2237 
2238 int nvme_enable_ctrl(struct nvme_ctrl *ctrl)
2239 {
2240 	/*
2241 	 * Default to a 4K page size, with the intention to update this
2242 	 * path in the future to accommodate architectures with differing
2243 	 * kernel and IO page sizes.
2244 	 */
2245 	unsigned dev_page_min, page_shift = 12;
2246 	int ret;
2247 
2248 	ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &ctrl->cap);
2249 	if (ret) {
2250 		dev_err(ctrl->device, "Reading CAP failed (%d)\n", ret);
2251 		return ret;
2252 	}
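	/* CAP.MPSMIN encodes the minimum page size as 2^(12 + MPSMIN) bytes. */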
2253 	dev_page_min = NVME_CAP_MPSMIN(ctrl->cap) + 12;
2254 
2255 	if (page_shift < dev_page_min) {
2256 		dev_err(ctrl->device,
2257 			"Minimum device page size %u too large for host (%u)\n",
2258 			1 << dev_page_min, 1 << page_shift);
2259 		return -ENODEV;
2260 	}
2261 
2262 	ctrl->page_size = 1 << page_shift;
2263 
2264 	ctrl->ctrl_config = NVME_CC_CSS_NVM;
2265 	ctrl->ctrl_config |= (page_shift - 12) << NVME_CC_MPS_SHIFT;
2266 	ctrl->ctrl_config |= NVME_CC_AMS_RR | NVME_CC_SHN_NONE;
2267 	ctrl->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;
2268 	ctrl->ctrl_config |= NVME_CC_ENABLE;
2269 
2270 	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
2271 	if (ret)
2272 		return ret;
2273 	return nvme_wait_ready(ctrl, ctrl->cap, true);
2274 }
2275 EXPORT_SYMBOL_GPL(nvme_enable_ctrl);
2276 
2277 int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl)
2278 {
2279 	unsigned long timeout = jiffies + (ctrl->shutdown_timeout * HZ);
2280 	u32 csts;
2281 	int ret;
2282 
2283 	ctrl->ctrl_config &= ~NVME_CC_SHN_MASK;
2284 	ctrl->ctrl_config |= NVME_CC_SHN_NORMAL;
2285 
2286 	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
2287 	if (ret)
2288 		return ret;
2289 
2290 	while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
2291 		if ((csts & NVME_CSTS_SHST_MASK) == NVME_CSTS_SHST_CMPLT)
2292 			break;
2293 
2294 		msleep(100);
2295 		if (fatal_signal_pending(current))
2296 			return -EINTR;
2297 		if (time_after(jiffies, timeout)) {
2298 			dev_err(ctrl->device,
2299 				"Device shutdown incomplete; abort shutdown\n");
2300 			return -ENODEV;
2301 		}
2302 	}
2303 
2304 	return ret;
2305 }
2306 EXPORT_SYMBOL_GPL(nvme_shutdown_ctrl);
2307 
2308 static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
2309 		struct request_queue *q)
2310 {
2311 	bool vwc = false;
2312 
2313 	if (ctrl->max_hw_sectors) {
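		/*
		 * Worst case is one segment per device page, plus one
		 * extra segment for an unaligned transfer.
		 */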
2314 		u32 max_segments =
2315 			(ctrl->max_hw_sectors / (ctrl->page_size >> 9)) + 1;
2316 
2317 		max_segments = min_not_zero(max_segments, ctrl->max_segments);
2318 		blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
2319 		blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
2320 	}
2321 	blk_queue_virt_boundary(q, ctrl->page_size - 1);
2322 	blk_queue_dma_alignment(q, 7);
2323 	if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
2324 		vwc = true;
2325 	blk_queue_write_cache(q, vwc, vwc);
2326 }
2327 
2328 static int nvme_configure_timestamp(struct nvme_ctrl *ctrl)
2329 {
2330 	__le64 ts;
2331 	int ret;
2332 
2333 	if (!(ctrl->oncs & NVME_CTRL_ONCS_TIMESTAMP))
2334 		return 0;
2335 
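	/* Seed the Timestamp feature with the wall clock, in milliseconds. */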
2336 	ts = cpu_to_le64(ktime_to_ms(ktime_get_real()));
2337 	ret = nvme_set_features(ctrl, NVME_FEAT_TIMESTAMP, 0, &ts, sizeof(ts),
2338 			NULL);
2339 	if (ret)
2340 		dev_warn_once(ctrl->device,
2341 			"could not set timestamp (%d)\n", ret);
2342 	return ret;
2343 }
2344 
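/*
 * Enable Advanced Command Retry (ACRE) via the Host Behavior Support
 * feature; it is only useful when the controller reports Command Retry
 * Delay times (CRDT) in Identify Controller.
 */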
2345 static int nvme_configure_acre(struct nvme_ctrl *ctrl)
2346 {
2347 	struct nvme_feat_host_behavior *host;
2348 	int ret;
2349 
2350 	/* Don't bother enabling the feature if retry delay is not reported */
2351 	if (!ctrl->crdt[0])
2352 		return 0;
2353 
2354 	host = kzalloc(sizeof(*host), GFP_KERNEL);
2355 	if (!host)
2356 		return 0;
2357 
2358 	host->acre = NVME_ENABLE_ACRE;
2359 	ret = nvme_set_features(ctrl, NVME_FEAT_HOST_BEHAVIOR, 0,
2360 				host, sizeof(*host), NULL);
2361 	kfree(host);
2362 	return ret;
2363 }
2364 
2365 static int nvme_configure_apst(struct nvme_ctrl *ctrl)
2366 {
2367 	/*
2368 	 * APST (Autonomous Power State Transition) lets us program a
2369 	 * table of power state transitions that the controller will
2370 	 * perform automatically.  We configure it with a simple
2371 	 * heuristic: we are willing to spend at most 2% of the time
2372 	 * transitioning between power states.  Therefore, when running
2373 	 * in any given state, we will enter the next lower-power
2374 	 * non-operational state after waiting 50 * (enlat + exlat)
2375 	 * microseconds, as long as that state's exit latency is under
2376 	 * the requested maximum latency.
2377 	 *
2378 	 * We will not autonomously enter any non-operational state for
2379 	 * which the total latency exceeds ps_max_latency_us.  Users
2380 	 * can set ps_max_latency_us to zero to turn off APST.
2381 	 */
2382 
2383 	unsigned apste;
2384 	struct nvme_feat_auto_pst *table;
2385 	u64 max_lat_us = 0;
2386 	int max_ps = -1;
2387 	int ret;
2388 
2389 	/*
2390 	 * If APST isn't supported or if we haven't been initialized yet,
2391 	 * then don't do anything.
2392 	 */
2393 	if (!ctrl->apsta)
2394 		return 0;
2395 
2396 	if (ctrl->npss > 31) {
2397 		dev_warn(ctrl->device, "NPSS is invalid; not using APST\n");
2398 		return 0;
2399 	}
2400 
2401 	table = kzalloc(sizeof(*table), GFP_KERNEL);
2402 	if (!table)
2403 		return 0;
2404 
2405 	if (!ctrl->apst_enabled || ctrl->ps_max_latency_us == 0) {
2406 		/* Turn off APST. */
2407 		apste = 0;
2408 		dev_dbg(ctrl->device, "APST disabled\n");
2409 	} else {
2410 		__le64 target = cpu_to_le64(0);
2411 		int state;
2412 
2413 		/*
2414 		 * Walk through all states from lowest- to highest-power.
2415 		 * According to the spec, lower-numbered states use more
2416 		 * power.  NPSS, despite the name, is the index of the
2417 		 * lowest-power state, not the number of states.
2418 		 */
2419 		for (state = (int)ctrl->npss; state >= 0; state--) {
2420 			u64 total_latency_us, exit_latency_us, transition_ms;
2421 
2422 			if (target)
2423 				table->entries[state] = target;
2424 
2425 			/*
2426 			 * Don't allow transitions to the deepest state
2427 			 * if it's quirked off.
2428 			 */
2429 			if (state == ctrl->npss &&
2430 			    (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS))
2431 				continue;
2432 
2433 			/*
2434 			 * Is this state a useful non-operational state for
2435 			 * higher-power states to autonomously transition to?
2436 			 */
2437 			if (!(ctrl->psd[state].flags &
2438 			      NVME_PS_FLAGS_NON_OP_STATE))
2439 				continue;
2440 
2441 			exit_latency_us =
2442 				(u64)le32_to_cpu(ctrl->psd[state].exit_lat);
2443 			if (exit_latency_us > ctrl->ps_max_latency_us)
2444 				continue;
2445 
2446 			total_latency_us =
2447 				exit_latency_us +
2448 				le32_to_cpu(ctrl->psd[state].entry_lat);
2449 
2450 			/*
2451 			 * This state is good.  Use it as the APST idle
2452 			 * target for higher power states.
2453 			 */
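			/*
			 * 50 * (enlat + exlat) in microseconds equals
			 * total_latency_us / 20 in milliseconds; the
			 * "+ 19" below rounds up.  E.g. enlat + exlat =
			 * 2000us gives a 100ms idle timer.
			 */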
2454 			transition_ms = total_latency_us + 19;
2455 			do_div(transition_ms, 20);
2456 			if (transition_ms > (1 << 24) - 1)
2457 				transition_ms = (1 << 24) - 1;
2458 
2459 			target = cpu_to_le64((state << 3) |
2460 					     (transition_ms << 8));
2461 
2462 			if (max_ps == -1)
2463 				max_ps = state;
2464 
2465 			if (total_latency_us > max_lat_us)
2466 				max_lat_us = total_latency_us;
2467 		}
2468 
2469 		apste = 1;
2470 
2471 		if (max_ps == -1) {
2472 			dev_dbg(ctrl->device, "APST enabled but no non-operational states are available\n");
2473 		} else {
2474 			dev_dbg(ctrl->device, "APST enabled: max PS = %d, max round-trip latency = %lluus, table = %*phN\n",
2475 				max_ps, max_lat_us, (int)sizeof(*table), table);
2476 		}
2477 	}
2478 
2479 	ret = nvme_set_features(ctrl, NVME_FEAT_AUTO_PST, apste,
2480 				table, sizeof(*table), NULL);
2481 	if (ret)
2482 		dev_err(ctrl->device, "failed to set APST feature (%d)\n", ret);
2483 
2484 	kfree(table);
2485 	return ret;
2486 }
2487 
2488 static void nvme_set_latency_tolerance(struct device *dev, s32 val)
2489 {
2490 	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
2491 	u64 latency;
2492 
2493 	switch (val) {
2494 	case PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT:
2495 	case PM_QOS_LATENCY_ANY:
2496 		latency = U64_MAX;
2497 		break;
2498 
2499 	default:
2500 		latency = val;
2501 	}
2502 
2503 	if (ctrl->ps_max_latency_us != latency) {
2504 		ctrl->ps_max_latency_us = latency;
2505 		nvme_configure_apst(ctrl);
2506 	}
2507 }
2508 
2509 struct nvme_core_quirk_entry {
2510 	/*
2511 	 * NVMe model and firmware strings are padded with spaces.  For
2512 	 * simplicity, strings in the quirk table are padded with NULLs
2513 	 * instead.
2514 	 */
2515 	u16 vid;
2516 	const char *mn;
2517 	const char *fr;
2518 	unsigned long quirks;
2519 };
2520 
2521 static const struct nvme_core_quirk_entry core_quirks[] = {
2522 	{
2523 		/*
2524 		 * This Toshiba device seems to die using any APST states.  See:
2525 		 * https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1678184/comments/11
2526 		 */
2527 		.vid = 0x1179,
2528 		.mn = "THNSF5256GPUK TOSHIBA",
2529 		.quirks = NVME_QUIRK_NO_APST,
2530 	},
2531 	{
2532 		/*
2533 		 * This LiteON CL1-3D*-Q11 firmware version has a race
2534 		 * condition associated with actions related to suspend to idle.
2535 		 * LiteON has resolved the problem in future firmware.
2536 		 */
2537 		.vid = 0x14a4,
2538 		.fr = "22301111",
2539 		.quirks = NVME_QUIRK_SIMPLE_SUSPEND,
2540 	}
2541 };
2542 
2543 /* match is null-terminated but idstr is space-padded. */
2544 static bool string_matches(const char *idstr, const char *match, size_t len)
2545 {
2546 	size_t matchlen;
2547 
2548 	if (!match)
2549 		return true;
2550 
2551 	matchlen = strlen(match);
2552 	WARN_ON_ONCE(matchlen > len);
2553 
2554 	if (memcmp(idstr, match, matchlen))
2555 		return false;
2556 
2557 	for (; matchlen < len; matchlen++)
2558 		if (idstr[matchlen] != ' ')
2559 			return false;
2560 
2561 	return true;
2562 }
2563 
2564 static bool quirk_matches(const struct nvme_id_ctrl *id,
2565 			  const struct nvme_core_quirk_entry *q)
2566 {
2567 	return q->vid == le16_to_cpu(id->vid) &&
2568 		string_matches(id->mn, q->mn, sizeof(id->mn)) &&
2569 		string_matches(id->fr, q->fr, sizeof(id->fr));
2570 }
2571 
2572 static void nvme_init_subnqn(struct nvme_subsystem *subsys, struct nvme_ctrl *ctrl,
2573 		struct nvme_id_ctrl *id)
2574 {
2575 	size_t nqnlen;
2576 	int off;
2577 
2578 	if (!(ctrl->quirks & NVME_QUIRK_IGNORE_DEV_SUBNQN)) {
2579 		nqnlen = strnlen(id->subnqn, NVMF_NQN_SIZE);
2580 		if (nqnlen > 0 && nqnlen < NVMF_NQN_SIZE) {
2581 			strlcpy(subsys->subnqn, id->subnqn, NVMF_NQN_SIZE);
2582 			return;
2583 		}
2584 
2585 		if (ctrl->vs >= NVME_VS(1, 2, 1))
2586 			dev_warn(ctrl->device, "missing or invalid SUBNQN field.\n");
2587 	}
2588 
2589 	/* Generate a "fake" NQN per Figure 254 in NVMe 1.3 + ECN 001 */
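	/* e.g. "nqn.2014.08.org.nvmexpress:80868086" + SN + MN for an Intel part */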
2590 	off = snprintf(subsys->subnqn, NVMF_NQN_SIZE,
2591 			"nqn.2014.08.org.nvmexpress:%04x%04x",
2592 			le16_to_cpu(id->vid), le16_to_cpu(id->ssvid));
2593 	memcpy(subsys->subnqn + off, id->sn, sizeof(id->sn));
2594 	off += sizeof(id->sn);
2595 	memcpy(subsys->subnqn + off, id->mn, sizeof(id->mn));
2596 	off += sizeof(id->mn);
2597 	memset(subsys->subnqn + off, 0, sizeof(subsys->subnqn) - off);
2598 }
2599 
2600 static void nvme_release_subsystem(struct device *dev)
2601 {
2602 	struct nvme_subsystem *subsys =
2603 		container_of(dev, struct nvme_subsystem, dev);
2604 
2605 	if (subsys->instance >= 0)
2606 		ida_simple_remove(&nvme_instance_ida, subsys->instance);
2607 	kfree(subsys);
2608 }
2609 
2610 static void nvme_destroy_subsystem(struct kref *ref)
2611 {
2612 	struct nvme_subsystem *subsys =
2613 			container_of(ref, struct nvme_subsystem, ref);
2614 
2615 	mutex_lock(&nvme_subsystems_lock);
2616 	list_del(&subsys->entry);
2617 	mutex_unlock(&nvme_subsystems_lock);
2618 
2619 	ida_destroy(&subsys->ns_ida);
2620 	device_del(&subsys->dev);
2621 	put_device(&subsys->dev);
2622 }
2623 
2624 static void nvme_put_subsystem(struct nvme_subsystem *subsys)
2625 {
2626 	kref_put(&subsys->ref, nvme_destroy_subsystem);
2627 }
2628 
2629 static struct nvme_subsystem *__nvme_find_get_subsystem(const char *subsysnqn)
2630 {
2631 	struct nvme_subsystem *subsys;
2632 
2633 	lockdep_assert_held(&nvme_subsystems_lock);
2634 
2635 	/*
2636 	 * Fail matches for discovery subsystems. This results in each
2637 	 * discovery controller being bound to a unique subsystem.
2638 	 * This avoids issues with validating controller values
2639 	 * that can only be true when there is a single unique subsystem.
2640 	 * There may be multiple and completely independent entities
2641 	 * that provide discovery controllers.
2642 	 */
2643 	if (!strcmp(subsysnqn, NVME_DISC_SUBSYS_NAME))
2644 		return NULL;
2645 
2646 	list_for_each_entry(subsys, &nvme_subsystems, entry) {
2647 		if (strcmp(subsys->subnqn, subsysnqn))
2648 			continue;
2649 		if (!kref_get_unless_zero(&subsys->ref))
2650 			continue;
2651 		return subsys;
2652 	}
2653 
2654 	return NULL;
2655 }
2656 
2657 #define SUBSYS_ATTR_RO(_name, _mode, _show)			\
2658 	struct device_attribute subsys_attr_##_name = \
2659 		__ATTR(_name, _mode, _show, NULL)
2660 
2661 static ssize_t nvme_subsys_show_nqn(struct device *dev,
2662 				    struct device_attribute *attr,
2663 				    char *buf)
2664 {
2665 	struct nvme_subsystem *subsys =
2666 		container_of(dev, struct nvme_subsystem, dev);
2667 
2668 	return snprintf(buf, PAGE_SIZE, "%s\n", subsys->subnqn);
2669 }
2670 static SUBSYS_ATTR_RO(subsysnqn, S_IRUGO, nvme_subsys_show_nqn);
2671 
2672 #define nvme_subsys_show_str_function(field)				\
2673 static ssize_t subsys_##field##_show(struct device *dev,		\
2674 			    struct device_attribute *attr, char *buf)	\
2675 {									\
2676 	struct nvme_subsystem *subsys =					\
2677 		container_of(dev, struct nvme_subsystem, dev);		\
2678 	return sprintf(buf, "%.*s\n",					\
2679 		       (int)sizeof(subsys->field), subsys->field);	\
2680 }									\
2681 static SUBSYS_ATTR_RO(field, S_IRUGO, subsys_##field##_show);
2682 
2683 nvme_subsys_show_str_function(model);
2684 nvme_subsys_show_str_function(serial);
2685 nvme_subsys_show_str_function(firmware_rev);
2686 
2687 static struct attribute *nvme_subsys_attrs[] = {
2688 	&subsys_attr_model.attr,
2689 	&subsys_attr_serial.attr,
2690 	&subsys_attr_firmware_rev.attr,
2691 	&subsys_attr_subsysnqn.attr,
2692 #ifdef CONFIG_NVME_MULTIPATH
2693 	&subsys_attr_iopolicy.attr,
2694 #endif
2695 	NULL,
2696 };
2697 
2698 static struct attribute_group nvme_subsys_attrs_group = {
2699 	.attrs = nvme_subsys_attrs,
2700 };
2701 
2702 static const struct attribute_group *nvme_subsys_attrs_groups[] = {
2703 	&nvme_subsys_attrs_group,
2704 	NULL,
2705 };
2706 
2707 static bool nvme_validate_cntlid(struct nvme_subsystem *subsys,
2708 		struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
2709 {
2710 	struct nvme_ctrl *tmp;
2711 
2712 	lockdep_assert_held(&nvme_subsystems_lock);
2713 
2714 	list_for_each_entry(tmp, &subsys->ctrls, subsys_entry) {
2715 		if (nvme_state_terminal(tmp))
2716 			continue;
2717 
2718 		if (tmp->cntlid == ctrl->cntlid) {
2719 			dev_err(ctrl->device,
2720 				"Duplicate cntlid %u with %s, rejecting\n",
2721 				ctrl->cntlid, dev_name(tmp->device));
2722 			return false;
2723 		}
2724 
2725 		if ((id->cmic & NVME_CTRL_CMIC_MULTI_CTRL) ||
2726 		    (ctrl->opts && ctrl->opts->discovery_nqn))
2727 			continue;
2728 
2729 		dev_err(ctrl->device,
2730 			"Subsystem does not support multiple controllers\n");
2731 		return false;
2732 	}
2733 
2734 	return true;
2735 }
2736 
2737 static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
2738 {
2739 	struct nvme_subsystem *subsys, *found;
2740 	int ret;
2741 
2742 	subsys = kzalloc(sizeof(*subsys), GFP_KERNEL);
2743 	if (!subsys)
2744 		return -ENOMEM;
2745 
2746 	subsys->instance = -1;
2747 	mutex_init(&subsys->lock);
2748 	kref_init(&subsys->ref);
2749 	INIT_LIST_HEAD(&subsys->ctrls);
2750 	INIT_LIST_HEAD(&subsys->nsheads);
2751 	nvme_init_subnqn(subsys, ctrl, id);
2752 	memcpy(subsys->serial, id->sn, sizeof(subsys->serial));
2753 	memcpy(subsys->model, id->mn, sizeof(subsys->model));
2754 	memcpy(subsys->firmware_rev, id->fr, sizeof(subsys->firmware_rev));
2755 	subsys->vendor_id = le16_to_cpu(id->vid);
2756 	subsys->cmic = id->cmic;
2757 	subsys->awupf = le16_to_cpu(id->awupf);
2758 #ifdef CONFIG_NVME_MULTIPATH
2759 	subsys->iopolicy = NVME_IOPOLICY_NUMA;
2760 #endif
2761 
2762 	subsys->dev.class = nvme_subsys_class;
2763 	subsys->dev.release = nvme_release_subsystem;
2764 	subsys->dev.groups = nvme_subsys_attrs_groups;
2765 	dev_set_name(&subsys->dev, "nvme-subsys%d", ctrl->instance);
2766 	device_initialize(&subsys->dev);
2767 
2768 	mutex_lock(&nvme_subsystems_lock);
2769 	found = __nvme_find_get_subsystem(subsys->subnqn);
2770 	if (found) {
2771 		put_device(&subsys->dev);
2772 		subsys = found;
2773 
2774 		if (!nvme_validate_cntlid(subsys, ctrl, id)) {
2775 			ret = -EINVAL;
2776 			goto out_put_subsystem;
2777 		}
2778 	} else {
2779 		ret = device_add(&subsys->dev);
2780 		if (ret) {
2781 			dev_err(ctrl->device,
2782 				"failed to register subsystem device.\n");
2783 			put_device(&subsys->dev);
2784 			goto out_unlock;
2785 		}
2786 		ida_init(&subsys->ns_ida);
2787 		list_add_tail(&subsys->entry, &nvme_subsystems);
2788 	}
2789 
2790 	ret = sysfs_create_link(&subsys->dev.kobj, &ctrl->device->kobj,
2791 				dev_name(ctrl->device));
2792 	if (ret) {
2793 		dev_err(ctrl->device,
2794 			"failed to create sysfs link from subsystem.\n");
2795 		goto out_put_subsystem;
2796 	}
2797 
2798 	if (!found)
2799 		subsys->instance = ctrl->instance;
2800 	ctrl->subsys = subsys;
2801 	list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
2802 	mutex_unlock(&nvme_subsystems_lock);
2803 	return 0;
2804 
2805 out_put_subsystem:
2806 	nvme_put_subsystem(subsys);
2807 out_unlock:
2808 	mutex_unlock(&nvme_subsystems_lock);
2809 	return ret;
2810 }
2811 
2812 int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp,
2813 		void *log, size_t size, u64 offset)
2814 {
2815 	struct nvme_command c = { };
2816 	u32 dwlen = nvme_bytes_to_numd(size);
2817 
2818 	c.get_log_page.opcode = nvme_admin_get_log_page;
2819 	c.get_log_page.nsid = cpu_to_le32(nsid);
2820 	c.get_log_page.lid = log_page;
2821 	c.get_log_page.lsp = lsp;
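	/* NUMD is a zero's based dword count, split across NUMDL/NUMDU. */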
2822 	c.get_log_page.numdl = cpu_to_le16(dwlen & ((1 << 16) - 1));
2823 	c.get_log_page.numdu = cpu_to_le16(dwlen >> 16);
2824 	c.get_log_page.lpol = cpu_to_le32(lower_32_bits(offset));
2825 	c.get_log_page.lpou = cpu_to_le32(upper_32_bits(offset));
2826 
2827 	return nvme_submit_sync_cmd(ctrl->admin_q, &c, log, size);
2828 }
2829 
2830 static int nvme_get_effects_log(struct nvme_ctrl *ctrl)
2831 {
2832 	int ret;
2833 
2834 	if (!ctrl->effects)
2835 		ctrl->effects = kzalloc(sizeof(*ctrl->effects), GFP_KERNEL);
2836 
2837 	if (!ctrl->effects)
2838 		return 0;
2839 
2840 	ret = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_CMD_EFFECTS, 0,
2841 			ctrl->effects, sizeof(*ctrl->effects), 0);
2842 	if (ret) {
2843 		kfree(ctrl->effects);
2844 		ctrl->effects = NULL;
2845 	}
2846 	return ret;
2847 }
2848 
2849 /*
2850  * Initialize the cached copies of the Identify data and various controller
2851  * registers in our nvme_ctrl structure.  This should be called as soon as
2852  * the admin queue is fully up and running.
2853  */
2854 int nvme_init_identify(struct nvme_ctrl *ctrl)
2855 {
2856 	struct nvme_id_ctrl *id;
2857 	int ret, page_shift;
2858 	u32 max_hw_sectors;
2859 	bool prev_apst_enabled;
2860 
2861 	ret = ctrl->ops->reg_read32(ctrl, NVME_REG_VS, &ctrl->vs);
2862 	if (ret) {
2863 		dev_err(ctrl->device, "Reading VS failed (%d)\n", ret);
2864 		return ret;
2865 	}
2866 	page_shift = NVME_CAP_MPSMIN(ctrl->cap) + 12;
2867 	ctrl->sqsize = min_t(int, NVME_CAP_MQES(ctrl->cap), ctrl->sqsize);
2868 
2869 	if (ctrl->vs >= NVME_VS(1, 1, 0))
2870 		ctrl->subsystem = NVME_CAP_NSSRC(ctrl->cap);
2871 
2872 	ret = nvme_identify_ctrl(ctrl, &id);
2873 	if (ret) {
2874 		dev_err(ctrl->device, "Identify Controller failed (%d)\n", ret);
2875 		return -EIO;
2876 	}
2877 
2878 	if (id->lpa & NVME_CTRL_LPA_CMD_EFFECTS_LOG) {
2879 		ret = nvme_get_effects_log(ctrl);
2880 		if (ret < 0)
2881 			goto out_free;
2882 	}
2883 
2884 	if (!(ctrl->ops->flags & NVME_F_FABRICS))
2885 		ctrl->cntlid = le16_to_cpu(id->cntlid);
2886 
2887 	if (!ctrl->identified) {
2888 		int i;
2889 
2890 		ret = nvme_init_subsystem(ctrl, id);
2891 		if (ret)
2892 			goto out_free;
2893 
2894 		/*
2895 		 * Check for quirks.  Quirk can depend on firmware version,
2896 		 * so, in principle, the set of quirks present can change
2897 		 * across a reset.  As a possible future enhancement, we
2898 		 * could re-scan for quirks every time we reinitialize
2899 		 * the device, but we'd have to make sure that the driver
2900 		 * behaves intelligently if the quirks change.
2901 		 */
2902 		for (i = 0; i < ARRAY_SIZE(core_quirks); i++) {
2903 			if (quirk_matches(id, &core_quirks[i]))
2904 				ctrl->quirks |= core_quirks[i].quirks;
2905 		}
2906 	}
2907 
2908 	if (force_apst && (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS)) {
2909 		dev_warn(ctrl->device, "forcibly allowing all power states due to nvme_core.force_apst -- use at your own risk\n");
2910 		ctrl->quirks &= ~NVME_QUIRK_NO_DEEPEST_PS;
2911 	}
2912 
2913 	ctrl->crdt[0] = le16_to_cpu(id->crdt1);
2914 	ctrl->crdt[1] = le16_to_cpu(id->crdt2);
2915 	ctrl->crdt[2] = le16_to_cpu(id->crdt3);
2916 
2917 	ctrl->oacs = le16_to_cpu(id->oacs);
2918 	ctrl->oncs = le16_to_cpu(id->oncs);
2919 	ctrl->mtfa = le16_to_cpu(id->mtfa);
2920 	ctrl->oaes = le32_to_cpu(id->oaes);
2921 	ctrl->wctemp = le16_to_cpu(id->wctemp);
2922 	ctrl->cctemp = le16_to_cpu(id->cctemp);
2923 
2924 	atomic_set(&ctrl->abort_limit, id->acl + 1);
2925 	ctrl->vwc = id->vwc;
2926 	if (id->mdts)
2927 		max_hw_sectors = 1 << (id->mdts + page_shift - 9);
2928 	else
2929 		max_hw_sectors = UINT_MAX;
2930 	ctrl->max_hw_sectors =
2931 		min_not_zero(ctrl->max_hw_sectors, max_hw_sectors);
2932 
2933 	nvme_set_queue_limits(ctrl, ctrl->admin_q);
2934 	ctrl->sgls = le32_to_cpu(id->sgls);
2935 	ctrl->kas = le16_to_cpu(id->kas);
2936 	ctrl->max_namespaces = le32_to_cpu(id->mnan);
2937 	ctrl->ctratt = le32_to_cpu(id->ctratt);
2938 
2939 	if (id->rtd3e) {
2940 		/* us -> s */
2941 		u32 transition_time = le32_to_cpu(id->rtd3e) / 1000000;
2942 
2943 		ctrl->shutdown_timeout = clamp_t(unsigned int, transition_time,
2944 						 shutdown_timeout, 60);
2945 
2946 		if (ctrl->shutdown_timeout != shutdown_timeout)
2947 			dev_info(ctrl->device,
2948 				 "Shutdown timeout set to %u seconds\n",
2949 				 ctrl->shutdown_timeout);
2950 	} else
2951 		ctrl->shutdown_timeout = shutdown_timeout;
2952 
2953 	ctrl->npss = id->npss;
2954 	ctrl->apsta = id->apsta;
2955 	prev_apst_enabled = ctrl->apst_enabled;
2956 	if (ctrl->quirks & NVME_QUIRK_NO_APST) {
2957 		if (force_apst && id->apsta) {
2958 			dev_warn(ctrl->device, "forcibly allowing APST due to nvme_core.force_apst -- use at your own risk\n");
2959 			ctrl->apst_enabled = true;
2960 		} else {
2961 			ctrl->apst_enabled = false;
2962 		}
2963 	} else {
2964 		ctrl->apst_enabled = id->apsta;
2965 	}
2966 	memcpy(ctrl->psd, id->psd, sizeof(ctrl->psd));
2967 
2968 	if (ctrl->ops->flags & NVME_F_FABRICS) {
2969 		ctrl->icdoff = le16_to_cpu(id->icdoff);
2970 		ctrl->ioccsz = le32_to_cpu(id->ioccsz);
2971 		ctrl->iorcsz = le32_to_cpu(id->iorcsz);
2972 		ctrl->maxcmd = le16_to_cpu(id->maxcmd);
2973 
2974 		/*
2975 		 * In fabrics we need to verify the cntlid matches the
2976 		 * admin connect
2977 		 */
2978 		if (ctrl->cntlid != le16_to_cpu(id->cntlid)) {
2979 			dev_err(ctrl->device,
2980 				"Mismatching cntlid: Connect %u vs Identify "
2981 				"%u, rejecting\n",
2982 				ctrl->cntlid, le16_to_cpu(id->cntlid));
2983 			ret = -EINVAL;
2984 			goto out_free;
2985 		}
2986 
2987 		if (!ctrl->opts->discovery_nqn && !ctrl->kas) {
2988 			dev_err(ctrl->device,
2989 				"keep-alive support is mandatory for fabrics\n");
2990 			ret = -EINVAL;
2991 			goto out_free;
2992 		}
2993 	} else {
2994 		ctrl->hmpre = le32_to_cpu(id->hmpre);
2995 		ctrl->hmmin = le32_to_cpu(id->hmmin);
2996 		ctrl->hmminds = le32_to_cpu(id->hmminds);
2997 		ctrl->hmmaxd = le16_to_cpu(id->hmmaxd);
2998 	}
2999 
3000 	ret = nvme_mpath_init(ctrl, id);
3001 	kfree(id);
3002 
3003 	if (ret < 0)
3004 		return ret;
3005 
3006 	if (ctrl->apst_enabled && !prev_apst_enabled)
3007 		dev_pm_qos_expose_latency_tolerance(ctrl->device);
3008 	else if (!ctrl->apst_enabled && prev_apst_enabled)
3009 		dev_pm_qos_hide_latency_tolerance(ctrl->device);
3010 
3011 	ret = nvme_configure_apst(ctrl);
3012 	if (ret < 0)
3013 		return ret;
3014 
3015 	ret = nvme_configure_timestamp(ctrl);
3016 	if (ret < 0)
3017 		return ret;
3018 
3019 	ret = nvme_configure_directives(ctrl);
3020 	if (ret < 0)
3021 		return ret;
3022 
3023 	ret = nvme_configure_acre(ctrl);
3024 	if (ret < 0)
3025 		return ret;
3026 
3027 	if (!ctrl->identified)
3028 		nvme_hwmon_init(ctrl);
3029 
3030 	ctrl->identified = true;
3031 
3032 	return 0;
3033 
3034 out_free:
3035 	kfree(id);
3036 	return ret;
3037 }
3038 EXPORT_SYMBOL_GPL(nvme_init_identify);
3039 
3040 static int nvme_dev_open(struct inode *inode, struct file *file)
3041 {
3042 	struct nvme_ctrl *ctrl =
3043 		container_of(inode->i_cdev, struct nvme_ctrl, cdev);
3044 
3045 	switch (ctrl->state) {
3046 	case NVME_CTRL_LIVE:
3047 		break;
3048 	default:
3049 		return -EWOULDBLOCK;
3050 	}
3051 
3052 	file->private_data = ctrl;
3053 	return 0;
3054 }
3055 
3056 static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp)
3057 {
3058 	struct nvme_ns *ns;
3059 	int ret;
3060 
3061 	down_read(&ctrl->namespaces_rwsem);
3062 	if (list_empty(&ctrl->namespaces)) {
3063 		ret = -ENOTTY;
3064 		goto out_unlock;
3065 	}
3066 
3067 	ns = list_first_entry(&ctrl->namespaces, struct nvme_ns, list);
3068 	if (ns != list_last_entry(&ctrl->namespaces, struct nvme_ns, list)) {
3069 		dev_warn(ctrl->device,
3070 			"NVME_IOCTL_IO_CMD not supported when multiple namespaces present!\n");
3071 		ret = -EINVAL;
3072 		goto out_unlock;
3073 	}
3074 
3075 	dev_warn(ctrl->device,
3076 		"using deprecated NVME_IOCTL_IO_CMD ioctl on the char device!\n");
3077 	kref_get(&ns->kref);
3078 	up_read(&ctrl->namespaces_rwsem);
3079 
3080 	ret = nvme_user_cmd(ctrl, ns, argp);
3081 	nvme_put_ns(ns);
3082 	return ret;
3083 
3084 out_unlock:
3085 	up_read(&ctrl->namespaces_rwsem);
3086 	return ret;
3087 }
3088 
3089 static long nvme_dev_ioctl(struct file *file, unsigned int cmd,
3090 		unsigned long arg)
3091 {
3092 	struct nvme_ctrl *ctrl = file->private_data;
3093 	void __user *argp = (void __user *)arg;
3094 
3095 	switch (cmd) {
3096 	case NVME_IOCTL_ADMIN_CMD:
3097 		return nvme_user_cmd(ctrl, NULL, argp);
3098 	case NVME_IOCTL_ADMIN64_CMD:
3099 		return nvme_user_cmd64(ctrl, NULL, argp);
3100 	case NVME_IOCTL_IO_CMD:
3101 		return nvme_dev_user_cmd(ctrl, argp);
3102 	case NVME_IOCTL_RESET:
3103 		dev_warn(ctrl->device, "resetting controller\n");
3104 		return nvme_reset_ctrl_sync(ctrl);
3105 	case NVME_IOCTL_SUBSYS_RESET:
3106 		return nvme_reset_subsystem(ctrl);
3107 	case NVME_IOCTL_RESCAN:
3108 		nvme_queue_scan(ctrl);
3109 		return 0;
3110 	default:
3111 		return -ENOTTY;
3112 	}
3113 }
3114 
3115 static const struct file_operations nvme_dev_fops = {
3116 	.owner		= THIS_MODULE,
3117 	.open		= nvme_dev_open,
3118 	.unlocked_ioctl	= nvme_dev_ioctl,
3119 	.compat_ioctl	= compat_ptr_ioctl,
3120 };
3121 
3122 static ssize_t nvme_sysfs_reset(struct device *dev,
3123 				struct device_attribute *attr, const char *buf,
3124 				size_t count)
3125 {
3126 	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3127 	int ret;
3128 
3129 	ret = nvme_reset_ctrl_sync(ctrl);
3130 	if (ret < 0)
3131 		return ret;
3132 	return count;
3133 }
3134 static DEVICE_ATTR(reset_controller, S_IWUSR, NULL, nvme_sysfs_reset);
3135 
3136 static ssize_t nvme_sysfs_rescan(struct device *dev,
3137 				struct device_attribute *attr, const char *buf,
3138 				size_t count)
3139 {
3140 	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3141 
3142 	nvme_queue_scan(ctrl);
3143 	return count;
3144 }
3145 static DEVICE_ATTR(rescan_controller, S_IWUSR, NULL, nvme_sysfs_rescan);
3146 
3147 static inline struct nvme_ns_head *dev_to_ns_head(struct device *dev)
3148 {
3149 	struct gendisk *disk = dev_to_disk(dev);
3150 
3151 	if (disk->fops == &nvme_fops)
3152 		return nvme_get_ns_from_dev(dev)->head;
3153 	else
3154 		return disk->private_data;
3155 }
3156 
3157 static ssize_t wwid_show(struct device *dev, struct device_attribute *attr,
3158 		char *buf)
3159 {
3160 	struct nvme_ns_head *head = dev_to_ns_head(dev);
3161 	struct nvme_ns_ids *ids = &head->ids;
3162 	struct nvme_subsystem *subsys = head->subsys;
3163 	int serial_len = sizeof(subsys->serial);
3164 	int model_len = sizeof(subsys->model);
3165 
3166 	if (!uuid_is_null(&ids->uuid))
3167 		return sprintf(buf, "uuid.%pU\n", &ids->uuid);
3168 
3169 	if (memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
3170 		return sprintf(buf, "eui.%16phN\n", ids->nguid);
3171 
3172 	if (memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
3173 		return sprintf(buf, "eui.%8phN\n", ids->eui64);
3174 
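	/*
	 * No unique identifier: fall back to a synthetic WWID built from
	 * the vendor ID, serial, model and NSID, minus trailing padding.
	 */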
3175 	while (serial_len > 0 && (subsys->serial[serial_len - 1] == ' ' ||
3176 				  subsys->serial[serial_len - 1] == '\0'))
3177 		serial_len--;
3178 	while (model_len > 0 && (subsys->model[model_len - 1] == ' ' ||
3179 				 subsys->model[model_len - 1] == '\0'))
3180 		model_len--;
3181 
3182 	return sprintf(buf, "nvme.%04x-%*phN-%*phN-%08x\n", subsys->vendor_id,
3183 		serial_len, subsys->serial, model_len, subsys->model,
3184 		head->ns_id);
3185 }
3186 static DEVICE_ATTR_RO(wwid);
3187 
3188 static ssize_t nguid_show(struct device *dev, struct device_attribute *attr,
3189 		char *buf)
3190 {
3191 	return sprintf(buf, "%pU\n", dev_to_ns_head(dev)->ids.nguid);
3192 }
3193 static DEVICE_ATTR_RO(nguid);
3194 
3195 static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
3196 		char *buf)
3197 {
3198 	struct nvme_ns_ids *ids = &dev_to_ns_head(dev)->ids;
3199 
3200 	/* For backward compatibility, expose the NGUID to userspace if
3201 	 * we have no UUID set.
3202 	 */
3203 	if (uuid_is_null(&ids->uuid)) {
3204 		printk_ratelimited(KERN_WARNING
3205 				   "No UUID available, providing old NGUID\n");
3206 		return sprintf(buf, "%pU\n", ids->nguid);
3207 	}
3208 	return sprintf(buf, "%pU\n", &ids->uuid);
3209 }
3210 static DEVICE_ATTR_RO(uuid);
3211 
3212 static ssize_t eui_show(struct device *dev, struct device_attribute *attr,
3213 		char *buf)
3214 {
3215 	return sprintf(buf, "%8ph\n", dev_to_ns_head(dev)->ids.eui64);
3216 }
3217 static DEVICE_ATTR_RO(eui);
3218 
3219 static ssize_t nsid_show(struct device *dev, struct device_attribute *attr,
3220 		char *buf)
3221 {
3222 	return sprintf(buf, "%d\n", dev_to_ns_head(dev)->ns_id);
3223 }
3224 static DEVICE_ATTR_RO(nsid);
3225 
3226 static struct attribute *nvme_ns_id_attrs[] = {
3227 	&dev_attr_wwid.attr,
3228 	&dev_attr_uuid.attr,
3229 	&dev_attr_nguid.attr,
3230 	&dev_attr_eui.attr,
3231 	&dev_attr_nsid.attr,
3232 #ifdef CONFIG_NVME_MULTIPATH
3233 	&dev_attr_ana_grpid.attr,
3234 	&dev_attr_ana_state.attr,
3235 #endif
3236 	NULL,
3237 };
3238 
3239 static umode_t nvme_ns_id_attrs_are_visible(struct kobject *kobj,
3240 		struct attribute *a, int n)
3241 {
3242 	struct device *dev = container_of(kobj, struct device, kobj);
3243 	struct nvme_ns_ids *ids = &dev_to_ns_head(dev)->ids;
3244 
3245 	if (a == &dev_attr_uuid.attr) {
3246 		if (uuid_is_null(&ids->uuid) &&
3247 		    !memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
3248 			return 0;
3249 	}
3250 	if (a == &dev_attr_nguid.attr) {
3251 		if (!memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
3252 			return 0;
3253 	}
3254 	if (a == &dev_attr_eui.attr) {
3255 		if (!memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
3256 			return 0;
3257 	}
3258 #ifdef CONFIG_NVME_MULTIPATH
3259 	if (a == &dev_attr_ana_grpid.attr || a == &dev_attr_ana_state.attr) {
3260 		if (dev_to_disk(dev)->fops != &nvme_fops) /* per-path attr */
3261 			return 0;
3262 		if (!nvme_ctrl_use_ana(nvme_get_ns_from_dev(dev)->ctrl))
3263 			return 0;
3264 	}
3265 #endif
3266 	return a->mode;
3267 }
3268 
3269 static const struct attribute_group nvme_ns_id_attr_group = {
3270 	.attrs		= nvme_ns_id_attrs,
3271 	.is_visible	= nvme_ns_id_attrs_are_visible,
3272 };
3273 
3274 const struct attribute_group *nvme_ns_id_attr_groups[] = {
3275 	&nvme_ns_id_attr_group,
3276 #ifdef CONFIG_NVM
3277 	&nvme_nvm_attr_group,
3278 #endif
3279 	NULL,
3280 };
3281 
3282 #define nvme_show_str_function(field)						\
3283 static ssize_t  field##_show(struct device *dev,				\
3284 			    struct device_attribute *attr, char *buf)		\
3285 {										\
3286 	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);				\
3287 	return sprintf(buf, "%.*s\n",						\
3288 		(int)sizeof(ctrl->subsys->field), ctrl->subsys->field);		\
3289 }										\
3290 static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);
3291 
3292 nvme_show_str_function(model);
3293 nvme_show_str_function(serial);
3294 nvme_show_str_function(firmware_rev);
3295 
3296 #define nvme_show_int_function(field)						\
3297 static ssize_t  field##_show(struct device *dev,				\
3298 			    struct device_attribute *attr, char *buf)		\
3299 {										\
3300 	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);				\
3301 	return sprintf(buf, "%d\n", ctrl->field);				\
3302 }										\
3303 static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);
3304 
3305 nvme_show_int_function(cntlid);
3306 nvme_show_int_function(numa_node);
3307 nvme_show_int_function(queue_count);
3308 nvme_show_int_function(sqsize);
3309 
3310 static ssize_t nvme_sysfs_delete(struct device *dev,
3311 				struct device_attribute *attr, const char *buf,
3312 				size_t count)
3313 {
3314 	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3315 
3316 	/* Can't delete non-created controllers */
3317 	if (!ctrl->created)
3318 		return -EBUSY;
3319 
3320 	if (device_remove_file_self(dev, attr))
3321 		nvme_delete_ctrl_sync(ctrl);
3322 	return count;
3323 }
3324 static DEVICE_ATTR(delete_controller, S_IWUSR, NULL, nvme_sysfs_delete);
3325 
3326 static ssize_t nvme_sysfs_show_transport(struct device *dev,
3327 					 struct device_attribute *attr,
3328 					 char *buf)
3329 {
3330 	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3331 
3332 	return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->ops->name);
3333 }
3334 static DEVICE_ATTR(transport, S_IRUGO, nvme_sysfs_show_transport, NULL);
3335 
3336 static ssize_t nvme_sysfs_show_state(struct device *dev,
3337 				     struct device_attribute *attr,
3338 				     char *buf)
3339 {
3340 	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3341 	static const char *const state_name[] = {
3342 		[NVME_CTRL_NEW]		= "new",
3343 		[NVME_CTRL_LIVE]	= "live",
3344 		[NVME_CTRL_RESETTING]	= "resetting",
3345 		[NVME_CTRL_CONNECTING]	= "connecting",
3346 		[NVME_CTRL_DELETING]	= "deleting",
3347 		[NVME_CTRL_DEAD]	= "dead",
3348 	};
3349 
3350 	if ((unsigned)ctrl->state < ARRAY_SIZE(state_name) &&
3351 	    state_name[ctrl->state])
3352 		return sprintf(buf, "%s\n", state_name[ctrl->state]);
3353 
3354 	return sprintf(buf, "unknown state\n");
3355 }
3356 
3357 static DEVICE_ATTR(state, S_IRUGO, nvme_sysfs_show_state, NULL);
3358 
3359 static ssize_t nvme_sysfs_show_subsysnqn(struct device *dev,
3360 					 struct device_attribute *attr,
3361 					 char *buf)
3362 {
3363 	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3364 
3365 	return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->subsys->subnqn);
3366 }
3367 static DEVICE_ATTR(subsysnqn, S_IRUGO, nvme_sysfs_show_subsysnqn, NULL);
3368 
3369 static ssize_t nvme_sysfs_show_hostnqn(struct device *dev,
3370 					struct device_attribute *attr,
3371 					char *buf)
3372 {
3373 	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3374 
3375 	return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->opts->host->nqn);
3376 }
3377 static DEVICE_ATTR(hostnqn, S_IRUGO, nvme_sysfs_show_hostnqn, NULL);
3378 
3379 static ssize_t nvme_sysfs_show_hostid(struct device *dev,
3380 					struct device_attribute *attr,
3381 					char *buf)
3382 {
3383 	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3384 
3385 	return snprintf(buf, PAGE_SIZE, "%pU\n", &ctrl->opts->host->id);
3386 }
3387 static DEVICE_ATTR(hostid, S_IRUGO, nvme_sysfs_show_hostid, NULL);
3388 
3389 static ssize_t nvme_sysfs_show_address(struct device *dev,
3390 					 struct device_attribute *attr,
3391 					 char *buf)
3392 {
3393 	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3394 
3395 	return ctrl->ops->get_address(ctrl, buf, PAGE_SIZE);
3396 }
3397 static DEVICE_ATTR(address, S_IRUGO, nvme_sysfs_show_address, NULL);
3398 
3399 static struct attribute *nvme_dev_attrs[] = {
3400 	&dev_attr_reset_controller.attr,
3401 	&dev_attr_rescan_controller.attr,
3402 	&dev_attr_model.attr,
3403 	&dev_attr_serial.attr,
3404 	&dev_attr_firmware_rev.attr,
3405 	&dev_attr_cntlid.attr,
3406 	&dev_attr_delete_controller.attr,
3407 	&dev_attr_transport.attr,
3408 	&dev_attr_subsysnqn.attr,
3409 	&dev_attr_address.attr,
3410 	&dev_attr_state.attr,
3411 	&dev_attr_numa_node.attr,
3412 	&dev_attr_queue_count.attr,
3413 	&dev_attr_sqsize.attr,
3414 	&dev_attr_hostnqn.attr,
3415 	&dev_attr_hostid.attr,
3416 	NULL
3417 };
3418 
3419 static umode_t nvme_dev_attrs_are_visible(struct kobject *kobj,
3420 		struct attribute *a, int n)
3421 {
3422 	struct device *dev = container_of(kobj, struct device, kobj);
3423 	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
3424 
3425 	if (a == &dev_attr_delete_controller.attr && !ctrl->ops->delete_ctrl)
3426 		return 0;
3427 	if (a == &dev_attr_address.attr && !ctrl->ops->get_address)
3428 		return 0;
3429 	if (a == &dev_attr_hostnqn.attr && !ctrl->opts)
3430 		return 0;
3431 	if (a == &dev_attr_hostid.attr && !ctrl->opts)
3432 		return 0;
3433 
3434 	return a->mode;
3435 }
3436 
3437 static struct attribute_group nvme_dev_attrs_group = {
3438 	.attrs		= nvme_dev_attrs,
3439 	.is_visible	= nvme_dev_attrs_are_visible,
3440 };
3441 
3442 static const struct attribute_group *nvme_dev_attr_groups[] = {
3443 	&nvme_dev_attrs_group,
3444 	NULL,
3445 };
3446 
3447 static struct nvme_ns_head *nvme_find_ns_head(struct nvme_subsystem *subsys,
3448 		unsigned nsid)
3449 {
3450 	struct nvme_ns_head *h;
3451 
3452 	lockdep_assert_held(&subsys->lock);
3453 
3454 	list_for_each_entry(h, &subsys->nsheads, entry) {
3455 		if (h->ns_id == nsid && kref_get_unless_zero(&h->ref))
3456 			return h;
3457 	}
3458 
3459 	return NULL;
3460 }
3461 
3462 static int __nvme_check_ids(struct nvme_subsystem *subsys,
3463 		struct nvme_ns_head *new)
3464 {
3465 	struct nvme_ns_head *h;
3466 
3467 	lockdep_assert_held(&subsys->lock);
3468 
3469 	list_for_each_entry(h, &subsys->nsheads, entry) {
3470 		if (nvme_ns_ids_valid(&new->ids) &&
3471 		    nvme_ns_ids_equal(&new->ids, &h->ids))
3472 			return -EINVAL;
3473 	}
3474 
3475 	return 0;
3476 }
3477 
3478 static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl,
3479 		unsigned nsid, struct nvme_ns_ids *ids)
3480 {
3481 	struct nvme_ns_head *head;
3482 	size_t size = sizeof(*head);
3483 	int ret = -ENOMEM;
3484 
3485 #ifdef CONFIG_NVME_MULTIPATH
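	/* Room for the flexible per-node current_path[] array. */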
3486 	size += num_possible_nodes() * sizeof(struct nvme_ns *);
3487 #endif
3488 
3489 	head = kzalloc(size, GFP_KERNEL);
3490 	if (!head)
3491 		goto out;
3492 	ret = ida_simple_get(&ctrl->subsys->ns_ida, 1, 0, GFP_KERNEL);
3493 	if (ret < 0)
3494 		goto out_free_head;
3495 	head->instance = ret;
3496 	INIT_LIST_HEAD(&head->list);
3497 	ret = init_srcu_struct(&head->srcu);
3498 	if (ret)
3499 		goto out_ida_remove;
3500 	head->subsys = ctrl->subsys;
3501 	head->ns_id = nsid;
3502 	head->ids = *ids;
3503 	kref_init(&head->ref);
3504 
3505 	ret = __nvme_check_ids(ctrl->subsys, head);
3506 	if (ret) {
3507 		dev_err(ctrl->device,
3508 			"duplicate IDs for nsid %d\n", nsid);
3509 		goto out_cleanup_srcu;
3510 	}
3511 
3512 	ret = nvme_mpath_alloc_disk(ctrl, head);
3513 	if (ret)
3514 		goto out_cleanup_srcu;
3515 
3516 	list_add_tail(&head->entry, &ctrl->subsys->nsheads);
3517 
3518 	kref_get(&ctrl->subsys->ref);
3519 
3520 	return head;
3521 out_cleanup_srcu:
3522 	cleanup_srcu_struct(&head->srcu);
3523 out_ida_remove:
3524 	ida_simple_remove(&ctrl->subsys->ns_ida, head->instance);
3525 out_free_head:
3526 	kfree(head);
3527 out:
3528 	if (ret > 0)
3529 		ret = blk_status_to_errno(nvme_error_status(ret));
3530 	return ERR_PTR(ret);
3531 }
3532 
3533 static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid,
3534 		struct nvme_id_ns *id)
3535 {
3536 	struct nvme_ctrl *ctrl = ns->ctrl;
3537 	bool is_shared = id->nmic & NVME_NS_NMIC_SHARED;
3538 	struct nvme_ns_head *head = NULL;
3539 	struct nvme_ns_ids ids;
3540 	int ret = 0;
3541 
3542 	ret = nvme_report_ns_ids(ctrl, nsid, id, &ids);
3543 	if (ret) {
3544 		if (ret < 0)
3545 			return ret;
3546 		return blk_status_to_errno(nvme_error_status(ret));
3547 	}
3548 
3549 	mutex_lock(&ctrl->subsys->lock);
3550 	head = nvme_find_ns_head(ctrl->subsys, nsid);
3551 	if (!head) {
3552 		head = nvme_alloc_ns_head(ctrl, nsid, &ids);
3553 		if (IS_ERR(head)) {
3554 			ret = PTR_ERR(head);
3555 			goto out_unlock;
3556 		}
3557 		head->shared = is_shared;
3558 	} else {
3559 		ret = -EINVAL;
3560 		if (!is_shared || !head->shared) {
3561 			dev_err(ctrl->device,
3562 				"Duplicate unshared namespace %d\n", nsid);
3563 			goto out_put_ns_head;
3564 		}
3565 		if (!nvme_ns_ids_equal(&head->ids, &ids)) {
3566 			dev_err(ctrl->device,
3567 				"IDs don't match for shared namespace %d\n",
3568 					nsid);
3569 			goto out_put_ns_head;
3570 		}
3571 	}
3572 
3573 	list_add_tail(&ns->siblings, &head->list);
3574 	ns->head = head;
3575 	mutex_unlock(&ctrl->subsys->lock);
3576 	return 0;
3577 
3578 out_put_ns_head:
3579 	nvme_put_ns_head(head);
3580 out_unlock:
3581 	mutex_unlock(&ctrl->subsys->lock);
3582 	return ret;
3583 }
3584 
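/* list_sort() comparison helper: order namespaces by ascending NSID. */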
3585 static int ns_cmp(void *priv, struct list_head *a, struct list_head *b)
3586 {
3587 	struct nvme_ns *nsa = container_of(a, struct nvme_ns, list);
3588 	struct nvme_ns *nsb = container_of(b, struct nvme_ns, list);
3589 
3590 	return nsa->head->ns_id - nsb->head->ns_id;
3591 }
3592 
3593 static struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid)
3594 {
3595 	struct nvme_ns *ns, *ret = NULL;
3596 
3597 	down_read(&ctrl->namespaces_rwsem);
3598 	list_for_each_entry(ns, &ctrl->namespaces, list) {
3599 		if (ns->head->ns_id == nsid) {
3600 			if (!kref_get_unless_zero(&ns->kref))
3601 				continue;
3602 			ret = ns;
3603 			break;
3604 		}
3605 		if (ns->head->ns_id > nsid)
3606 			break;
3607 	}
3608 	up_read(&ctrl->namespaces_rwsem);
3609 	return ret;
3610 }
3611 
3612 static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
3613 {
3614 	struct nvme_ns *ns;
3615 	struct gendisk *disk;
3616 	struct nvme_id_ns *id;
3617 	char disk_name[DISK_NAME_LEN];
3618 	int node = ctrl->numa_node, flags = GENHD_FL_EXT_DEVT, ret;
3619 
3620 	ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node);
3621 	if (!ns)
3622 		return;
3623 
3624 	ns->queue = blk_mq_init_queue(ctrl->tagset);
3625 	if (IS_ERR(ns->queue))
3626 		goto out_free_ns;
3627 
3628 	if (ctrl->opts && ctrl->opts->data_digest)
3629 		ns->queue->backing_dev_info->capabilities
3630 			|= BDI_CAP_STABLE_WRITES;
3631 
3632 	blk_queue_flag_set(QUEUE_FLAG_NONROT, ns->queue);
3633 	if (ctrl->ops->flags & NVME_F_PCI_P2PDMA)
3634 		blk_queue_flag_set(QUEUE_FLAG_PCI_P2PDMA, ns->queue);
3635 
3636 	ns->queue->queuedata = ns;
3637 	ns->ctrl = ctrl;
3638 
3639 	kref_init(&ns->kref);
3640 	ns->lba_shift = 9; /* set to a default value for 512 until disk is validated */
3641 
3642 	blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
3643 	nvme_set_queue_limits(ctrl, ns->queue);
3644 
3645 	ret = nvme_identify_ns(ctrl, nsid, &id);
3646 	if (ret)
3647 		goto out_free_queue;
3648 
3649 	if (id->ncap == 0)	/* no namespace (legacy quirk) */
3650 		goto out_free_id;
3651 
3652 	ret = nvme_init_ns_head(ns, nsid, id);
3653 	if (ret)
3654 		goto out_free_id;
3655 	nvme_set_disk_name(disk_name, ns, ctrl, &flags);
3656 
3657 	disk = alloc_disk_node(0, node);
3658 	if (!disk)
3659 		goto out_unlink_ns;
3660 
3661 	disk->fops = &nvme_fops;
3662 	disk->private_data = ns;
3663 	disk->queue = ns->queue;
3664 	disk->flags = flags;
3665 	memcpy(disk->disk_name, disk_name, DISK_NAME_LEN);
3666 	ns->disk = disk;
3667 
3668 	if (__nvme_revalidate_disk(disk, id))
3669 		goto out_put_disk;
3670 
3671 	if ((ctrl->quirks & NVME_QUIRK_LIGHTNVM) && id->vs[0] == 0x1) {
3672 		ret = nvme_nvm_register(ns, disk_name, node);
3673 		if (ret) {
3674 			dev_warn(ctrl->device, "LightNVM init failure\n");
3675 			goto out_put_disk;
3676 		}
3677 	}
3678 
3679 	down_write(&ctrl->namespaces_rwsem);
3680 	list_add_tail(&ns->list, &ctrl->namespaces);
3681 	up_write(&ctrl->namespaces_rwsem);
3682 
3683 	nvme_get_ctrl(ctrl);
3684 
3685 	device_add_disk(ctrl->device, ns->disk, nvme_ns_id_attr_groups);
3686 
3687 	nvme_mpath_add_disk(ns, id);
3688 	nvme_fault_inject_init(&ns->fault_inject, ns->disk->disk_name);
3689 	kfree(id);
3690 
3691 	return;
3692  out_put_disk:
3693 	/* prevent double queue cleanup */
3694 	ns->disk->queue = NULL;
3695 	put_disk(ns->disk);
3696  out_unlink_ns:
3697 	mutex_lock(&ctrl->subsys->lock);
3698 	list_del_rcu(&ns->siblings);
3699 	if (list_empty(&ns->head->list))
3700 		list_del_init(&ns->head->entry);
3701 	mutex_unlock(&ctrl->subsys->lock);
3702 	nvme_put_ns_head(ns->head);
3703  out_free_id:
3704 	kfree(id);
3705  out_free_queue:
3706 	blk_cleanup_queue(ns->queue);
3707  out_free_ns:
3708 	kfree(ns);
3709 }
3710 
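/*
 * Tear down a namespace: unlink it from its subsystem head so that no
 * new I/O can find this path, wait out concurrent submitters via
 * RCU/SRCU grace periods, and only then remove the gendisk and request
 * queue.
 */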
3711 static void nvme_ns_remove(struct nvme_ns *ns)
3712 {
3713 	if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags))
3714 		return;
3715 
3716 	nvme_fault_inject_fini(&ns->fault_inject);
3717 
3718 	mutex_lock(&ns->ctrl->subsys->lock);
3719 	list_del_rcu(&ns->siblings);
3720 	if (list_empty(&ns->head->list))
3721 		list_del_init(&ns->head->entry);
3722 	mutex_unlock(&ns->ctrl->subsys->lock);
3723 
3724 	synchronize_rcu(); /* guarantee not available in head->list */
3725 	nvme_mpath_clear_current_path(ns);
3726 	synchronize_srcu(&ns->head->srcu); /* wait for concurrent submissions */
3727 
3728 	if (ns->disk && ns->disk->flags & GENHD_FL_UP) {
3729 		del_gendisk(ns->disk);
3730 		blk_cleanup_queue(ns->queue);
3731 		if (blk_get_integrity(ns->disk))
3732 			blk_integrity_unregister(ns->disk);
3733 	}
3734 
3735 	down_write(&ns->ctrl->namespaces_rwsem);
3736 	list_del_init(&ns->list);
3737 	up_write(&ns->ctrl->namespaces_rwsem);
3738 
3739 	nvme_mpath_check_last_path(ns);
3740 	nvme_put_ns(ns);
3741 }
3742 
3743 static void nvme_ns_remove_by_nsid(struct nvme_ctrl *ctrl, u32 nsid)
3744 {
3745 	struct nvme_ns *ns = nvme_find_get_ns(ctrl, nsid);
3746 
3747 	if (ns) {
3748 		nvme_ns_remove(ns);
3749 		nvme_put_ns(ns);
3750 	}
3751 }
3752 
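/*
 * If the NSID maps to a known namespace, revalidate it and drop it when
 * revalidation fails; otherwise try to allocate a new namespace for it.
 */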
3753 static void nvme_validate_ns(struct nvme_ctrl *ctrl, unsigned nsid)
3754 {
3755 	struct nvme_ns *ns;
3756 
3757 	ns = nvme_find_get_ns(ctrl, nsid);
3758 	if (ns) {
3759 		if (ns->disk && revalidate_disk(ns->disk))
3760 			nvme_ns_remove(ns);
3761 		nvme_put_ns(ns);
3762 	} else
3763 		nvme_alloc_ns(ctrl, nsid);
3764 }
3765 
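/*
 * Remove every namespace with an NSID above the given cutoff, plus any
 * namespace already marked dead.  Candidates are collected on a private
 * list under the write lock and removed only after dropping it, since
 * nvme_ns_remove() needs to take namespaces_rwsem itself.
 */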
3766 static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
3767 					unsigned nsid)
3768 {
3769 	struct nvme_ns *ns, *next;
3770 	LIST_HEAD(rm_list);
3771 
3772 	down_write(&ctrl->namespaces_rwsem);
3773 	list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) {
3774 		if (ns->head->ns_id > nsid || test_bit(NVME_NS_DEAD, &ns->flags))
3775 			list_move_tail(&ns->list, &rm_list);
3776 	}
3777 	up_write(&ctrl->namespaces_rwsem);
3778 
3779 	list_for_each_entry_safe(ns, next, &rm_list, list)
3780 		nvme_ns_remove(ns);
3782 }
3783 
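/*
 * Scan namespaces with the Identify Namespace List command.  Each pass
 * returns up to 1024 active NSIDs, in ascending order, that are greater
 * than the last NSID of the previous pass; a zero entry terminates the
 * list.  NSIDs missing from the gaps between reported entries are
 * removed along the way.
 */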
3784 static int nvme_scan_ns_list(struct nvme_ctrl *ctrl)
3785 {
3786 	const int nr_entries = NVME_IDENTIFY_DATA_SIZE / sizeof(__le32);
3787 	__le32 *ns_list;
3788 	u32 prev = 0;
3789 	int ret = 0, i;
3790 
3791 	if (nvme_ctrl_limited_cns(ctrl))
3792 		return -EOPNOTSUPP;
3793 
3794 	ns_list = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
3795 	if (!ns_list)
3796 		return -ENOMEM;
3797 
3798 	for (;;) {
3799 		ret = nvme_identify_ns_list(ctrl, prev, ns_list);
3800 		if (ret)
3801 			goto free;
3802 
3803 		for (i = 0; i < nr_entries; i++) {
3804 			u32 nsid = le32_to_cpu(ns_list[i]);
3805 
3806 			if (!nsid)	/* end of the list? */
3807 				goto out;
3808 			nvme_validate_ns(ctrl, nsid);
3809 			while (++prev < nsid)
3810 				nvme_ns_remove_by_nsid(ctrl, prev);
3811 		}
3812 	}
3813  out:
3814 	nvme_remove_invalid_namespaces(ctrl, prev);
3815  free:
3816 	kfree(ns_list);
3817 	return ret;
3818 }
3819 
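/*
 * Fallback for controllers that cannot handle Identify Namespace List:
 * probe every NSID from 1 up to the controller's reported namespace
 * count (NN).
 */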
3820 static void nvme_scan_ns_sequential(struct nvme_ctrl *ctrl)
3821 {
3822 	struct nvme_id_ctrl *id;
3823 	u32 nn, i;
3824 
3825 	if (nvme_identify_ctrl(ctrl, &id))
3826 		return;
3827 	nn = le32_to_cpu(id->nn);
3828 	kfree(id);
3829 
3830 	for (i = 1; i <= nn; i++)
3831 		nvme_validate_ns(ctrl, i);
3832 
3833 	nvme_remove_invalid_namespaces(ctrl, nn);
3834 }
3835 
3836 static void nvme_clear_changed_ns_log(struct nvme_ctrl *ctrl)
3837 {
3838 	size_t log_size = NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32);
3839 	__le32 *log;
3840 	int error;
3841 
3842 	log = kzalloc(log_size, GFP_KERNEL);
3843 	if (!log)
3844 		return;
3845 
3846 	/*
3847 	 * We need to read the log to clear the AEN, but we don't want to rely
3848 	 * on it for the changed namespace information as userspace could have
3849 	 * raced with us in reading the log page, which could cause us to miss
3850 	 * updates.
3851 	 */
3852 	error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_CHANGED_NS, 0, log,
3853 			log_size, 0);
3854 	if (error)
3855 		dev_warn(ctrl->device,
3856 			"reading changed ns log failed: %d\n", error);
3857 
3858 	kfree(log);
3859 }
3860 
3861 static void nvme_scan_work(struct work_struct *work)
3862 {
3863 	struct nvme_ctrl *ctrl =
3864 		container_of(work, struct nvme_ctrl, scan_work);
3865 
3866 	/* No tagset on a live ctrl means IO queues could not be created */
3867 	if (ctrl->state != NVME_CTRL_LIVE || !ctrl->tagset)
3868 		return;
3869 
3870 	if (test_and_clear_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events)) {
3871 		dev_info(ctrl->device, "rescanning namespaces.\n");
3872 		nvme_clear_changed_ns_log(ctrl);
3873 	}
3874 
3875 	mutex_lock(&ctrl->scan_lock);
3876 	if (nvme_scan_ns_list(ctrl) != 0)
3877 		nvme_scan_ns_sequential(ctrl);
3878 	mutex_unlock(&ctrl->scan_lock);
3879 
3880 	down_write(&ctrl->namespaces_rwsem);
3881 	list_sort(NULL, &ctrl->namespaces, ns_cmp);
3882 	up_write(&ctrl->namespaces_rwsem);
3883 }
3884 
3885 /*
3886  * This function iterates the namespace list unlocked to allow recovery from
3887  * controller failure. It is up to the caller to ensure the namespace list is
3888  * not modified by scan work while this function is executing.
3889  */
3890 void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
3891 {
3892 	struct nvme_ns *ns, *next;
3893 	LIST_HEAD(ns_list);
3894 
3895 	/*
3896 	 * Make sure to requeue I/O to all namespaces, as these requests
3897 	 * might result from the scan itself and must complete for
3898 	 * scan_work to make progress.
3899 	 */
3900 	nvme_mpath_clear_ctrl_paths(ctrl);
3901 
3902 	/* prevent racing with ns scanning */
3903 	flush_work(&ctrl->scan_work);
3904 
3905 	/*
3906 	 * The dead state indicates the controller was not gracefully
3907 	 * disconnected. In that case, we won't be able to flush any data while
3908 	 * removing the namespaces' disks; fail all the queues now to avoid
3909 	 * potentially having to clean up the failed sync later.
3910 	 */
3911 	if (ctrl->state == NVME_CTRL_DEAD)
3912 		nvme_kill_queues(ctrl);
3913 
3914 	down_write(&ctrl->namespaces_rwsem);
3915 	list_splice_init(&ctrl->namespaces, &ns_list);
3916 	up_write(&ctrl->namespaces_rwsem);
3917 
3918 	list_for_each_entry_safe(ns, next, &ns_list, list)
3919 		nvme_ns_remove(ns);
3920 }
3921 EXPORT_SYMBOL_GPL(nvme_remove_namespaces);
3922 
3923 static int nvme_class_uevent(struct device *dev, struct kobj_uevent_env *env)
3924 {
3925 	struct nvme_ctrl *ctrl =
3926 		container_of(dev, struct nvme_ctrl, ctrl_device);
3927 	struct nvmf_ctrl_options *opts = ctrl->opts;
3928 	int ret;
3929 
3930 	ret = add_uevent_var(env, "NVME_TRTYPE=%s", ctrl->ops->name);
3931 	if (ret)
3932 		return ret;
3933 
3934 	if (opts) {
3935 		ret = add_uevent_var(env, "NVME_TRADDR=%s", opts->traddr);
3936 		if (ret)
3937 			return ret;
3938 
3939 		ret = add_uevent_var(env, "NVME_TRSVCID=%s",
3940 				opts->trsvcid ?: "none");
3941 		if (ret)
3942 			return ret;
3943 
3944 		ret = add_uevent_var(env, "NVME_HOST_TRADDR=%s",
3945 				opts->host_traddr ?: "none");
3946 	}
3947 	return ret;
3948 }
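
/*
 * For a fabrics controller the resulting uevent environment might look
 * like this (values purely illustrative):
 *
 *	NVME_TRTYPE=tcp
 *	NVME_TRADDR=192.168.1.10
 *	NVME_TRSVCID=4420
 *	NVME_HOST_TRADDR=none
 */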
3949 
3950 static void nvme_aen_uevent(struct nvme_ctrl *ctrl)
3951 {
3952 	char *envp[2] = { NULL, NULL };
3953 	u32 aen_result = ctrl->aen_result;
3954 
3955 	ctrl->aen_result = 0;
3956 	if (!aen_result)
3957 		return;
3958 
3959 	envp[0] = kasprintf(GFP_KERNEL, "NVME_AEN=%#08x", aen_result);
3960 	if (!envp[0])
3961 		return;
3962 	kobject_uevent_env(&ctrl->device->kobj, KOBJ_CHANGE, envp);
3963 	kfree(envp[0]);
3964 }
3965 
3966 static void nvme_async_event_work(struct work_struct *work)
3967 {
3968 	struct nvme_ctrl *ctrl =
3969 		container_of(work, struct nvme_ctrl, async_event_work);
3970 
3971 	nvme_aen_uevent(ctrl);
3972 	ctrl->ops->submit_async_event(ctrl);
3973 }
3974 
3975 static bool nvme_ctrl_pp_status(struct nvme_ctrl *ctrl)
3976 {
3978 	u32 csts;
3979 
3980 	if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts))
3981 		return false;
3982 
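	/* An all-ones value means the register read hit a dead device. */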
3983 	if (csts == ~0)
3984 		return false;
3985 
3986 	return ((ctrl->ctrl_config & NVME_CC_ENABLE) && (csts & NVME_CSTS_PP));
3987 }
3988 
3989 static void nvme_get_fw_slot_info(struct nvme_ctrl *ctrl)
3990 {
3991 	struct nvme_fw_slot_info_log *log;
3992 
3993 	log = kmalloc(sizeof(*log), GFP_KERNEL);
3994 	if (!log)
3995 		return;
3996 
3997 	if (nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_FW_SLOT, 0, log,
3998 			sizeof(*log), 0))
3999 		dev_warn(ctrl->device, "Get FW SLOT INFO log error\n");
4000 	kfree(log);
4001 }
4002 
4003 static void nvme_fw_act_work(struct work_struct *work)
4004 {
4005 	struct nvme_ctrl *ctrl = container_of(work,
4006 				struct nvme_ctrl, fw_act_work);
4007 	unsigned long fw_act_timeout;
4008 
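	/*
	 * MTFA (Maximum Time for Firmware Activation) is reported by the
	 * controller in units of 100 milliseconds; fall back to the admin
	 * command timeout when it is not reported.
	 */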
4009 	if (ctrl->mtfa)
4010 		fw_act_timeout = jiffies +
4011 				msecs_to_jiffies(ctrl->mtfa * 100);
4012 	else
4013 		fw_act_timeout = jiffies +
4014 				msecs_to_jiffies(admin_timeout * 1000);
4015 
4016 	nvme_stop_queues(ctrl);
4017 	while (nvme_ctrl_pp_status(ctrl)) {
4018 		if (time_after(jiffies, fw_act_timeout)) {
4019 			dev_warn(ctrl->device,
4020 				"Fw activation timeout, reset controller\n");
4021 			nvme_try_sched_reset(ctrl);
4022 			return;
4023 		}
4024 		msleep(100);
4025 	}
4026 
4027 	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE))
4028 		return;
4029 
4030 	nvme_start_queues(ctrl);
4031 	/* read FW slot information to clear the AER */
4032 	nvme_get_fw_slot_info(ctrl);
4033 }
4034 
4035 static void nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
4036 {
4037 	u32 aer_notice_type = (result & 0xff00) >> 8;
4038 
4039 	trace_nvme_async_event(ctrl, aer_notice_type);
4040 
4041 	switch (aer_notice_type) {
4042 	case NVME_AER_NOTICE_NS_CHANGED:
4043 		set_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events);
4044 		nvme_queue_scan(ctrl);
4045 		break;
4046 	case NVME_AER_NOTICE_FW_ACT_STARTING:
4047 		/*
4048 		 * We are (ab)using the RESETTING state to prevent subsequent
4049 		 * recovery actions from interfering with the controller's
4050 		 * firmware activation.
4051 		 */
4052 		if (nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
4053 			queue_work(nvme_wq, &ctrl->fw_act_work);
4054 		break;
4055 #ifdef CONFIG_NVME_MULTIPATH
4056 	case NVME_AER_NOTICE_ANA:
4057 		if (!ctrl->ana_log_buf)
4058 			break;
4059 		queue_work(nvme_wq, &ctrl->ana_work);
4060 		break;
4061 #endif
4062 	case NVME_AER_NOTICE_DISC_CHANGED:
4063 		ctrl->aen_result = result;
4064 		break;
4065 	default:
4066 		dev_warn(ctrl->device, "async event result %08x\n", result);
4067 	}
4068 }
4069 
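/*
 * Layout of completion dword 0 for an Asynchronous Event Request, per
 * the NVMe specification:
 *
 *	bits  2:0  - asynchronous event type
 *	bits  7:3  - reserved
 *	bits 15:8  - asynchronous event information (the notice type)
 *	bits 23:16 - associated log page identifier
 */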
4070 void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
4071 		volatile union nvme_result *res)
4072 {
4073 	u32 result = le32_to_cpu(res->u32);
4074 	u32 aer_type = result & 0x07;
4075 
4076 	if (le16_to_cpu(status) >> 1 != NVME_SC_SUCCESS)
4077 		return;
4078 
4079 	switch (aer_type) {
4080 	case NVME_AER_NOTICE:
4081 		nvme_handle_aen_notice(ctrl, result);
4082 		break;
4083 	case NVME_AER_ERROR:
4084 	case NVME_AER_SMART:
4085 	case NVME_AER_CSS:
4086 	case NVME_AER_VS:
4087 		trace_nvme_async_event(ctrl, aer_type);
4088 		ctrl->aen_result = result;
4089 		break;
4090 	default:
4091 		break;
4092 	}
4093 	queue_work(nvme_wq, &ctrl->async_event_work);
4094 }
4095 EXPORT_SYMBOL_GPL(nvme_complete_async_event);
4096 
4097 void nvme_stop_ctrl(struct nvme_ctrl *ctrl)
4098 {
4099 	nvme_mpath_stop(ctrl);
4100 	nvme_stop_keep_alive(ctrl);
4101 	flush_work(&ctrl->async_event_work);
4102 	cancel_work_sync(&ctrl->fw_act_work);
4103 }
4104 EXPORT_SYMBOL_GPL(nvme_stop_ctrl);
4105 
4106 void nvme_start_ctrl(struct nvme_ctrl *ctrl)
4107 {
4108 	if (ctrl->kato)
4109 		nvme_start_keep_alive(ctrl);
4110 
4111 	nvme_enable_aen(ctrl);
4112 
4113 	if (ctrl->queue_count > 1) {
4114 		nvme_queue_scan(ctrl);
4115 		nvme_start_queues(ctrl);
4116 	}
4117 	ctrl->created = true;
4118 }
4119 EXPORT_SYMBOL_GPL(nvme_start_ctrl);
4120 
4121 void nvme_uninit_ctrl(struct nvme_ctrl *ctrl)
4122 {
4123 	nvme_fault_inject_fini(&ctrl->fault_inject);
4124 	dev_pm_qos_hide_latency_tolerance(ctrl->device);
4125 	cdev_device_del(&ctrl->cdev, ctrl->device);
4126 	nvme_put_ctrl(ctrl);
4127 }
4128 EXPORT_SYMBOL_GPL(nvme_uninit_ctrl);
4129 
4130 static void nvme_free_ctrl(struct device *dev)
4131 {
4132 	struct nvme_ctrl *ctrl =
4133 		container_of(dev, struct nvme_ctrl, ctrl_device);
4134 	struct nvme_subsystem *subsys = ctrl->subsys;
4135 
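	/*
	 * A controller that shares its instance number with its subsystem
	 * does not own that number; the subsystem releases it on its own
	 * teardown instead.
	 */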
4136 	if (subsys && ctrl->instance != subsys->instance)
4137 		ida_simple_remove(&nvme_instance_ida, ctrl->instance);
4138 
4139 	kfree(ctrl->effects);
4140 	nvme_mpath_uninit(ctrl);
4141 	__free_page(ctrl->discard_page);
4142 
4143 	if (subsys) {
4144 		mutex_lock(&nvme_subsystems_lock);
4145 		list_del(&ctrl->subsys_entry);
4146 		sysfs_remove_link(&subsys->dev.kobj, dev_name(ctrl->device));
4147 		mutex_unlock(&nvme_subsystems_lock);
4148 	}
4149 
4150 	ctrl->ops->free_ctrl(ctrl);
4151 
4152 	if (subsys)
4153 		nvme_put_subsystem(subsys);
4154 }
4155 
4156 /*
4157  * Initialize an NVMe controller structure.  This needs to be called during
4158  * the earliest initialization so that we have the initialized structure around
4159  * during probing.
4160  */
4161 int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
4162 		const struct nvme_ctrl_ops *ops, unsigned long quirks)
4163 {
4164 	int ret;
4165 
4166 	ctrl->state = NVME_CTRL_NEW;
4167 	spin_lock_init(&ctrl->lock);
4168 	mutex_init(&ctrl->scan_lock);
4169 	INIT_LIST_HEAD(&ctrl->namespaces);
4170 	init_rwsem(&ctrl->namespaces_rwsem);
4171 	ctrl->dev = dev;
4172 	ctrl->ops = ops;
4173 	ctrl->quirks = quirks;
4174 	ctrl->numa_node = NUMA_NO_NODE;
4175 	INIT_WORK(&ctrl->scan_work, nvme_scan_work);
4176 	INIT_WORK(&ctrl->async_event_work, nvme_async_event_work);
4177 	INIT_WORK(&ctrl->fw_act_work, nvme_fw_act_work);
4178 	INIT_WORK(&ctrl->delete_work, nvme_delete_ctrl_work);
4179 	init_waitqueue_head(&ctrl->state_wq);
4180 
4181 	INIT_DELAYED_WORK(&ctrl->ka_work, nvme_keep_alive_work);
4182 	memset(&ctrl->ka_cmd, 0, sizeof(ctrl->ka_cmd));
4183 	ctrl->ka_cmd.common.opcode = nvme_admin_keep_alive;
4184 
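	/*
	 * The discard page allocated below doubles as the DSM range payload
	 * for discard requests, so the largest possible range list must fit
	 * in a single page.
	 */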
4185 	BUILD_BUG_ON(NVME_DSM_MAX_RANGES * sizeof(struct nvme_dsm_range) >
4186 			PAGE_SIZE);
4187 	ctrl->discard_page = alloc_page(GFP_KERNEL);
4188 	if (!ctrl->discard_page) {
4189 		ret = -ENOMEM;
4190 		goto out;
4191 	}
4192 
4193 	ret = ida_simple_get(&nvme_instance_ida, 0, 0, GFP_KERNEL);
4194 	if (ret < 0)
4195 		goto out;
4196 	ctrl->instance = ret;
4197 
4198 	device_initialize(&ctrl->ctrl_device);
4199 	ctrl->device = &ctrl->ctrl_device;
4200 	ctrl->device->devt = MKDEV(MAJOR(nvme_chr_devt), ctrl->instance);
4201 	ctrl->device->class = nvme_class;
4202 	ctrl->device->parent = ctrl->dev;
4203 	ctrl->device->groups = nvme_dev_attr_groups;
4204 	ctrl->device->release = nvme_free_ctrl;
4205 	dev_set_drvdata(ctrl->device, ctrl);
4206 	ret = dev_set_name(ctrl->device, "nvme%d", ctrl->instance);
4207 	if (ret)
4208 		goto out_release_instance;
4209 
4210 	nvme_get_ctrl(ctrl);
4211 	cdev_init(&ctrl->cdev, &nvme_dev_fops);
4212 	ctrl->cdev.owner = ops->module;
4213 	ret = cdev_device_add(&ctrl->cdev, ctrl->device);
4214 	if (ret)
4215 		goto out_free_name;
4216 
4217 	/*
4218 	 * Initialize latency tolerance controls.  The sysfs files won't
4219 	 * be visible to userspace unless the device actually supports APST.
4220 	 */
4221 	ctrl->device->power.set_latency_tolerance = nvme_set_latency_tolerance;
4222 	dev_pm_qos_update_user_latency_tolerance(ctrl->device,
4223 		min(default_ps_max_latency_us, (unsigned long)S32_MAX));
4224 
4225 	nvme_fault_inject_init(&ctrl->fault_inject, dev_name(ctrl->device));
4226 
4227 	return 0;
4228 out_free_name:
4229 	nvme_put_ctrl(ctrl);
4230 	kfree_const(ctrl->device->kobj.name);
4231 out_release_instance:
4232 	ida_simple_remove(&nvme_instance_ida, ctrl->instance);
4233 out:
4234 	if (ctrl->discard_page)
4235 		__free_page(ctrl->discard_page);
4236 	return ret;
4237 }
4238 EXPORT_SYMBOL_GPL(nvme_init_ctrl);
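
/*
 * Rough call sequence for a transport driver (a sketch, not a literal
 * excerpt from any transport; my_ctrl_ops is a placeholder):
 *
 *	ret = nvme_init_ctrl(&ctrl->ctrl, dev, &my_ctrl_ops, quirks);
 *	... set up the admin queue and enable the controller ...
 *	nvme_start_ctrl(&ctrl->ctrl);	  starts keep-alive, AEN, ns scan
 *	...
 *	nvme_stop_ctrl(&ctrl->ctrl);
 *	nvme_uninit_ctrl(&ctrl->ctrl);
 */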
4239 
4240 /**
4241  * nvme_kill_queues - End all namespace queues
4242  * @ctrl: the dead controller whose namespace queues need to be ended
4243  *
4244  * Call this function when the driver determines it is unable to get the
4245  * controller in a state capable of servicing IO.
4246  */
4247 void nvme_kill_queues(struct nvme_ctrl *ctrl)
4248 {
4249 	struct nvme_ns *ns;
4250 
4251 	down_read(&ctrl->namespaces_rwsem);
4252 
4253 	/* Forcibly unquiesce queues to avoid blocking dispatch */
4254 	if (ctrl->admin_q && !blk_queue_dying(ctrl->admin_q))
4255 		blk_mq_unquiesce_queue(ctrl->admin_q);
4256 
4257 	list_for_each_entry(ns, &ctrl->namespaces, list)
4258 		nvme_set_queue_dying(ns);
4259 
4260 	up_read(&ctrl->namespaces_rwsem);
4261 }
4262 EXPORT_SYMBOL_GPL(nvme_kill_queues);
4263 
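/*
 * Queue freezing brackets controller resets: nvme_start_freeze() makes
 * new submissions block on entry to every namespace queue,
 * nvme_wait_freeze() (or the _timeout variant) waits for in-flight
 * requests to drain, and nvme_unfreeze() lifts the freeze once the
 * controller can service I/O again.
 */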
4264 void nvme_unfreeze(struct nvme_ctrl *ctrl)
4265 {
4266 	struct nvme_ns *ns;
4267 
4268 	down_read(&ctrl->namespaces_rwsem);
4269 	list_for_each_entry(ns, &ctrl->namespaces, list)
4270 		blk_mq_unfreeze_queue(ns->queue);
4271 	up_read(&ctrl->namespaces_rwsem);
4272 }
4273 EXPORT_SYMBOL_GPL(nvme_unfreeze);
4274 
4275 void nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout)
4276 {
4277 	struct nvme_ns *ns;
4278 
4279 	down_read(&ctrl->namespaces_rwsem);
4280 	list_for_each_entry(ns, &ctrl->namespaces, list) {
4281 		timeout = blk_mq_freeze_queue_wait_timeout(ns->queue, timeout);
4282 		if (timeout <= 0)
4283 			break;
4284 	}
4285 	up_read(&ctrl->namespaces_rwsem);
4286 }
4287 EXPORT_SYMBOL_GPL(nvme_wait_freeze_timeout);
4288 
4289 void nvme_wait_freeze(struct nvme_ctrl *ctrl)
4290 {
4291 	struct nvme_ns *ns;
4292 
4293 	down_read(&ctrl->namespaces_rwsem);
4294 	list_for_each_entry(ns, &ctrl->namespaces, list)
4295 		blk_mq_freeze_queue_wait(ns->queue);
4296 	up_read(&ctrl->namespaces_rwsem);
4297 }
4298 EXPORT_SYMBOL_GPL(nvme_wait_freeze);
4299 
4300 void nvme_start_freeze(struct nvme_ctrl *ctrl)
4301 {
4302 	struct nvme_ns *ns;
4303 
4304 	down_read(&ctrl->namespaces_rwsem);
4305 	list_for_each_entry(ns, &ctrl->namespaces, list)
4306 		blk_freeze_queue_start(ns->queue);
4307 	up_read(&ctrl->namespaces_rwsem);
4308 }
4309 EXPORT_SYMBOL_GPL(nvme_start_freeze);
4310 
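/*
 * Quiescing, unlike freezing, only stops blk-mq from dispatching
 * requests to the driver; new submissions may still enter the queues
 * and will sit there until nvme_start_queues() unquiesces them.
 */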
4311 void nvme_stop_queues(struct nvme_ctrl *ctrl)
4312 {
4313 	struct nvme_ns *ns;
4314 
4315 	down_read(&ctrl->namespaces_rwsem);
4316 	list_for_each_entry(ns, &ctrl->namespaces, list)
4317 		blk_mq_quiesce_queue(ns->queue);
4318 	up_read(&ctrl->namespaces_rwsem);
4319 }
4320 EXPORT_SYMBOL_GPL(nvme_stop_queues);
4321 
4322 void nvme_start_queues(struct nvme_ctrl *ctrl)
4323 {
4324 	struct nvme_ns *ns;
4325 
4326 	down_read(&ctrl->namespaces_rwsem);
4327 	list_for_each_entry(ns, &ctrl->namespaces, list)
4328 		blk_mq_unquiesce_queue(ns->queue);
4329 	up_read(&ctrl->namespaces_rwsem);
4330 }
4331 EXPORT_SYMBOL_GPL(nvme_start_queues);
4332 
4334 void nvme_sync_queues(struct nvme_ctrl *ctrl)
4335 {
4336 	struct nvme_ns *ns;
4337 
4338 	down_read(&ctrl->namespaces_rwsem);
4339 	list_for_each_entry(ns, &ctrl->namespaces, list)
4340 		blk_sync_queue(ns->queue);
4341 	up_read(&ctrl->namespaces_rwsem);
4342 
4343 	if (ctrl->admin_q)
4344 		blk_sync_queue(ctrl->admin_q);
4345 }
4346 EXPORT_SYMBOL_GPL(nvme_sync_queues);
4347 
4348 /*
4349  * Check we didn't inadvertently grow the command structure sizes:
4350  */
4351 static inline void _nvme_check_size(void)
4352 {
4353 	BUILD_BUG_ON(sizeof(struct nvme_common_command) != 64);
4354 	BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64);
4355 	BUILD_BUG_ON(sizeof(struct nvme_identify) != 64);
4356 	BUILD_BUG_ON(sizeof(struct nvme_features) != 64);
4357 	BUILD_BUG_ON(sizeof(struct nvme_download_firmware) != 64);
4358 	BUILD_BUG_ON(sizeof(struct nvme_format_cmd) != 64);
4359 	BUILD_BUG_ON(sizeof(struct nvme_dsm_cmd) != 64);
4360 	BUILD_BUG_ON(sizeof(struct nvme_write_zeroes_cmd) != 64);
4361 	BUILD_BUG_ON(sizeof(struct nvme_abort_cmd) != 64);
4362 	BUILD_BUG_ON(sizeof(struct nvme_get_log_page_command) != 64);
4363 	BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
4364 	BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != NVME_IDENTIFY_DATA_SIZE);
4365 	BUILD_BUG_ON(sizeof(struct nvme_id_ns) != NVME_IDENTIFY_DATA_SIZE);
4366 	BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
4367 	BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512);
4368 	BUILD_BUG_ON(sizeof(struct nvme_dbbuf) != 64);
4369 	BUILD_BUG_ON(sizeof(struct nvme_directive_cmd) != 64);
4370 }
4371 
4373 static int __init nvme_core_init(void)
4374 {
4375 	int result = -ENOMEM;
4376 
4377 	_nvme_check_size();
4378 
4379 	nvme_wq = alloc_workqueue("nvme-wq",
4380 			WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
4381 	if (!nvme_wq)
4382 		goto out;
4383 
4384 	nvme_reset_wq = alloc_workqueue("nvme-reset-wq",
4385 			WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
4386 	if (!nvme_reset_wq)
4387 		goto destroy_wq;
4388 
4389 	nvme_delete_wq = alloc_workqueue("nvme-delete-wq",
4390 			WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
4391 	if (!nvme_delete_wq)
4392 		goto destroy_reset_wq;
4393 
4394 	result = alloc_chrdev_region(&nvme_chr_devt, 0, NVME_MINORS, "nvme");
4395 	if (result < 0)
4396 		goto destroy_delete_wq;
4397 
4398 	nvme_class = class_create(THIS_MODULE, "nvme");
4399 	if (IS_ERR(nvme_class)) {
4400 		result = PTR_ERR(nvme_class);
4401 		goto unregister_chrdev;
4402 	}
4403 	nvme_class->dev_uevent = nvme_class_uevent;
4404 
4405 	nvme_subsys_class = class_create(THIS_MODULE, "nvme-subsystem");
4406 	if (IS_ERR(nvme_subsys_class)) {
4407 		result = PTR_ERR(nvme_subsys_class);
4408 		goto destroy_class;
4409 	}
4410 	return 0;
4411 
4412 destroy_class:
4413 	class_destroy(nvme_class);
4414 unregister_chrdev:
4415 	unregister_chrdev_region(nvme_chr_devt, NVME_MINORS);
4416 destroy_delete_wq:
4417 	destroy_workqueue(nvme_delete_wq);
4418 destroy_reset_wq:
4419 	destroy_workqueue(nvme_reset_wq);
4420 destroy_wq:
4421 	destroy_workqueue(nvme_wq);
4422 out:
4423 	return result;
4424 }
4425 
4426 static void __exit nvme_core_exit(void)
4427 {
4428 	class_destroy(nvme_subsys_class);
4429 	class_destroy(nvme_class);
4430 	unregister_chrdev_region(nvme_chr_devt, NVME_MINORS);
4431 	destroy_workqueue(nvme_delete_wq);
4432 	destroy_workqueue(nvme_reset_wq);
4433 	destroy_workqueue(nvme_wq);
4434 	ida_destroy(&nvme_instance_ida);
4435 }
4436 
4437 MODULE_LICENSE("GPL");
4438 MODULE_VERSION("1.0");
4439 module_init(nvme_core_init);
4440 module_exit(nvme_core_exit);
4441