xref: /linux/drivers/nvme/target/admin-cmd.c (revision 1cbfb828e05171ca2dd77b5988d068e6872480fe)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * NVMe admin command implementation.
4  * Copyright (c) 2015-2016 HGST, a Western Digital Company.
5  */
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7 #include <linux/module.h>
8 #include <linux/rculist.h>
9 #include <linux/part_stat.h>
10 
11 #include <generated/utsrelease.h>
12 #include <linux/unaligned.h>
13 #include "nvmet.h"
14 
15 static void nvmet_execute_delete_sq(struct nvmet_req *req)
16 {
17 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
18 	u16 sqid = le16_to_cpu(req->cmd->delete_queue.qid);
19 	u16 status;
20 
21 	if (!nvmet_is_pci_ctrl(ctrl)) {
22 		status = nvmet_report_invalid_opcode(req);
23 		goto complete;
24 	}
25 
26 	if (!sqid) {
27 		status = NVME_SC_QID_INVALID | NVME_STATUS_DNR;
28 		goto complete;
29 	}
30 
31 	status = nvmet_check_sqid(ctrl, sqid, false);
32 	if (status != NVME_SC_SUCCESS)
33 		goto complete;
34 
35 	status = ctrl->ops->delete_sq(ctrl, sqid);
36 
37 complete:
38 	nvmet_req_complete(req, status);
39 }
40 
41 static void nvmet_execute_create_sq(struct nvmet_req *req)
42 {
43 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
44 	struct nvme_command *cmd = req->cmd;
45 	u16 sqid = le16_to_cpu(cmd->create_sq.sqid);
46 	u16 cqid = le16_to_cpu(cmd->create_sq.cqid);
47 	u16 sq_flags = le16_to_cpu(cmd->create_sq.sq_flags);
48 	u16 qsize = le16_to_cpu(cmd->create_sq.qsize);
49 	u64 prp1 = le64_to_cpu(cmd->create_sq.prp1);
50 	u16 status;
51 
52 	if (!nvmet_is_pci_ctrl(ctrl)) {
53 		status = nvmet_report_invalid_opcode(req);
54 		goto complete;
55 	}
56 
57 	if (!sqid) {
58 		status = NVME_SC_QID_INVALID | NVME_STATUS_DNR;
59 		goto complete;
60 	}
61 
62 	status = nvmet_check_sqid(ctrl, sqid, true);
63 	if (status != NVME_SC_SUCCESS)
64 		goto complete;
65 
66 	/*
67 	 * Note: The NVMe specification allows multiple SQs to use the same CQ.
68 	 * However, the target code does not really support that. So for now,
69 	 * prevent this and fail the command if sqid and cqid are different.
70 	 */
71 	if (!cqid || cqid != sqid) {
72 		pr_err("SQ %u: Unsupported CQID %u\n", sqid, cqid);
73 		status = NVME_SC_CQ_INVALID | NVME_STATUS_DNR;
74 		goto complete;
75 	}
76 
77 	if (!qsize || qsize > NVME_CAP_MQES(ctrl->cap)) {
78 		status = NVME_SC_QUEUE_SIZE | NVME_STATUS_DNR;
79 		goto complete;
80 	}
81 
82 	status = ctrl->ops->create_sq(ctrl, sqid, sq_flags, qsize, prp1);
83 
84 complete:
85 	nvmet_req_complete(req, status);
86 }
87 
88 static void nvmet_execute_delete_cq(struct nvmet_req *req)
89 {
90 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
91 	u16 cqid = le16_to_cpu(req->cmd->delete_queue.qid);
92 	u16 status;
93 
94 	if (!nvmet_is_pci_ctrl(ctrl)) {
95 		status = nvmet_report_invalid_opcode(req);
96 		goto complete;
97 	}
98 
99 	if (!cqid) {
100 		status = NVME_SC_QID_INVALID | NVME_STATUS_DNR;
101 		goto complete;
102 	}
103 
104 	status = nvmet_check_cqid(ctrl, cqid);
105 	if (status != NVME_SC_SUCCESS)
106 		goto complete;
107 
108 	status = ctrl->ops->delete_cq(ctrl, cqid);
109 
110 complete:
111 	nvmet_req_complete(req, status);
112 }
113 
114 static void nvmet_execute_create_cq(struct nvmet_req *req)
115 {
116 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
117 	struct nvme_command *cmd = req->cmd;
118 	u16 cqid = le16_to_cpu(cmd->create_cq.cqid);
119 	u16 cq_flags = le16_to_cpu(cmd->create_cq.cq_flags);
120 	u16 qsize = le16_to_cpu(cmd->create_cq.qsize);
121 	u16 irq_vector = le16_to_cpu(cmd->create_cq.irq_vector);
122 	u64 prp1 = le64_to_cpu(cmd->create_cq.prp1);
123 	u16 status;
124 
125 	if (!nvmet_is_pci_ctrl(ctrl)) {
126 		status = nvmet_report_invalid_opcode(req);
127 		goto complete;
128 	}
129 
130 	if (!cqid) {
131 		status = NVME_SC_QID_INVALID | NVME_STATUS_DNR;
132 		goto complete;
133 	}
134 
135 	status = nvmet_check_cqid(ctrl, cqid);
136 	if (status != NVME_SC_SUCCESS)
137 		goto complete;
138 
139 	if (!qsize || qsize > NVME_CAP_MQES(ctrl->cap)) {
140 		status = NVME_SC_QUEUE_SIZE | NVME_STATUS_DNR;
141 		goto complete;
142 	}
143 
144 	status = ctrl->ops->create_cq(ctrl, cqid, cq_flags, qsize,
145 				      prp1, irq_vector);
146 
147 complete:
148 	nvmet_req_complete(req, status);
149 }
150 
151 u32 nvmet_get_log_page_len(struct nvme_command *cmd)
152 {
153 	u32 len = le16_to_cpu(cmd->get_log_page.numdu);
154 
155 	len <<= 16;
156 	len += le16_to_cpu(cmd->get_log_page.numdl);
157 	/* NUMD is a 0's based value */
158 	len += 1;
159 	len *= sizeof(u32);
160 
161 	return len;
162 }
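
/*
 * Worked example for the NUMD decoding above (editor's illustration): a
 * Get Log Page command carrying NUMDU = 0x0000 and NUMDL = 0x03ff encodes
 * NUMD = 0x3ff which, being a 0's based dword count, means 0x400 dwords,
 * so nvmet_get_log_page_len() returns 0x400 * 4 = 4096 bytes.
 */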
163 
164 static u32 nvmet_feat_data_len(struct nvmet_req *req, u32 cdw10)
165 {
166 	switch (cdw10 & 0xff) {
167 	case NVME_FEAT_HOST_ID:
168 		return sizeof(req->sq->ctrl->hostid);
169 	default:
170 		return 0;
171 	}
172 }
173 
174 u64 nvmet_get_log_page_offset(struct nvme_command *cmd)
175 {
176 	return le64_to_cpu(cmd->get_log_page.lpo);
177 }
178 
179 static void nvmet_execute_get_log_page_noop(struct nvmet_req *req)
180 {
181 	nvmet_req_complete(req, nvmet_zero_sgl(req, 0, req->transfer_len));
182 }
183 
184 static void nvmet_execute_get_log_page_error(struct nvmet_req *req)
185 {
186 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
187 	unsigned long flags;
188 	off_t offset = 0;
189 	u64 slot;
190 	u64 i;
191 
192 	spin_lock_irqsave(&ctrl->error_lock, flags);
193 	slot = ctrl->err_counter % NVMET_ERROR_LOG_SLOTS;
194 
195 	for (i = 0; i < NVMET_ERROR_LOG_SLOTS; i++) {
196 		if (nvmet_copy_to_sgl(req, offset, &ctrl->slots[slot],
197 				sizeof(struct nvme_error_slot)))
198 			break;
199 
200 		if (slot == 0)
201 			slot = NVMET_ERROR_LOG_SLOTS - 1;
202 		else
203 			slot--;
204 		offset += sizeof(struct nvme_error_slot);
205 	}
206 	spin_unlock_irqrestore(&ctrl->error_lock, flags);
207 	nvmet_req_complete(req, 0);
208 }
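
/*
 * Editor's note: the loop above walks the circular slots[] array
 * backwards, starting from slot err_counter % NVMET_ERROR_LOG_SLOTS and
 * wrapping from slot 0 back to the last slot, copying one
 * struct nvme_error_slot into the log page per iteration.
 */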
209 
210 static void nvmet_execute_get_supported_log_pages(struct nvmet_req *req)
211 {
212 	struct nvme_supported_log *logs;
213 	u16 status;
214 
215 	logs = kzalloc(sizeof(*logs), GFP_KERNEL);
216 	if (!logs) {
217 		status = NVME_SC_INTERNAL;
218 		goto out;
219 	}
220 
221 	logs->lids[NVME_LOG_SUPPORTED] = cpu_to_le32(NVME_LIDS_LSUPP);
222 	logs->lids[NVME_LOG_ERROR] = cpu_to_le32(NVME_LIDS_LSUPP);
223 	logs->lids[NVME_LOG_SMART] = cpu_to_le32(NVME_LIDS_LSUPP);
224 	logs->lids[NVME_LOG_FW_SLOT] = cpu_to_le32(NVME_LIDS_LSUPP);
225 	logs->lids[NVME_LOG_CHANGED_NS] = cpu_to_le32(NVME_LIDS_LSUPP);
226 	logs->lids[NVME_LOG_CMD_EFFECTS] = cpu_to_le32(NVME_LIDS_LSUPP);
227 	logs->lids[NVME_LOG_ENDURANCE_GROUP] = cpu_to_le32(NVME_LIDS_LSUPP);
228 	logs->lids[NVME_LOG_ANA] = cpu_to_le32(NVME_LIDS_LSUPP);
229 	logs->lids[NVME_LOG_FEATURES] = cpu_to_le32(NVME_LIDS_LSUPP);
230 	logs->lids[NVME_LOG_RMI] = cpu_to_le32(NVME_LIDS_LSUPP);
231 	logs->lids[NVME_LOG_RESERVATION] = cpu_to_le32(NVME_LIDS_LSUPP);
232 
233 	status = nvmet_copy_to_sgl(req, 0, logs, sizeof(*logs));
234 	kfree(logs);
235 out:
236 	nvmet_req_complete(req, status);
237 }
238 
239 static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
240 		struct nvme_smart_log *slog)
241 {
242 	u64 host_reads, host_writes, data_units_read, data_units_written;
243 	u16 status;
244 
245 	status = nvmet_req_find_ns(req);
246 	if (status)
247 		return status;
248 
249 	/* we don't have the right data for file backed ns */
250 	if (!req->ns->bdev)
251 		return NVME_SC_SUCCESS;
252 
253 	host_reads = part_stat_read(req->ns->bdev, ios[READ]);
254 	data_units_read =
255 		DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[READ]), 1000);
256 	host_writes = part_stat_read(req->ns->bdev, ios[WRITE]);
257 	data_units_written =
258 		DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[WRITE]), 1000);
259 
260 	put_unaligned_le64(host_reads, &slog->host_reads[0]);
261 	put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
262 	put_unaligned_le64(host_writes, &slog->host_writes[0]);
263 	put_unaligned_le64(data_units_written, &slog->data_units_written[0]);
264 
265 	return NVME_SC_SUCCESS;
266 }
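
/*
 * Illustrative example for the accounting above (editor's addition): the
 * block layer part stats count 512-byte sectors and the SMART log reports
 * data units in thousands of 512-byte units, so sectors[READ] = 2000001
 * becomes data_units_read = DIV_ROUND_UP(2000001, 1000) = 2001.
 */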
267 
268 static u16 nvmet_get_smart_log_all(struct nvmet_req *req,
269 		struct nvme_smart_log *slog)
270 {
271 	u64 host_reads = 0, host_writes = 0;
272 	u64 data_units_read = 0, data_units_written = 0;
273 	struct nvmet_ns *ns;
274 	struct nvmet_ctrl *ctrl;
275 	unsigned long idx;
276 
277 	ctrl = req->sq->ctrl;
278 	nvmet_for_each_enabled_ns(&ctrl->subsys->namespaces, idx, ns) {
279 		/* we don't have the right data for file backed ns */
280 		if (!ns->bdev)
281 			continue;
282 		host_reads += part_stat_read(ns->bdev, ios[READ]);
283 		data_units_read += DIV_ROUND_UP(
284 			part_stat_read(ns->bdev, sectors[READ]), 1000);
285 		host_writes += part_stat_read(ns->bdev, ios[WRITE]);
286 		data_units_written += DIV_ROUND_UP(
287 			part_stat_read(ns->bdev, sectors[WRITE]), 1000);
288 	}
289 
290 	put_unaligned_le64(host_reads, &slog->host_reads[0]);
291 	put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
292 	put_unaligned_le64(host_writes, &slog->host_writes[0]);
293 	put_unaligned_le64(data_units_written, &slog->data_units_written[0]);
294 
295 	return NVME_SC_SUCCESS;
296 }
297 
298 static void nvmet_execute_get_log_page_rmi(struct nvmet_req *req)
299 {
300 	struct nvme_rotational_media_log *log;
301 	struct gendisk *disk;
302 	u16 status;
303 
304 	req->cmd->common.nsid = cpu_to_le32(le16_to_cpu(
305 					    req->cmd->get_log_page.lsi));
306 	status = nvmet_req_find_ns(req);
307 	if (status)
308 		goto out;
309 
310 	if (!req->ns->bdev || bdev_nonrot(req->ns->bdev)) {
311 		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
312 		goto out;
313 	}
314 
315 	if (req->transfer_len != sizeof(*log)) {
316 		status = NVME_SC_SGL_INVALID_DATA | NVME_STATUS_DNR;
317 		goto out;
318 	}
319 
320 	log = kzalloc(sizeof(*log), GFP_KERNEL);
321 	if (!log) {
		status = NVME_SC_INTERNAL;
322 		goto out;
	}
323 
324 	log->endgid = req->cmd->get_log_page.lsi;
325 	disk = req->ns->bdev->bd_disk;
326 	if (disk && disk->ia_ranges)
327 		log->numa = cpu_to_le16(disk->ia_ranges->nr_ia_ranges);
328 	else
329 		log->numa = cpu_to_le16(1);
330 
331 	status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
332 	kfree(log);
333 out:
334 	nvmet_req_complete(req, status);
335 }
336 
337 static void nvmet_execute_get_log_page_smart(struct nvmet_req *req)
338 {
339 	struct nvme_smart_log *log;
340 	u16 status = NVME_SC_INTERNAL;
341 	unsigned long flags;
342 
343 	if (req->transfer_len != sizeof(*log))
344 		goto out;
345 
346 	log = kzalloc(sizeof(*log), GFP_KERNEL);
347 	if (!log)
348 		goto out;
349 
350 	if (req->cmd->get_log_page.nsid == cpu_to_le32(NVME_NSID_ALL))
351 		status = nvmet_get_smart_log_all(req, log);
352 	else
353 		status = nvmet_get_smart_log_nsid(req, log);
354 	if (status)
355 		goto out_free_log;
356 
357 	spin_lock_irqsave(&req->sq->ctrl->error_lock, flags);
358 	put_unaligned_le64(req->sq->ctrl->err_counter,
359 			&log->num_err_log_entries);
360 	spin_unlock_irqrestore(&req->sq->ctrl->error_lock, flags);
361 
362 	status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
363 out_free_log:
364 	kfree(log);
365 out:
366 	nvmet_req_complete(req, status);
367 }
368 
369 static void nvmet_get_cmd_effects_admin(struct nvmet_ctrl *ctrl,
370 					struct nvme_effects_log *log)
371 {
372 	/* For a PCI target controller, advertise the SQ/CQ creation and deletion commands. */
373 	if (nvmet_is_pci_ctrl(ctrl)) {
374 		log->acs[nvme_admin_delete_sq] =
375 		log->acs[nvme_admin_create_sq] =
376 		log->acs[nvme_admin_delete_cq] =
377 		log->acs[nvme_admin_create_cq] =
378 			cpu_to_le32(NVME_CMD_EFFECTS_CSUPP);
379 	}
380 
381 	log->acs[nvme_admin_get_log_page] =
382 	log->acs[nvme_admin_identify] =
383 	log->acs[nvme_admin_abort_cmd] =
384 	log->acs[nvme_admin_set_features] =
385 	log->acs[nvme_admin_get_features] =
386 	log->acs[nvme_admin_async_event] =
387 	log->acs[nvme_admin_keep_alive] =
388 		cpu_to_le32(NVME_CMD_EFFECTS_CSUPP);
389 }
390 
391 static void nvmet_get_cmd_effects_nvm(struct nvme_effects_log *log)
392 {
393 	log->iocs[nvme_cmd_read] =
394 	log->iocs[nvme_cmd_flush] =
395 	log->iocs[nvme_cmd_dsm]	=
396 	log->iocs[nvme_cmd_resv_acquire] =
397 	log->iocs[nvme_cmd_resv_register] =
398 	log->iocs[nvme_cmd_resv_release] =
399 	log->iocs[nvme_cmd_resv_report] =
400 		cpu_to_le32(NVME_CMD_EFFECTS_CSUPP);
401 	log->iocs[nvme_cmd_write] =
402 	log->iocs[nvme_cmd_write_zeroes] =
403 		cpu_to_le32(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC);
404 }
405 
406 static void nvmet_get_cmd_effects_zns(struct nvme_effects_log *log)
407 {
408 	log->iocs[nvme_cmd_zone_append] =
409 	log->iocs[nvme_cmd_zone_mgmt_send] =
410 		cpu_to_le32(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC);
411 	log->iocs[nvme_cmd_zone_mgmt_recv] =
412 		cpu_to_le32(NVME_CMD_EFFECTS_CSUPP);
413 }
414 
415 static void nvmet_execute_get_log_cmd_effects_ns(struct nvmet_req *req)
416 {
417 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
418 	struct nvme_effects_log *log;
419 	u16 status = NVME_SC_SUCCESS;
420 
421 	log = kzalloc(sizeof(*log), GFP_KERNEL);
422 	if (!log) {
423 		status = NVME_SC_INTERNAL;
424 		goto out;
425 	}
426 
427 	switch (req->cmd->get_log_page.csi) {
428 	case NVME_CSI_NVM:
429 		nvmet_get_cmd_effects_admin(ctrl, log);
430 		nvmet_get_cmd_effects_nvm(log);
431 		break;
432 	case NVME_CSI_ZNS:
433 		if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
434 			status = NVME_SC_INVALID_IO_CMD_SET;
435 			goto free;
436 		}
437 		nvmet_get_cmd_effects_admin(ctrl, log);
438 		nvmet_get_cmd_effects_nvm(log);
439 		nvmet_get_cmd_effects_zns(log);
440 		break;
441 	default:
442 		status = NVME_SC_INVALID_LOG_PAGE;
443 		goto free;
444 	}
445 
446 	status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
447 free:
448 	kfree(log);
449 out:
450 	nvmet_req_complete(req, status);
451 }
452 
453 static void nvmet_execute_get_log_changed_ns(struct nvmet_req *req)
454 {
455 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
456 	u16 status = NVME_SC_INTERNAL;
457 	size_t len;
458 
459 	if (req->transfer_len != NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32))
460 		goto out;
461 
462 	mutex_lock(&ctrl->lock);
463 	if (ctrl->nr_changed_ns == U32_MAX)
464 		len = sizeof(__le32);
465 	else
466 		len = ctrl->nr_changed_ns * sizeof(__le32);
467 	status = nvmet_copy_to_sgl(req, 0, ctrl->changed_ns_list, len);
468 	if (!status)
469 		status = nvmet_zero_sgl(req, len, req->transfer_len - len);
470 	ctrl->nr_changed_ns = 0;
471 	nvmet_clear_aen_bit(req, NVME_AEN_BIT_NS_ATTR);
472 	mutex_unlock(&ctrl->lock);
473 out:
474 	nvmet_req_complete(req, status);
475 }
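
/*
 * Editor's note: the U32_MAX case above follows the spec convention for
 * an overflowing changed namespace list; the core is expected to have
 * stored the single 0xffffffff "more namespaces changed than can be
 * listed" entry in changed_ns_list[0], so only one dword is copied and
 * the remainder of the page is zeroed.
 */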
476 
477 static u32 nvmet_format_ana_group(struct nvmet_req *req, u32 grpid,
478 		struct nvme_ana_group_desc *desc)
479 {
480 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
481 	struct nvmet_ns *ns;
482 	unsigned long idx;
483 	u32 count = 0;
484 
485 	if (!(req->cmd->get_log_page.lsp & NVME_ANA_LOG_RGO)) {
486 		nvmet_for_each_enabled_ns(&ctrl->subsys->namespaces, idx, ns) {
487 			if (ns->anagrpid == grpid)
488 				desc->nsids[count++] = cpu_to_le32(ns->nsid);
489 		}
490 	}
491 
492 	desc->grpid = cpu_to_le32(grpid);
493 	desc->nnsids = cpu_to_le32(count);
494 	desc->chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
495 	desc->state = req->port->ana_state[grpid];
496 	memset(desc->rsvd17, 0, sizeof(desc->rsvd17));
497 	return struct_size(desc, nsids, count);
498 }
499 
500 static void nvmet_execute_get_log_page_endgrp(struct nvmet_req *req)
501 {
502 	u64 host_reads, host_writes, data_units_read, data_units_written;
503 	struct nvme_endurance_group_log *log;
504 	u16 status;
505 
506 	/*
507 	 * The target driver emulates each endurance group as its own
508 	 * namespace, reusing the nsid as the endurance group identifier.
509 	 */
510 	req->cmd->common.nsid = cpu_to_le32(le16_to_cpu(
511 					    req->cmd->get_log_page.lsi));
512 	status = nvmet_req_find_ns(req);
513 	if (status)
514 		goto out;
515 
516 	log = kzalloc(sizeof(*log), GFP_KERNEL);
517 	if (!log) {
518 		status = NVME_SC_INTERNAL;
519 		goto out;
520 	}
521 
522 	if (!req->ns->bdev)
523 		goto copy;
524 
525 	host_reads = part_stat_read(req->ns->bdev, ios[READ]);
526 	data_units_read =
527 		DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[READ]), 1000);
528 	host_writes = part_stat_read(req->ns->bdev, ios[WRITE]);
529 	data_units_written =
530 		DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[WRITE]), 1000);
531 
532 	put_unaligned_le64(host_reads, &log->hrc[0]);
533 	put_unaligned_le64(data_units_read, &log->dur[0]);
534 	put_unaligned_le64(host_writes, &log->hwc[0]);
535 	put_unaligned_le64(data_units_written, &log->duw[0]);
536 copy:
537 	status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
538 	kfree(log);
539 out:
540 	nvmet_req_complete(req, status);
541 }
542 
543 static void nvmet_execute_get_log_page_ana(struct nvmet_req *req)
544 {
545 	struct nvme_ana_rsp_hdr hdr = { 0, };
546 	struct nvme_ana_group_desc *desc;
547 	size_t offset = sizeof(struct nvme_ana_rsp_hdr); /* start beyond hdr */
548 	size_t len;
549 	u32 grpid;
550 	u16 ngrps = 0;
551 	u16 status;
552 
553 	status = NVME_SC_INTERNAL;
554 	desc = kmalloc(struct_size(desc, nsids, NVMET_MAX_NAMESPACES),
555 		       GFP_KERNEL);
556 	if (!desc)
557 		goto out;
558 
559 	down_read(&nvmet_ana_sem);
560 	for (grpid = 1; grpid <= NVMET_MAX_ANAGRPS; grpid++) {
561 		if (!nvmet_ana_group_enabled[grpid])
562 			continue;
563 		len = nvmet_format_ana_group(req, grpid, desc);
564 		status = nvmet_copy_to_sgl(req, offset, desc, len);
565 		if (status)
566 			break;
567 		offset += len;
568 		ngrps++;
569 	}
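	/*
	 * Editor's note: keep counting the remaining enabled groups even if
	 * their descriptors did not fit in the host buffer, so that ngrps
	 * in the header copied below reflects the total number of enabled
	 * ANA groups.
	 */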
570 	for ( ; grpid <= NVMET_MAX_ANAGRPS; grpid++) {
571 		if (nvmet_ana_group_enabled[grpid])
572 			ngrps++;
573 	}
574 
575 	hdr.chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
576 	hdr.ngrps = cpu_to_le16(ngrps);
577 	nvmet_clear_aen_bit(req, NVME_AEN_BIT_ANA_CHANGE);
578 	up_read(&nvmet_ana_sem);
579 
580 	kfree(desc);
581 
582 	/* copy the header last once we know the number of groups */
583 	status = nvmet_copy_to_sgl(req, 0, &hdr, sizeof(hdr));
584 out:
585 	nvmet_req_complete(req, status);
586 }
587 
588 static void nvmet_execute_get_log_page_features(struct nvmet_req *req)
589 {
590 	struct nvme_supported_features_log *features;
591 	u16 status;
592 
593 	features = kzalloc(sizeof(*features), GFP_KERNEL);
594 	if (!features) {
595 		status = NVME_SC_INTERNAL;
596 		goto out;
597 	}
598 
599 	features->fis[NVME_FEAT_NUM_QUEUES] =
600 		cpu_to_le32(NVME_FIS_FSUPP | NVME_FIS_CSCPE);
601 	features->fis[NVME_FEAT_KATO] =
602 		cpu_to_le32(NVME_FIS_FSUPP | NVME_FIS_CSCPE);
603 	features->fis[NVME_FEAT_ASYNC_EVENT] =
604 		cpu_to_le32(NVME_FIS_FSUPP | NVME_FIS_CSCPE);
605 	features->fis[NVME_FEAT_HOST_ID] =
606 		cpu_to_le32(NVME_FIS_FSUPP | NVME_FIS_CSCPE);
607 	features->fis[NVME_FEAT_WRITE_PROTECT] =
608 		cpu_to_le32(NVME_FIS_FSUPP | NVME_FIS_NSCPE);
609 	features->fis[NVME_FEAT_RESV_MASK] =
610 		cpu_to_le32(NVME_FIS_FSUPP | NVME_FIS_NSCPE);
611 
612 	status = nvmet_copy_to_sgl(req, 0, features, sizeof(*features));
613 	kfree(features);
614 out:
615 	nvmet_req_complete(req, status);
616 }
617 
618 static void nvmet_execute_get_log_page(struct nvmet_req *req)
619 {
620 	if (!nvmet_check_transfer_len(req, nvmet_get_log_page_len(req->cmd)))
621 		return;
622 
623 	switch (req->cmd->get_log_page.lid) {
624 	case NVME_LOG_SUPPORTED:
625 		return nvmet_execute_get_supported_log_pages(req);
626 	case NVME_LOG_ERROR:
627 		return nvmet_execute_get_log_page_error(req);
628 	case NVME_LOG_SMART:
629 		return nvmet_execute_get_log_page_smart(req);
630 	case NVME_LOG_FW_SLOT:
631 		/*
632 		 * We only support a single firmware slot which is always
633 		 * active, so we can zero out the whole firmware slot log and
634 		 * still claim to fully implement this mandatory log page.
635 		 */
636 		return nvmet_execute_get_log_page_noop(req);
637 	case NVME_LOG_CHANGED_NS:
638 		return nvmet_execute_get_log_changed_ns(req);
639 	case NVME_LOG_CMD_EFFECTS:
640 		return nvmet_execute_get_log_cmd_effects_ns(req);
641 	case NVME_LOG_ENDURANCE_GROUP:
642 		return nvmet_execute_get_log_page_endgrp(req);
643 	case NVME_LOG_ANA:
644 		return nvmet_execute_get_log_page_ana(req);
645 	case NVME_LOG_FEATURES:
646 		return nvmet_execute_get_log_page_features(req);
647 	case NVME_LOG_RMI:
648 		return nvmet_execute_get_log_page_rmi(req);
649 	case NVME_LOG_RESERVATION:
650 		return nvmet_execute_get_log_page_resv(req);
651 	}
652 	pr_debug("unhandled lid %d on qid %d\n",
653 	       req->cmd->get_log_page.lid, req->sq->qid);
654 	req->error_loc = offsetof(struct nvme_get_log_page_command, lid);
655 	nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_STATUS_DNR);
656 }
657 
658 static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
659 {
660 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
661 	struct nvmet_subsys *subsys = ctrl->subsys;
662 	struct nvme_id_ctrl *id;
663 	u32 cmd_capsule_size, ctratt;
664 	u16 status = 0;
665 
666 	if (!subsys->subsys_discovered) {
667 		mutex_lock(&subsys->lock);
668 		subsys->subsys_discovered = true;
669 		mutex_unlock(&subsys->lock);
670 	}
671 
672 	id = kzalloc(sizeof(*id), GFP_KERNEL);
673 	if (!id) {
674 		status = NVME_SC_INTERNAL;
675 		goto out;
676 	}
677 
678 	id->vid = cpu_to_le16(subsys->vendor_id);
679 	id->ssvid = cpu_to_le16(subsys->subsys_vendor_id);
680 
681 	memcpy(id->sn, ctrl->subsys->serial, NVMET_SN_MAX_SIZE);
682 	memcpy_and_pad(id->mn, sizeof(id->mn), subsys->model_number,
683 		       strlen(subsys->model_number), ' ');
684 	memcpy_and_pad(id->fr, sizeof(id->fr),
685 		       subsys->firmware_rev, strlen(subsys->firmware_rev), ' ');
686 
687 	put_unaligned_le24(subsys->ieee_oui, id->ieee);
688 
689 	id->rab = 6;
690 
691 	if (nvmet_is_disc_subsys(ctrl->subsys))
692 		id->cntrltype = NVME_CTRL_DISC;
693 	else
694 		id->cntrltype = NVME_CTRL_IO;
695 
696 	/* we support multiple ports, multiple hosts and ANA: */
697 	id->cmic = NVME_CTRL_CMIC_MULTI_PORT | NVME_CTRL_CMIC_MULTI_CTRL |
698 		NVME_CTRL_CMIC_ANA;
699 
700 	/* Limit MDTS according to transport capability */
701 	if (ctrl->ops->get_mdts)
702 		id->mdts = ctrl->ops->get_mdts(ctrl);
703 	else
704 		id->mdts = 0;
705 
706 	id->cntlid = cpu_to_le16(ctrl->cntlid);
707 	id->ver = cpu_to_le32(ctrl->subsys->ver);
708 
709 	/* XXX: figure out what to do about RTD3R/RTD3 */
710 	id->oaes = cpu_to_le32(NVMET_AEN_CFG_OPTIONAL);
711 	ctratt = NVME_CTRL_ATTR_HID_128_BIT | NVME_CTRL_ATTR_TBKAS;
712 	if (nvmet_is_pci_ctrl(ctrl))
713 		ctratt |= NVME_CTRL_ATTR_RHII;
714 	id->ctratt = cpu_to_le32(ctratt);
715 
716 	id->oacs = 0;
717 
718 	/*
719 	 * We don't really have a practical limit on the number of abort
720 	 * commands.  But we don't do anything useful for abort either, so
721 	 * no point in allowing more abort commands than the spec requires.
722 	 */
723 	id->acl = 3;
724 
725 	id->aerl = NVMET_ASYNC_EVENTS - 1;
726 
727 	/* first slot is read-only, only one slot supported */
728 	id->frmw = (1 << 0) | (1 << 1);
729 	id->lpa = (1 << 0) | (1 << 1) | (1 << 2);
730 	id->elpe = NVMET_ERROR_LOG_SLOTS - 1;
731 	id->npss = 0;
732 
733 	/* We support keep-alive timeout in granularity of seconds */
734 	id->kas = cpu_to_le16(NVMET_KAS);
735 
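	/*
	 * SQES/CQES encode the maximum (bits 7:4) and required minimum
	 * (bits 3:0) entry sizes as powers of two: only 2^6 = 64 byte
	 * submission queue entries and 2^4 = 16 byte completion queue
	 * entries are supported.
	 */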
736 	id->sqes = (0x6 << 4) | 0x6;
737 	id->cqes = (0x4 << 4) | 0x4;
738 
739 	/* no enforced soft-limit for maxcmd - pick an arbitrary high value */
740 	id->maxcmd = cpu_to_le16(NVMET_MAX_CMD(ctrl));
741 
742 	id->nn = cpu_to_le32(NVMET_MAX_NAMESPACES);
743 	id->mnan = cpu_to_le32(NVMET_MAX_NAMESPACES);
744 	id->oncs = cpu_to_le16(NVME_CTRL_ONCS_DSM |
745 			NVME_CTRL_ONCS_WRITE_ZEROES |
746 			NVME_CTRL_ONCS_RESERVATIONS);
747 
748 	/* XXX: don't report vwc if the underlying device is write through */
749 	id->vwc = NVME_CTRL_VWC_PRESENT;
750 
751 	/*
752 	 * We can't support atomic writes bigger than a LBA without support
753 	 * from the backend device.
754 	 */
755 	id->awun = 0;
756 	id->awupf = 0;
757 
758 	/* we always support SGLs */
759 	id->sgls = cpu_to_le32(NVME_CTRL_SGLS_BYTE_ALIGNED);
760 	if (ctrl->ops->flags & NVMF_KEYED_SGLS)
761 		id->sgls |= cpu_to_le32(NVME_CTRL_SGLS_KSDBDS);
762 	if (req->port->inline_data_size)
763 		id->sgls |= cpu_to_le32(NVME_CTRL_SGLS_SAOS);
764 
765 	strscpy(id->subnqn, ctrl->subsys->subsysnqn, sizeof(id->subnqn));
766 
767 	/*
768 	 * Max command capsule size is sqe + in-capsule data size.
769 	 * Disable in-capsule data for Metadata capable controllers.
770 	 */
771 	cmd_capsule_size = sizeof(struct nvme_command);
772 	if (!ctrl->pi_support)
773 		cmd_capsule_size += req->port->inline_data_size;
774 	id->ioccsz = cpu_to_le32(cmd_capsule_size / 16);
775 
776 	/* Max response capsule size is cqe */
777 	id->iorcsz = cpu_to_le32(sizeof(struct nvme_completion) / 16);
778 
779 	id->msdbd = ctrl->ops->msdbd;
780 
781 	/*
782 	 * Endurance group identifier is 16 bits, so we can't let namespaces
783 	 * overflow that since we reuse the nsid
784 	 */
785 	BUILD_BUG_ON(NVMET_MAX_NAMESPACES > USHRT_MAX);
786 	id->endgidmax = cpu_to_le16(NVMET_MAX_NAMESPACES);
787 
788 	id->anacap = (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3) | (1 << 4);
789 	id->anatt = 10; /* random value */
790 	id->anagrpmax = cpu_to_le32(NVMET_MAX_ANAGRPS);
791 	id->nanagrpid = cpu_to_le32(NVMET_MAX_ANAGRPS);
792 
793 	/*
794 	 * Meh, we don't really support any power state.  Fake up the same
795 	 * values that qemu does.
796 	 */
797 	id->psd[0].max_power = cpu_to_le16(0x9c4);
798 	id->psd[0].entry_lat = cpu_to_le32(0x10);
799 	id->psd[0].exit_lat = cpu_to_le32(0x4);
800 
801 	id->nwpc = 1 << 0; /* write protect and no write protect */
802 
803 	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
804 
805 	kfree(id);
806 out:
807 	nvmet_req_complete(req, status);
808 }
809 
810 static void nvmet_execute_identify_ns(struct nvmet_req *req)
811 {
812 	struct nvme_id_ns *id;
813 	u16 status;
814 
815 	if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) {
816 		req->error_loc = offsetof(struct nvme_identify, nsid);
817 		status = NVME_SC_INVALID_NS | NVME_STATUS_DNR;
818 		goto out;
819 	}
820 
821 	id = kzalloc(sizeof(*id), GFP_KERNEL);
822 	if (!id) {
823 		status = NVME_SC_INTERNAL;
824 		goto out;
825 	}
826 
827 	/* return an all zeroed buffer if we can't find an active namespace */
828 	status = nvmet_req_find_ns(req);
829 	if (status) {
830 		status = 0;
831 		goto done;
832 	}
833 
834 	if (nvmet_ns_revalidate(req->ns)) {
835 		mutex_lock(&req->ns->subsys->lock);
836 		nvmet_ns_changed(req->ns->subsys, req->ns->nsid);
837 		mutex_unlock(&req->ns->subsys->lock);
838 	}
839 
840 	/*
841 	 * nuse = ncap = nsze isn't always true, but we have no way to find
842 	 * that out from the underlying device.
843 	 */
844 	id->ncap = id->nsze =
845 		cpu_to_le64(req->ns->size >> req->ns->blksize_shift);
846 	switch (req->port->ana_state[req->ns->anagrpid]) {
847 	case NVME_ANA_INACCESSIBLE:
848 	case NVME_ANA_PERSISTENT_LOSS:
849 		break;
850 	default:
851 		id->nuse = id->nsze;
852 		break;
853 	}
854 
855 	if (req->ns->bdev)
856 		nvmet_bdev_set_limits(req->ns->bdev, id);
857 
858 	/*
859 	 * We just provide a single LBA format that matches what the
860 	 * underlying device reports.
861 	 */
862 	id->nlbaf = 0;
863 	id->flbas = 0;
864 
865 	/*
866 	 * Any of our namespaces may be shared at any time, not just with other
867 	 * controllers, but also with any other user of the block device.
868 	 */
869 	id->nmic = NVME_NS_NMIC_SHARED;
870 	id->anagrpid = cpu_to_le32(req->ns->anagrpid);
871 
872 	if (req->ns->pr.enable)
873 		id->rescap = NVME_PR_SUPPORT_WRITE_EXCLUSIVE |
874 			NVME_PR_SUPPORT_EXCLUSIVE_ACCESS |
875 			NVME_PR_SUPPORT_WRITE_EXCLUSIVE_REG_ONLY |
876 			NVME_PR_SUPPORT_EXCLUSIVE_ACCESS_REG_ONLY |
877 			NVME_PR_SUPPORT_WRITE_EXCLUSIVE_ALL_REGS |
878 			NVME_PR_SUPPORT_EXCLUSIVE_ACCESS_ALL_REGS |
879 			NVME_PR_SUPPORT_IEKEY_VER_1_3_DEF;
880 
881 	/*
882 	 * Since we don't know any better, every namespace is its own endurance
883 	 * group.
884 	 */
885 	id->endgid = cpu_to_le16(req->ns->nsid);
886 
887 	memcpy(&id->nguid, &req->ns->nguid, sizeof(id->nguid));
888 
889 	id->lbaf[0].ds = req->ns->blksize_shift;
890 
891 	if (req->sq->ctrl->pi_support && nvmet_ns_has_pi(req->ns)) {
892 		id->dpc = NVME_NS_DPC_PI_FIRST | NVME_NS_DPC_PI_LAST |
893 			  NVME_NS_DPC_PI_TYPE1 | NVME_NS_DPC_PI_TYPE2 |
894 			  NVME_NS_DPC_PI_TYPE3;
895 		id->mc = NVME_MC_EXTENDED_LBA;
896 		id->dps = req->ns->pi_type;
897 		id->flbas = NVME_NS_FLBAS_META_EXT;
898 		id->lbaf[0].ms = cpu_to_le16(req->ns->metadata_size);
899 	}
900 
901 	if (req->ns->readonly)
902 		id->nsattr |= NVME_NS_ATTR_RO;
903 done:
904 	if (!status)
905 		status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
906 
907 	kfree(id);
908 out:
909 	nvmet_req_complete(req, status);
910 }
911 
912 static void nvmet_execute_identify_endgrp_list(struct nvmet_req *req)
913 {
914 	u16 min_endgid = le16_to_cpu(req->cmd->identify.cnssid);
915 	static const int buf_size = NVME_IDENTIFY_DATA_SIZE;
916 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
917 	struct nvmet_ns *ns;
918 	unsigned long idx;
919 	__le16 *list;
920 	u16 status;
921 	int i = 1;
922 
923 	list = kzalloc(buf_size, GFP_KERNEL);
924 	if (!list) {
925 		status = NVME_SC_INTERNAL;
926 		goto out;
927 	}
928 
929 	nvmet_for_each_enabled_ns(&ctrl->subsys->namespaces, idx, ns) {
930 		if (ns->nsid <= min_endgid)
931 			continue;
932 
933 		list[i++] = cpu_to_le16(ns->nsid);
934 		if (i == buf_size / sizeof(__le16))
935 			break;
936 	}
937 
938 	list[0] = cpu_to_le16(i - 1);
939 	status = nvmet_copy_to_sgl(req, 0, list, buf_size);
940 	kfree(list);
941 out:
942 	nvmet_req_complete(req, status);
943 }
944 
945 static void nvmet_execute_identify_nslist(struct nvmet_req *req, bool match_css)
946 {
947 	static const int buf_size = NVME_IDENTIFY_DATA_SIZE;
948 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
949 	struct nvmet_ns *ns;
950 	unsigned long idx;
951 	u32 min_nsid = le32_to_cpu(req->cmd->identify.nsid);
952 	__le32 *list;
953 	u16 status = 0;
954 	int i = 0;
955 
956 	/*
957 	 * NSID values 0xFFFFFFFE and NVME_NSID_ALL are invalid
958 	 * See NVMe Base Specification, Active Namespace ID list (CNS 02h).
959 	 */
960 	if (min_nsid == 0xFFFFFFFE || min_nsid == NVME_NSID_ALL) {
961 		req->error_loc = offsetof(struct nvme_identify, nsid);
962 		status = NVME_SC_INVALID_NS | NVME_STATUS_DNR;
963 		goto out;
964 	}
965 
966 	list = kzalloc(buf_size, GFP_KERNEL);
967 	if (!list) {
968 		status = NVME_SC_INTERNAL;
969 		goto out;
970 	}
971 
972 	nvmet_for_each_enabled_ns(&ctrl->subsys->namespaces, idx, ns) {
973 		if (ns->nsid <= min_nsid)
974 			continue;
975 		if (match_css && req->ns->csi != req->cmd->identify.csi)
976 			continue;
977 		list[i++] = cpu_to_le32(ns->nsid);
978 		if (i == buf_size / sizeof(__le32))
979 			break;
980 	}
981 
982 	status = nvmet_copy_to_sgl(req, 0, list, buf_size);
983 
984 	kfree(list);
985 out:
986 	nvmet_req_complete(req, status);
987 }
988 
989 static u16 nvmet_copy_ns_identifier(struct nvmet_req *req, u8 type, u8 len,
990 				    void *id, off_t *off)
991 {
992 	struct nvme_ns_id_desc desc = {
993 		.nidt = type,
994 		.nidl = len,
995 	};
996 	u16 status;
997 
998 	status = nvmet_copy_to_sgl(req, *off, &desc, sizeof(desc));
999 	if (status)
1000 		return status;
1001 	*off += sizeof(desc);
1002 
1003 	status = nvmet_copy_to_sgl(req, *off, id, len);
1004 	if (status)
1005 		return status;
1006 	*off += len;
1007 
1008 	return 0;
1009 }
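
/*
 * Illustrative layout (editor's addition): each identifier copied by the
 * helper above is emitted as a (nidt, nidl, reserved, nid[nidl])
 * descriptor, so e.g. a UUID entry occupies 4 + 16 = 20 bytes; the
 * descriptor list built by nvmet_execute_identify_desclist() below is a
 * concatenation of such entries, zero padded to NVME_IDENTIFY_DATA_SIZE.
 */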
1010 
1011 static void nvmet_execute_identify_desclist(struct nvmet_req *req)
1012 {
1013 	off_t off = 0;
1014 	u16 status;
1015 
1016 	status = nvmet_req_find_ns(req);
1017 	if (status)
1018 		goto out;
1019 
1020 	if (memchr_inv(&req->ns->uuid, 0, sizeof(req->ns->uuid))) {
1021 		status = nvmet_copy_ns_identifier(req, NVME_NIDT_UUID,
1022 						  NVME_NIDT_UUID_LEN,
1023 						  &req->ns->uuid, &off);
1024 		if (status)
1025 			goto out;
1026 	}
1027 	if (memchr_inv(req->ns->nguid, 0, sizeof(req->ns->nguid))) {
1028 		status = nvmet_copy_ns_identifier(req, NVME_NIDT_NGUID,
1029 						  NVME_NIDT_NGUID_LEN,
1030 						  &req->ns->nguid, &off);
1031 		if (status)
1032 			goto out;
1033 	}
1034 
1035 	status = nvmet_copy_ns_identifier(req, NVME_NIDT_CSI,
1036 					  NVME_NIDT_CSI_LEN,
1037 					  &req->ns->csi, &off);
1038 	if (status)
1039 		goto out;
1040 
1041 	if (sg_zero_buffer(req->sg, req->sg_cnt, NVME_IDENTIFY_DATA_SIZE - off,
1042 			off) != NVME_IDENTIFY_DATA_SIZE - off)
1043 		status = NVME_SC_INTERNAL | NVME_STATUS_DNR;
1044 
1045 out:
1046 	nvmet_req_complete(req, status);
1047 }
1048 
1049 static void nvmet_execute_identify_ctrl_nvm(struct nvmet_req *req)
1050 {
1051 	/* Not supported: return zeroes */
1052 	nvmet_req_complete(req,
1053 		   nvmet_zero_sgl(req, 0, sizeof(struct nvme_id_ctrl_nvm)));
1054 }
1055 
1056 static void nvme_execute_identify_ns_nvm(struct nvmet_req *req)
1057 {
1058 	u16 status;
1059 	struct nvme_id_ns_nvm *id;
1060 
1061 	status = nvmet_req_find_ns(req);
1062 	if (status)
1063 		goto out;
1064 
1065 	id = kzalloc(sizeof(*id), GFP_KERNEL);
1066 	if (!id) {
1067 		status = NVME_SC_INTERNAL;
1068 		goto out;
1069 	}
1070 	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
1071 out:
1072 	nvmet_req_complete(req, status);
1073 }
1074 
1075 static void nvmet_execute_id_cs_indep(struct nvmet_req *req)
1076 {
1077 	struct nvme_id_ns_cs_indep *id;
1078 	u16 status;
1079 
1080 	status = nvmet_req_find_ns(req);
1081 	if (status)
1082 		goto out;
1083 
1084 	id = kzalloc(sizeof(*id), GFP_KERNEL);
1085 	if (!id) {
1086 		status = NVME_SC_INTERNAL;
1087 		goto out;
1088 	}
1089 
1090 	id->nstat = NVME_NSTAT_NRDY;
1091 	id->anagrpid = cpu_to_le32(req->ns->anagrpid);
1092 	id->nmic = NVME_NS_NMIC_SHARED;
1093 	if (req->ns->readonly)
1094 		id->nsattr |= NVME_NS_ATTR_RO;
1095 	if (req->ns->bdev && !bdev_nonrot(req->ns->bdev))
1096 		id->nsfeat |= NVME_NS_ROTATIONAL;
1097 	/*
1098 	 * We need the flush command to flush the file's metadata,
1099 	 * so report a volatile write cache (VWC) if the backend is a file,
1100 	 * even though buffered_io is disabled.
1101 	 */
1102 	if (req->ns->bdev && !bdev_write_cache(req->ns->bdev))
1103 		id->nsfeat |= NVME_NS_VWC_NOT_PRESENT;
1104 
1105 	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
1106 	kfree(id);
1107 out:
1108 	nvmet_req_complete(req, status);
1109 }
1110 
1111 static void nvmet_execute_identify(struct nvmet_req *req)
1112 {
1113 	if (!nvmet_check_transfer_len(req, NVME_IDENTIFY_DATA_SIZE))
1114 		return;
1115 
1116 	switch (req->cmd->identify.cns) {
1117 	case NVME_ID_CNS_NS:
1118 		nvmet_execute_identify_ns(req);
1119 		return;
1120 	case NVME_ID_CNS_CTRL:
1121 		nvmet_execute_identify_ctrl(req);
1122 		return;
1123 	case NVME_ID_CNS_NS_ACTIVE_LIST:
1124 		nvmet_execute_identify_nslist(req, false);
1125 		return;
1126 	case NVME_ID_CNS_NS_DESC_LIST:
1127 		nvmet_execute_identify_desclist(req);
1128 		return;
1129 	case NVME_ID_CNS_CS_NS:
1130 		switch (req->cmd->identify.csi) {
1131 		case NVME_CSI_NVM:
1132 			nvme_execute_identify_ns_nvm(req);
1133 			return;
1134 		case NVME_CSI_ZNS:
1135 			if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
1136 				nvmet_execute_identify_ns_zns(req);
1137 				return;
1138 			}
1139 			break;
1140 		}
1141 		break;
1142 	case NVME_ID_CNS_CS_CTRL:
1143 		switch (req->cmd->identify.csi) {
1144 		case NVME_CSI_NVM:
1145 			nvmet_execute_identify_ctrl_nvm(req);
1146 			return;
1147 		case NVME_CSI_ZNS:
1148 			if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
1149 				nvmet_execute_identify_ctrl_zns(req);
1150 				return;
1151 			}
1152 			break;
1153 		}
1154 		break;
1155 	case NVME_ID_CNS_NS_ACTIVE_LIST_CS:
1156 		nvmet_execute_identify_nslist(req, true);
1157 		return;
1158 	case NVME_ID_CNS_NS_CS_INDEP:
1159 		nvmet_execute_id_cs_indep(req);
1160 		return;
1161 	case NVME_ID_CNS_ENDGRP_LIST:
1162 		nvmet_execute_identify_endgrp_list(req);
1163 		return;
1164 	}
1165 
1166 	pr_debug("unhandled identify cns %d on qid %d\n",
1167 	       req->cmd->identify.cns, req->sq->qid);
1168 	req->error_loc = offsetof(struct nvme_identify, cns);
1169 	nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_STATUS_DNR);
1170 }
1171 
1172 /*
1173  * A "minimum viable" abort implementation: the command is mandatory in the
1174  * spec, but we are not required to do any useful work.  We couldn't really
1175  * do a useful abort, so don't even bother waiting for the command
1176  * to be executed and return immediately, reporting that the command to abort
1177  * wasn't found.
1178  */
1179 static void nvmet_execute_abort(struct nvmet_req *req)
1180 {
1181 	if (!nvmet_check_transfer_len(req, 0))
1182 		return;
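	/* Completion dword 0 bit 0 set to 1: the command was not aborted. */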
1183 	nvmet_set_result(req, 1);
1184 	nvmet_req_complete(req, 0);
1185 }
1186 
1187 static u16 nvmet_write_protect_flush_sync(struct nvmet_req *req)
1188 {
1189 	u16 status;
1190 
1191 	if (req->ns->file)
1192 		status = nvmet_file_flush(req);
1193 	else
1194 		status = nvmet_bdev_flush(req);
1195 
1196 	if (status)
1197 		pr_err("write protect flush failed nsid: %u\n", req->ns->nsid);
1198 	return status;
1199 }
1200 
1201 static u16 nvmet_set_feat_write_protect(struct nvmet_req *req)
1202 {
1203 	u32 write_protect = le32_to_cpu(req->cmd->common.cdw11);
1204 	struct nvmet_subsys *subsys = nvmet_req_subsys(req);
1205 	u16 status;
1206 
1207 	status = nvmet_req_find_ns(req);
1208 	if (status)
1209 		return status;
1210 
1211 	mutex_lock(&subsys->lock);
1212 	switch (write_protect) {
1213 	case NVME_NS_WRITE_PROTECT:
1214 		req->ns->readonly = true;
1215 		status = nvmet_write_protect_flush_sync(req);
1216 		if (status)
1217 			req->ns->readonly = false;
1218 		break;
1219 	case NVME_NS_NO_WRITE_PROTECT:
1220 		req->ns->readonly = false;
1221 		status = 0;
1222 		break;
1223 	default:
1224 		break;
1225 	}
1226 
1227 	if (!status)
1228 		nvmet_ns_changed(subsys, req->ns->nsid);
1229 	mutex_unlock(&subsys->lock);
1230 	return status;
1231 }
1232 
1233 u16 nvmet_set_feat_kato(struct nvmet_req *req)
1234 {
1235 	u32 val32 = le32_to_cpu(req->cmd->common.cdw11);
1236 
1237 	nvmet_stop_keep_alive_timer(req->sq->ctrl);
1238 	req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000);
1239 	nvmet_start_keep_alive_timer(req->sq->ctrl);
1240 
1241 	nvmet_set_result(req, req->sq->ctrl->kato);
1242 
1243 	return 0;
1244 }
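
/*
 * Worked example for the conversion above (editor's illustration): a Set
 * Features KATO value of cdw11 = 15000 milliseconds is stored as
 * kato = DIV_ROUND_UP(15000, 1000) = 15 seconds, and a small non-zero
 * value such as 500 ms rounds up to 1 second rather than down to zero.
 */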
1245 
1246 u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask)
1247 {
1248 	u32 val32 = le32_to_cpu(req->cmd->common.cdw11);
1249 
1250 	if (val32 & ~mask) {
1251 		req->error_loc = offsetof(struct nvme_common_command, cdw11);
1252 		return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
1253 	}
1254 
1255 	WRITE_ONCE(req->sq->ctrl->aen_enabled, val32);
1256 	nvmet_set_result(req, val32);
1257 
1258 	return 0;
1259 }
1260 
1261 static u16 nvmet_set_feat_host_id(struct nvmet_req *req)
1262 {
1263 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
1264 
1265 	if (!nvmet_is_pci_ctrl(ctrl))
1266 		return NVME_SC_CMD_SEQ_ERROR | NVME_STATUS_DNR;
1267 
1268 	/*
1269 	 * The NVMe Base Specification v2.1 recommends supporting 128-bit host
1270 	 * IDs (section 5.1.25.1.28.1). However, that same section also says
1271 	 * that "The controller may support a 64-bit Host Identifier and/or an
1272 	 * extended 128-bit Host Identifier". So simplify this support and do
1273 	 * not support 64-bit host IDs to avoid needing to check that all
1274 	 * controllers associated with the same subsystem all use the same host
1275 	 * ID size.
1276 	 */
1277 	if (!(req->cmd->common.cdw11 & cpu_to_le32(1 << 0))) {
1278 		req->error_loc = offsetof(struct nvme_common_command, cdw11);
1279 		return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
1280 	}
1281 
1282 	return nvmet_copy_from_sgl(req, 0, &req->sq->ctrl->hostid,
1283 				   sizeof(req->sq->ctrl->hostid));
1284 }
1285 
1286 static u16 nvmet_set_feat_irq_coalesce(struct nvmet_req *req)
1287 {
1288 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
1289 	u32 cdw11 = le32_to_cpu(req->cmd->common.cdw11);
1290 	struct nvmet_feat_irq_coalesce irqc = {
1291 		.time = (cdw11 >> 8) & 0xff,
1292 		.thr = cdw11 & 0xff,
1293 	};
1294 
1295 	/*
1296 	 * This feature is not supported for fabrics controllers and is mandatory
1297 	 * for PCI controllers.
1298 	 */
1299 	if (!nvmet_is_pci_ctrl(ctrl)) {
1300 		req->error_loc = offsetof(struct nvme_common_command, cdw10);
1301 		return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
1302 	}
1303 
1304 	return ctrl->ops->set_feature(ctrl, NVME_FEAT_IRQ_COALESCE, &irqc);
1305 }
1306 
1307 static u16 nvmet_set_feat_irq_config(struct nvmet_req *req)
1308 {
1309 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
1310 	u32 cdw11 = le32_to_cpu(req->cmd->common.cdw11);
1311 	struct nvmet_feat_irq_config irqcfg = {
1312 		.iv = cdw11 & 0xffff,
1313 		.cd = (cdw11 >> 16) & 0x1,
1314 	};
1315 
1316 	/*
1317 	 * This feature is not supported for fabrics controllers and is mandatory
1318 	 * for PCI controllers.
1319 	 */
1320 	if (!nvmet_is_pci_ctrl(ctrl)) {
1321 		req->error_loc = offsetof(struct nvme_common_command, cdw10);
1322 		return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
1323 	}
1324 
1325 	return ctrl->ops->set_feature(ctrl, NVME_FEAT_IRQ_CONFIG, &irqcfg);
1326 }
1327 
1328 static u16 nvmet_set_feat_arbitration(struct nvmet_req *req)
1329 {
1330 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
1331 	u32 cdw11 = le32_to_cpu(req->cmd->common.cdw11);
1332 	struct nvmet_feat_arbitration arb = {
1333 		.hpw = (cdw11 >> 24) & 0xff,
1334 		.mpw = (cdw11 >> 16) & 0xff,
1335 		.lpw = (cdw11 >> 8) & 0xff,
1336 		.ab = cdw11 & 0x3,
1337 	};
1338 
1339 	if (!ctrl->ops->set_feature) {
1340 		req->error_loc = offsetof(struct nvme_common_command, cdw10);
1341 		return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
1342 	}
1343 
1344 	return ctrl->ops->set_feature(ctrl, NVME_FEAT_ARBITRATION, &arb);
1345 }
1346 
1347 void nvmet_execute_set_features(struct nvmet_req *req)
1348 {
1349 	struct nvmet_subsys *subsys = nvmet_req_subsys(req);
1350 	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
1351 	u32 cdw11 = le32_to_cpu(req->cmd->common.cdw11);
1352 	u16 status = 0;
1353 	u16 nsqr;
1354 	u16 ncqr;
1355 
1356 	if (!nvmet_check_data_len_lte(req, 0))
1357 		return;
1358 
1359 	switch (cdw10 & 0xff) {
1360 	case NVME_FEAT_ARBITRATION:
1361 		status = nvmet_set_feat_arbitration(req);
1362 		break;
1363 	case NVME_FEAT_NUM_QUEUES:
1364 		ncqr = (cdw11 >> 16) & 0xffff;
1365 		nsqr = cdw11 & 0xffff;
1366 		if (ncqr == 0xffff || nsqr == 0xffff) {
1367 			status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
1368 			break;
1369 		}
1370 		nvmet_set_result(req,
1371 			(subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
1372 		break;
1373 	case NVME_FEAT_IRQ_COALESCE:
1374 		status = nvmet_set_feat_irq_coalesce(req);
1375 		break;
1376 	case NVME_FEAT_IRQ_CONFIG:
1377 		status = nvmet_set_feat_irq_config(req);
1378 		break;
1379 	case NVME_FEAT_KATO:
1380 		status = nvmet_set_feat_kato(req);
1381 		break;
1382 	case NVME_FEAT_ASYNC_EVENT:
1383 		status = nvmet_set_feat_async_event(req, NVMET_AEN_CFG_ALL);
1384 		break;
1385 	case NVME_FEAT_HOST_ID:
1386 		status = nvmet_set_feat_host_id(req);
1387 		break;
1388 	case NVME_FEAT_WRITE_PROTECT:
1389 		status = nvmet_set_feat_write_protect(req);
1390 		break;
1391 	case NVME_FEAT_RESV_MASK:
1392 		status = nvmet_set_feat_resv_notif_mask(req, cdw11);
1393 		break;
1394 	default:
1395 		req->error_loc = offsetof(struct nvme_common_command, cdw10);
1396 		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
1397 		break;
1398 	}
1399 
1400 	nvmet_req_complete(req, status);
1401 }
1402 
1403 static u16 nvmet_get_feat_write_protect(struct nvmet_req *req)
1404 {
1405 	struct nvmet_subsys *subsys = nvmet_req_subsys(req);
1406 	u32 result;
1407 
1408 	result = nvmet_req_find_ns(req);
1409 	if (result)
1410 		return result;
1411 
1412 	mutex_lock(&subsys->lock);
1413 	if (req->ns->readonly == true)
1414 		result = NVME_NS_WRITE_PROTECT;
1415 	else
1416 		result = NVME_NS_NO_WRITE_PROTECT;
1417 	nvmet_set_result(req, result);
1418 	mutex_unlock(&subsys->lock);
1419 
1420 	return 0;
1421 }
1422 
1423 static u16 nvmet_get_feat_irq_coalesce(struct nvmet_req *req)
1424 {
1425 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
1426 	struct nvmet_feat_irq_coalesce irqc = { };
1427 	u16 status;
1428 
1429 	/*
1430 	 * This feature is not supported for fabrics controllers and is mandatory
1431 	 * for PCI controllers.
1432 	 */
1433 	if (!nvmet_is_pci_ctrl(ctrl)) {
1434 		req->error_loc = offsetof(struct nvme_common_command, cdw10);
1435 		return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
1436 	}
1437 
1438 	status = ctrl->ops->get_feature(ctrl, NVME_FEAT_IRQ_COALESCE, &irqc);
1439 	if (status != NVME_SC_SUCCESS)
1440 		return status;
1441 
1442 	nvmet_set_result(req, ((u32)irqc.time << 8) | (u32)irqc.thr);
1443 
1444 	return NVME_SC_SUCCESS;
1445 }
1446 
1447 static u16 nvmet_get_feat_irq_config(struct nvmet_req *req)
1448 {
1449 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
1450 	u32 iv = le32_to_cpu(req->cmd->common.cdw11) & 0xffff;
1451 	struct nvmet_feat_irq_config irqcfg = { .iv = iv };
1452 	u16 status;
1453 
1454 	/*
1455 	 * This feature is not supported for fabrics controllers and is mandatory
1456 	 * for PCI controllers.
1457 	 */
1458 	if (!nvmet_is_pci_ctrl(ctrl)) {
1459 		req->error_loc = offsetof(struct nvme_common_command, cdw10);
1460 		return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
1461 	}
1462 
1463 	status = ctrl->ops->get_feature(ctrl, NVME_FEAT_IRQ_CONFIG, &irqcfg);
1464 	if (status != NVME_SC_SUCCESS)
1465 		return status;
1466 
1467 	nvmet_set_result(req, ((u32)irqcfg.cd << 16) | iv);
1468 
1469 	return NVME_SC_SUCCESS;
1470 }
1471 
1472 static u16 nvmet_get_feat_arbitration(struct nvmet_req *req)
1473 {
1474 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
1475 	struct nvmet_feat_arbitration arb = { };
1476 	u16 status;
1477 
1478 	if (!ctrl->ops->get_feature) {
1479 		req->error_loc = offsetof(struct nvme_common_command, cdw10);
1480 		return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
1481 	}
1482 
1483 	status = ctrl->ops->get_feature(ctrl, NVME_FEAT_ARBITRATION, &arb);
1484 	if (status != NVME_SC_SUCCESS)
1485 		return status;
1486 
1487 	nvmet_set_result(req,
1488 			 ((u32)arb.hpw << 24) |
1489 			 ((u32)arb.mpw << 16) |
1490 			 ((u32)arb.lpw << 8) |
1491 			 (arb.ab & 0x3));
1492 
1493 	return NVME_SC_SUCCESS;
1494 }
1495 
1496 void nvmet_get_feat_kato(struct nvmet_req *req)
1497 {
1498 	nvmet_set_result(req, req->sq->ctrl->kato * 1000);
1499 }
1500 
1501 void nvmet_get_feat_async_event(struct nvmet_req *req)
1502 {
1503 	nvmet_set_result(req, READ_ONCE(req->sq->ctrl->aen_enabled));
1504 }
1505 
1506 void nvmet_execute_get_features(struct nvmet_req *req)
1507 {
1508 	struct nvmet_subsys *subsys = nvmet_req_subsys(req);
1509 	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
1510 	u16 status = 0;
1511 
1512 	if (!nvmet_check_transfer_len(req, nvmet_feat_data_len(req, cdw10)))
1513 		return;
1514 
1515 	switch (cdw10 & 0xff) {
1516 	/*
1517 	 * These features are mandatory in the spec, but we don't
1518 	 * have a useful way to implement them.  We'll eventually
1519 	 * need to come up with some fake values for these.
1520 	 */
1521 #if 0
1522 	case NVME_FEAT_POWER_MGMT:
1523 		break;
1524 	case NVME_FEAT_TEMP_THRESH:
1525 		break;
1526 	case NVME_FEAT_ERR_RECOVERY:
1527 		break;
1528 	case NVME_FEAT_WRITE_ATOMIC:
1529 		break;
1530 #endif
1531 	case NVME_FEAT_ARBITRATION:
1532 		status = nvmet_get_feat_arbitration(req);
1533 		break;
1534 	case NVME_FEAT_IRQ_COALESCE:
1535 		status = nvmet_get_feat_irq_coalesce(req);
1536 		break;
1537 	case NVME_FEAT_IRQ_CONFIG:
1538 		status = nvmet_get_feat_irq_config(req);
1539 		break;
1540 	case NVME_FEAT_ASYNC_EVENT:
1541 		nvmet_get_feat_async_event(req);
1542 		break;
1543 	case NVME_FEAT_VOLATILE_WC:
1544 		nvmet_set_result(req, 1);
1545 		break;
1546 	case NVME_FEAT_NUM_QUEUES:
1547 		nvmet_set_result(req,
1548 			(subsys->max_qid-1) | ((subsys->max_qid-1) << 16));
1549 		break;
1550 	case NVME_FEAT_KATO:
1551 		nvmet_get_feat_kato(req);
1552 		break;
1553 	case NVME_FEAT_HOST_ID:
1554 		/* need 128-bit host identifier flag */
1555 		if (!(req->cmd->common.cdw11 & cpu_to_le32(1 << 0))) {
1556 			req->error_loc =
1557 				offsetof(struct nvme_common_command, cdw11);
1558 			status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
1559 			break;
1560 		}
1561 
1562 		status = nvmet_copy_to_sgl(req, 0, &req->sq->ctrl->hostid,
1563 				sizeof(req->sq->ctrl->hostid));
1564 		break;
1565 	case NVME_FEAT_WRITE_PROTECT:
1566 		status = nvmet_get_feat_write_protect(req);
1567 		break;
1568 	case NVME_FEAT_RESV_MASK:
1569 		status = nvmet_get_feat_resv_notif_mask(req);
1570 		break;
1571 	default:
1572 		req->error_loc =
1573 			offsetof(struct nvme_common_command, cdw10);
1574 		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
1575 		break;
1576 	}
1577 
1578 	nvmet_req_complete(req, status);
1579 }
1580 
1581 void nvmet_execute_async_event(struct nvmet_req *req)
1582 {
1583 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
1584 
1585 	if (!nvmet_check_transfer_len(req, 0))
1586 		return;
1587 
1588 	mutex_lock(&ctrl->lock);
1589 	if (ctrl->nr_async_event_cmds >= NVMET_ASYNC_EVENTS) {
1590 		mutex_unlock(&ctrl->lock);
1591 		nvmet_req_complete(req, NVME_SC_ASYNC_LIMIT | NVME_STATUS_DNR);
1592 		return;
1593 	}
1594 	ctrl->async_event_cmds[ctrl->nr_async_event_cmds++] = req;
1595 	mutex_unlock(&ctrl->lock);
1596 
1597 	queue_work(nvmet_wq, &ctrl->async_event_work);
1598 }
1599 
1600 void nvmet_execute_keep_alive(struct nvmet_req *req)
1601 {
1602 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
1603 	u16 status = 0;
1604 
1605 	if (!nvmet_check_transfer_len(req, 0))
1606 		return;
1607 
1608 	if (!ctrl->kato) {
1609 		status = NVME_SC_KA_TIMEOUT_INVALID;
1610 		goto out;
1611 	}
1612 
1613 	pr_debug("ctrl %d update keep-alive timer for %d secs\n",
1614 		ctrl->cntlid, ctrl->kato);
1615 	mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
1616 out:
1617 	nvmet_req_complete(req, status);
1618 }
1619 
1620 u32 nvmet_admin_cmd_data_len(struct nvmet_req *req)
1621 {
1622 	struct nvme_command *cmd = req->cmd;
1623 
1624 	if (nvme_is_fabrics(cmd))
1625 		return nvmet_fabrics_admin_cmd_data_len(req);
1626 	if (nvmet_is_disc_subsys(nvmet_req_subsys(req)))
1627 		return nvmet_discovery_cmd_data_len(req);
1628 
1629 	switch (cmd->common.opcode) {
1630 	case nvme_admin_get_log_page:
1631 		return nvmet_get_log_page_len(cmd);
1632 	case nvme_admin_identify:
1633 		return NVME_IDENTIFY_DATA_SIZE;
1634 	case nvme_admin_get_features:
1635 		return nvmet_feat_data_len(req, le32_to_cpu(cmd->common.cdw10));
1636 	default:
1637 		return 0;
1638 	}
1639 }
1640 
1641 u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
1642 {
1643 	struct nvme_command *cmd = req->cmd;
1644 	u16 ret;
1645 
1646 	if (nvme_is_fabrics(cmd))
1647 		return nvmet_parse_fabrics_admin_cmd(req);
1648 	if (nvmet_is_disc_subsys(nvmet_req_subsys(req)))
1649 		return nvmet_parse_discovery_cmd(req);
1650 
1651 	ret = nvmet_check_ctrl_status(req);
1652 	if (unlikely(ret))
1653 		return ret;
1654 
1655 	/* For PCI controllers, admin commands shall not use SGL. */
1656 	if (nvmet_is_pci_ctrl(req->sq->ctrl) && !req->sq->qid &&
1657 	    cmd->common.flags & NVME_CMD_SGL_ALL)
1658 		return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
1659 
1660 	if (nvmet_is_passthru_req(req))
1661 		return nvmet_parse_passthru_admin_cmd(req);
1662 
1663 	switch (cmd->common.opcode) {
1664 	case nvme_admin_delete_sq:
1665 		req->execute = nvmet_execute_delete_sq;
1666 		return 0;
1667 	case nvme_admin_create_sq:
1668 		req->execute = nvmet_execute_create_sq;
1669 		return 0;
1670 	case nvme_admin_get_log_page:
1671 		req->execute = nvmet_execute_get_log_page;
1672 		return 0;
1673 	case nvme_admin_delete_cq:
1674 		req->execute = nvmet_execute_delete_cq;
1675 		return 0;
1676 	case nvme_admin_create_cq:
1677 		req->execute = nvmet_execute_create_cq;
1678 		return 0;
1679 	case nvme_admin_identify:
1680 		req->execute = nvmet_execute_identify;
1681 		return 0;
1682 	case nvme_admin_abort_cmd:
1683 		req->execute = nvmet_execute_abort;
1684 		return 0;
1685 	case nvme_admin_set_features:
1686 		req->execute = nvmet_execute_set_features;
1687 		return 0;
1688 	case nvme_admin_get_features:
1689 		req->execute = nvmet_execute_get_features;
1690 		return 0;
1691 	case nvme_admin_async_event:
1692 		req->execute = nvmet_execute_async_event;
1693 		return 0;
1694 	case nvme_admin_keep_alive:
1695 		req->execute = nvmet_execute_keep_alive;
1696 		return 0;
1697 	default:
1698 		return nvmet_report_invalid_opcode(req);
1699 	}
1700 }
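
#if 0
/*
 * Editor's sketch, not part of the driver: adding support for another
 * admin opcode follows the pattern of the cases above -- implement a
 * handler that validates the transfer length and ends with
 * nvmet_req_complete(), then add a case to nvmet_parse_admin_cmd() that
 * sets req->execute and returns 0. The opcode handled below is purely
 * hypothetical.
 */
static void nvmet_execute_example(struct nvmet_req *req)
{
	/* This hypothetical command carries no data. */
	if (!nvmet_check_transfer_len(req, 0))
		return;

	/* Complete with NVME_SC_SUCCESS and a zero result. */
	nvmet_req_complete(req, 0);
}
#endif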
1701