xref: /linux/drivers/nvme/target/admin-cmd.c (revision 6d8854216ebb60959ddb6f4ea4123bd449ba6cf6)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * NVMe admin command implementation.
4  * Copyright (c) 2015-2016 HGST, a Western Digital Company.
5  */
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7 #include <linux/module.h>
8 #include <linux/rculist.h>
9 #include <linux/part_stat.h>
10 
11 #include <generated/utsrelease.h>
12 #include <linux/unaligned.h>
13 #include "nvmet.h"
14 
15 static void nvmet_execute_delete_sq(struct nvmet_req *req)
16 {
17 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
18 	u16 sqid = le16_to_cpu(req->cmd->delete_queue.qid);
19 	u16 status;
20 
21 	if (!nvmet_is_pci_ctrl(ctrl)) {
22 		status = nvmet_report_invalid_opcode(req);
23 		goto complete;
24 	}
25 
26 	if (!sqid) {
27 		status = NVME_SC_QID_INVALID | NVME_STATUS_DNR;
28 		goto complete;
29 	}
30 
31 	status = nvmet_check_sqid(ctrl, sqid, false);
32 	if (status != NVME_SC_SUCCESS)
33 		goto complete;
34 
35 	status = ctrl->ops->delete_sq(ctrl, sqid);
36 
37 complete:
38 	nvmet_req_complete(req, status);
39 }
40 
41 static void nvmet_execute_create_sq(struct nvmet_req *req)
42 {
43 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
44 	struct nvme_command *cmd = req->cmd;
45 	u16 sqid = le16_to_cpu(cmd->create_sq.sqid);
46 	u16 cqid = le16_to_cpu(cmd->create_sq.cqid);
47 	u16 sq_flags = le16_to_cpu(cmd->create_sq.sq_flags);
48 	u16 qsize = le16_to_cpu(cmd->create_sq.qsize);
49 	u64 prp1 = le64_to_cpu(cmd->create_sq.prp1);
50 	u16 status;
51 
52 	if (!nvmet_is_pci_ctrl(ctrl)) {
53 		status = nvmet_report_invalid_opcode(req);
54 		goto complete;
55 	}
56 
57 	if (!sqid) {
58 		status = NVME_SC_QID_INVALID | NVME_STATUS_DNR;
59 		goto complete;
60 	}
61 
62 	status = nvmet_check_sqid(ctrl, sqid, true);
63 	if (status != NVME_SC_SUCCESS)
64 		goto complete;
65 
66 	status = nvmet_check_io_cqid(ctrl, cqid, false);
67 	if (status != NVME_SC_SUCCESS) {
68 		pr_err("SQ %u: Invalid CQID %u\n", sqid, cqid);
69 		goto complete;
70 	}
71 
72 	if (!qsize || qsize > NVME_CAP_MQES(ctrl->cap)) {
73 		status = NVME_SC_QUEUE_SIZE | NVME_STATUS_DNR;
74 		goto complete;
75 	}
76 
77 	status = ctrl->ops->create_sq(ctrl, sqid, cqid, sq_flags, qsize, prp1);
78 
79 complete:
80 	nvmet_req_complete(req, status);
81 }
82 
83 static void nvmet_execute_delete_cq(struct nvmet_req *req)
84 {
85 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
86 	u16 cqid = le16_to_cpu(req->cmd->delete_queue.qid);
87 	u16 status;
88 
89 	if (!nvmet_is_pci_ctrl(ctrl)) {
90 		status = nvmet_report_invalid_opcode(req);
91 		goto complete;
92 	}
93 
94 	status = nvmet_check_io_cqid(ctrl, cqid, false);
95 	if (status != NVME_SC_SUCCESS)
96 		goto complete;
97 
98 	if (!ctrl->cqs[cqid] || nvmet_cq_in_use(ctrl->cqs[cqid])) {
99 		/* The CQ does not exist or some SQs are still using it */
100 		status = NVME_SC_QID_INVALID | NVME_STATUS_DNR;
101 		goto complete;
102 	}
103 
104 	status = ctrl->ops->delete_cq(ctrl, cqid);
105 
106 complete:
107 	nvmet_req_complete(req, status);
108 }
109 
110 static void nvmet_execute_create_cq(struct nvmet_req *req)
111 {
112 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
113 	struct nvme_command *cmd = req->cmd;
114 	u16 cqid = le16_to_cpu(cmd->create_cq.cqid);
115 	u16 cq_flags = le16_to_cpu(cmd->create_cq.cq_flags);
116 	u16 qsize = le16_to_cpu(cmd->create_cq.qsize);
117 	u16 irq_vector = le16_to_cpu(cmd->create_cq.irq_vector);
118 	u64 prp1 = le64_to_cpu(cmd->create_cq.prp1);
119 	u16 status;
120 
121 	if (!nvmet_is_pci_ctrl(ctrl)) {
122 		status = nvmet_report_invalid_opcode(req);
123 		goto complete;
124 	}
125 
126 	status = nvmet_check_io_cqid(ctrl, cqid, true);
127 	if (status != NVME_SC_SUCCESS)
128 		goto complete;
129 
130 	if (!qsize || qsize > NVME_CAP_MQES(ctrl->cap)) {
131 		status = NVME_SC_QUEUE_SIZE | NVME_STATUS_DNR;
132 		goto complete;
133 	}
134 
135 	status = ctrl->ops->create_cq(ctrl, cqid, cq_flags, qsize,
136 				      prp1, irq_vector);
137 
138 complete:
139 	nvmet_req_complete(req, status);
140 }
141 
142 u32 nvmet_get_log_page_len(struct nvme_command *cmd)
143 {
144 	u32 len = le16_to_cpu(cmd->get_log_page.numdu);
145 
146 	len <<= 16;
147 	len += le16_to_cpu(cmd->get_log_page.numdl);
148 	/* NUMD is a 0's based value */
149 	len += 1;
150 	len *= sizeof(u32);
151 
152 	return len;
153 }
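
/*
 * Worked example (illustrative values): NUMDU = 0 and NUMDL = 0x3ff give
 * NUMD = 0x3ff; since NUMD is a 0's based value, the transfer length is
 * (0x3ff + 1) * sizeof(u32) = 4096 bytes, i.e. one 4KiB log page.
 */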
154 
155 static u32 nvmet_feat_data_len(struct nvmet_req *req, u32 cdw10)
156 {
157 	switch (cdw10 & 0xff) {
158 	case NVME_FEAT_HOST_ID:
159 		return sizeof(req->sq->ctrl->hostid);
160 	default:
161 		return 0;
162 	}
163 }
164 
165 u64 nvmet_get_log_page_offset(struct nvme_command *cmd)
166 {
167 	return le64_to_cpu(cmd->get_log_page.lpo);
168 }
169 
170 static void nvmet_execute_get_log_page_noop(struct nvmet_req *req)
171 {
172 	nvmet_req_complete(req, nvmet_zero_sgl(req, 0, req->transfer_len));
173 }
174 
175 static void nvmet_execute_get_log_page_error(struct nvmet_req *req)
176 {
177 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
178 	unsigned long flags;
179 	off_t offset = 0;
180 	u64 slot;
181 	u64 i;
182 
183 	spin_lock_irqsave(&ctrl->error_lock, flags);
184 	slot = ctrl->err_counter % NVMET_ERROR_LOG_SLOTS;
185 
186 	for (i = 0; i < NVMET_ERROR_LOG_SLOTS; i++) {
187 		if (nvmet_copy_to_sgl(req, offset, &ctrl->slots[slot],
188 				sizeof(struct nvme_error_slot)))
189 			break;
190 
191 		if (slot == 0)
192 			slot = NVMET_ERROR_LOG_SLOTS - 1;
193 		else
194 			slot--;
195 		offset += sizeof(struct nvme_error_slot);
196 	}
197 	spin_unlock_irqrestore(&ctrl->error_lock, flags);
198 	nvmet_req_complete(req, 0);
199 }
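
/*
 * Illustrative walk of the loop above: assuming NVMET_ERROR_LOG_SLOTS is 128
 * and err_counter is 130, the starting slot is 130 % 128 = 2, so entries are
 * copied from slots 2, 1, 0, 127, 126, ... wrapping around until all 128
 * slots have been returned (or an SGL copy fails).
 */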
200 
201 static void nvmet_execute_get_supported_log_pages(struct nvmet_req *req)
202 {
203 	struct nvme_supported_log *logs;
204 	u16 status;
205 
206 	logs = kzalloc(sizeof(*logs), GFP_KERNEL);
207 	if (!logs) {
208 		status = NVME_SC_INTERNAL;
209 		goto out;
210 	}
211 
212 	logs->lids[NVME_LOG_SUPPORTED] = cpu_to_le32(NVME_LIDS_LSUPP);
213 	logs->lids[NVME_LOG_ERROR] = cpu_to_le32(NVME_LIDS_LSUPP);
214 	logs->lids[NVME_LOG_SMART] = cpu_to_le32(NVME_LIDS_LSUPP);
215 	logs->lids[NVME_LOG_FW_SLOT] = cpu_to_le32(NVME_LIDS_LSUPP);
216 	logs->lids[NVME_LOG_CHANGED_NS] = cpu_to_le32(NVME_LIDS_LSUPP);
217 	logs->lids[NVME_LOG_CMD_EFFECTS] = cpu_to_le32(NVME_LIDS_LSUPP);
218 	logs->lids[NVME_LOG_ENDURANCE_GROUP] = cpu_to_le32(NVME_LIDS_LSUPP);
219 	logs->lids[NVME_LOG_ANA] = cpu_to_le32(NVME_LIDS_LSUPP);
220 	logs->lids[NVME_LOG_FEATURES] = cpu_to_le32(NVME_LIDS_LSUPP);
221 	logs->lids[NVME_LOG_RMI] = cpu_to_le32(NVME_LIDS_LSUPP);
222 	logs->lids[NVME_LOG_RESERVATION] = cpu_to_le32(NVME_LIDS_LSUPP);
223 
224 	status = nvmet_copy_to_sgl(req, 0, logs, sizeof(*logs));
225 	kfree(logs);
226 out:
227 	nvmet_req_complete(req, status);
228 }
229 
230 static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
231 		struct nvme_smart_log *slog)
232 {
233 	u64 host_reads, host_writes, data_units_read, data_units_written;
234 	u16 status;
235 
236 	status = nvmet_req_find_ns(req);
237 	if (status)
238 		return status;
239 
240 	/* we don't have the right data for file backed ns */
241 	if (!req->ns->bdev)
242 		return NVME_SC_SUCCESS;
243 
244 	host_reads = part_stat_read(req->ns->bdev, ios[READ]);
245 	data_units_read =
246 		DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[READ]), 1000);
247 	host_writes = part_stat_read(req->ns->bdev, ios[WRITE]);
248 	data_units_written =
249 		DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[WRITE]), 1000);
250 
251 	put_unaligned_le64(host_reads, &slog->host_reads[0]);
252 	put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
253 	put_unaligned_le64(host_writes, &slog->host_writes[0]);
254 	put_unaligned_le64(data_units_written, &slog->data_units_written[0]);
255 
256 	return NVME_SC_SUCCESS;
257 }
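
/*
 * Note on units: part_stat sector counts are in 512-byte units, so the
 * DIV_ROUND_UP(..., 1000) above reports the SMART data unit fields in the
 * spec's "1000 units of 512 bytes" granularity, rounded up. For example,
 * 1,000,001 sectors read is reported as 1001 data units.
 */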
258 
259 static u16 nvmet_get_smart_log_all(struct nvmet_req *req,
260 		struct nvme_smart_log *slog)
261 {
262 	u64 host_reads = 0, host_writes = 0;
263 	u64 data_units_read = 0, data_units_written = 0;
264 	struct nvmet_ns *ns;
265 	struct nvmet_ctrl *ctrl;
266 	unsigned long idx;
267 
268 	ctrl = req->sq->ctrl;
269 	nvmet_for_each_enabled_ns(&ctrl->subsys->namespaces, idx, ns) {
270 		/* we don't have the right data for file backed ns */
271 		if (!ns->bdev)
272 			continue;
273 		host_reads += part_stat_read(ns->bdev, ios[READ]);
274 		data_units_read += DIV_ROUND_UP(
275 			part_stat_read(ns->bdev, sectors[READ]), 1000);
276 		host_writes += part_stat_read(ns->bdev, ios[WRITE]);
277 		data_units_written += DIV_ROUND_UP(
278 			part_stat_read(ns->bdev, sectors[WRITE]), 1000);
279 	}
280 
281 	put_unaligned_le64(host_reads, &slog->host_reads[0]);
282 	put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
283 	put_unaligned_le64(host_writes, &slog->host_writes[0]);
284 	put_unaligned_le64(data_units_written, &slog->data_units_written[0]);
285 
286 	return NVME_SC_SUCCESS;
287 }
288 
289 static void nvmet_execute_get_log_page_rmi(struct nvmet_req *req)
290 {
291 	struct nvme_rotational_media_log *log;
292 	struct gendisk *disk;
293 	u16 status;
294 
295 	req->cmd->common.nsid = cpu_to_le32(le16_to_cpu(
296 					    req->cmd->get_log_page.lsi));
297 	status = nvmet_req_find_ns(req);
298 	if (status)
299 		goto out;
300 
301 	if (!req->ns->bdev || bdev_nonrot(req->ns->bdev)) {
302 		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
303 		goto out;
304 	}
305 
306 	if (req->transfer_len != sizeof(*log)) {
307 		status = NVME_SC_SGL_INVALID_DATA | NVME_STATUS_DNR;
308 		goto out;
309 	}
310 
311 	log = kzalloc(sizeof(*log), GFP_KERNEL);
312 	if (!log)
313 		goto out;
314 
315 	log->endgid = req->cmd->get_log_page.lsi;
316 	disk = req->ns->bdev->bd_disk;
317 	if (disk && disk->ia_ranges)
318 		log->numa = cpu_to_le16(disk->ia_ranges->nr_ia_ranges);
319 	else
320 		log->numa = cpu_to_le16(1);
321 
322 	status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
323 	kfree(log);
324 out:
325 	nvmet_req_complete(req, status);
326 }
327 
328 static void nvmet_execute_get_log_page_smart(struct nvmet_req *req)
329 {
330 	struct nvme_smart_log *log;
331 	u16 status = NVME_SC_INTERNAL;
332 	unsigned long flags;
333 
334 	if (req->transfer_len != sizeof(*log))
335 		goto out;
336 
337 	log = kzalloc(sizeof(*log), GFP_KERNEL);
338 	if (!log)
339 		goto out;
340 
341 	if (req->cmd->get_log_page.nsid == cpu_to_le32(NVME_NSID_ALL))
342 		status = nvmet_get_smart_log_all(req, log);
343 	else
344 		status = nvmet_get_smart_log_nsid(req, log);
345 	if (status)
346 		goto out_free_log;
347 
348 	spin_lock_irqsave(&req->sq->ctrl->error_lock, flags);
349 	put_unaligned_le64(req->sq->ctrl->err_counter,
350 			&log->num_err_log_entries);
351 	spin_unlock_irqrestore(&req->sq->ctrl->error_lock, flags);
352 
353 	status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
354 out_free_log:
355 	kfree(log);
356 out:
357 	nvmet_req_complete(req, status);
358 }
359 
360 static void nvmet_get_cmd_effects_admin(struct nvmet_ctrl *ctrl,
361 					struct nvme_effects_log *log)
362 {
363 	/* For a PCI target controller, advertise support for the I/O queue management commands. */
364 	if (nvmet_is_pci_ctrl(ctrl)) {
365 		log->acs[nvme_admin_delete_sq] =
366 		log->acs[nvme_admin_create_sq] =
367 		log->acs[nvme_admin_delete_cq] =
368 		log->acs[nvme_admin_create_cq] =
369 			cpu_to_le32(NVME_CMD_EFFECTS_CSUPP);
370 	}
371 
372 	log->acs[nvme_admin_get_log_page] =
373 	log->acs[nvme_admin_identify] =
374 	log->acs[nvme_admin_abort_cmd] =
375 	log->acs[nvme_admin_set_features] =
376 	log->acs[nvme_admin_get_features] =
377 	log->acs[nvme_admin_async_event] =
378 	log->acs[nvme_admin_keep_alive] =
379 		cpu_to_le32(NVME_CMD_EFFECTS_CSUPP);
380 }
381 
382 static void nvmet_get_cmd_effects_nvm(struct nvme_effects_log *log)
383 {
384 	log->iocs[nvme_cmd_read] =
385 	log->iocs[nvme_cmd_flush] =
386 	log->iocs[nvme_cmd_dsm]	=
387 	log->iocs[nvme_cmd_resv_acquire] =
388 	log->iocs[nvme_cmd_resv_register] =
389 	log->iocs[nvme_cmd_resv_release] =
390 	log->iocs[nvme_cmd_resv_report] =
391 		cpu_to_le32(NVME_CMD_EFFECTS_CSUPP);
392 	log->iocs[nvme_cmd_write] =
393 	log->iocs[nvme_cmd_write_zeroes] =
394 		cpu_to_le32(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC);
395 }
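
/*
 * The effects bits used above: NVME_CMD_EFFECTS_CSUPP (bit 0) means the
 * command is supported, and NVME_CMD_EFFECTS_LBCC (bit 1) means the command
 * may change logical block content, which is why only write, write_zeroes
 * and the zone commands below advertise LBCC as well.
 */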
396 
397 static void nvmet_get_cmd_effects_zns(struct nvme_effects_log *log)
398 {
399 	log->iocs[nvme_cmd_zone_append] =
400 	log->iocs[nvme_cmd_zone_mgmt_send] =
401 		cpu_to_le32(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC);
402 	log->iocs[nvme_cmd_zone_mgmt_recv] =
403 		cpu_to_le32(NVME_CMD_EFFECTS_CSUPP);
404 }
405 
406 static void nvmet_execute_get_log_cmd_effects_ns(struct nvmet_req *req)
407 {
408 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
409 	struct nvme_effects_log *log;
410 	u16 status = NVME_SC_SUCCESS;
411 
412 	log = kzalloc(sizeof(*log), GFP_KERNEL);
413 	if (!log) {
414 		status = NVME_SC_INTERNAL;
415 		goto out;
416 	}
417 
418 	switch (req->cmd->get_log_page.csi) {
419 	case NVME_CSI_NVM:
420 		nvmet_get_cmd_effects_admin(ctrl, log);
421 		nvmet_get_cmd_effects_nvm(log);
422 		break;
423 	case NVME_CSI_ZNS:
424 		if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
425 			status = NVME_SC_INVALID_IO_CMD_SET;
426 			goto free;
427 		}
428 		nvmet_get_cmd_effects_admin(ctrl, log);
429 		nvmet_get_cmd_effects_nvm(log);
430 		nvmet_get_cmd_effects_zns(log);
431 		break;
432 	default:
433 		status = NVME_SC_INVALID_LOG_PAGE;
434 		goto free;
435 	}
436 
437 	status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
438 free:
439 	kfree(log);
440 out:
441 	nvmet_req_complete(req, status);
442 }
443 
444 static void nvmet_execute_get_log_changed_ns(struct nvmet_req *req)
445 {
446 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
447 	u16 status = NVME_SC_INTERNAL;
448 	size_t len;
449 
450 	if (req->transfer_len != NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32))
451 		goto out;
452 
453 	mutex_lock(&ctrl->lock);
454 	if (ctrl->nr_changed_ns == U32_MAX)
455 		len = sizeof(__le32);
456 	else
457 		len = ctrl->nr_changed_ns * sizeof(__le32);
458 	status = nvmet_copy_to_sgl(req, 0, ctrl->changed_ns_list, len);
459 	if (!status)
460 		status = nvmet_zero_sgl(req, len, req->transfer_len - len);
461 	ctrl->nr_changed_ns = 0;
462 	nvmet_clear_aen_bit(req, NVME_AEN_BIT_NS_ATTR);
463 	mutex_unlock(&ctrl->lock);
464 out:
465 	nvmet_req_complete(req, status);
466 }
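
/*
 * Overflow handling above: when more namespace changes accumulate than the
 * list can hold, the core marks this by setting nr_changed_ns to U32_MAX, and
 * only the first list entry (the spec-defined 0xffffffff marker meaning more
 * namespaces changed than fit in the log) is returned; the remainder of the
 * page is zero-filled by nvmet_zero_sgl().
 */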
467 
468 static u32 nvmet_format_ana_group(struct nvmet_req *req, u32 grpid,
469 		struct nvme_ana_group_desc *desc)
470 {
471 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
472 	struct nvmet_ns *ns;
473 	unsigned long idx;
474 	u32 count = 0;
475 
476 	if (!(req->cmd->get_log_page.lsp & NVME_ANA_LOG_RGO)) {
477 		nvmet_for_each_enabled_ns(&ctrl->subsys->namespaces, idx, ns) {
478 			if (ns->anagrpid == grpid)
479 				desc->nsids[count++] = cpu_to_le32(ns->nsid);
480 		}
481 	}
482 
483 	desc->grpid = cpu_to_le32(grpid);
484 	desc->nnsids = cpu_to_le32(count);
485 	desc->chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
486 	desc->state = req->port->ana_state[grpid];
487 	memset(desc->rsvd17, 0, sizeof(desc->rsvd17));
488 	return struct_size(desc, nsids, count);
489 }
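
/*
 * Size example for the return value above (assuming the usual 32-byte fixed
 * part of struct nvme_ana_group_desc): a group with three matching namespaces
 * and RGO clear yields 32 + 3 * sizeof(__le32) = 44 bytes, while a request
 * with the RGO bit set returns only the 32-byte header with nnsids == 0.
 */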
490 
491 static void nvmet_execute_get_log_page_endgrp(struct nvmet_req *req)
492 {
493 	u64 host_reads, host_writes, data_units_read, data_units_written;
494 	struct nvme_endurance_group_log *log;
495 	u16 status;
496 
497 	/*
498 	 * The target driver emulates each endurance group as its own
499 	 * namespace, reusing the nsid as the endurance group identifier.
500 	 */
501 	req->cmd->common.nsid = cpu_to_le32(le16_to_cpu(
502 					    req->cmd->get_log_page.lsi));
503 	status = nvmet_req_find_ns(req);
504 	if (status)
505 		goto out;
506 
507 	log = kzalloc(sizeof(*log), GFP_KERNEL);
508 	if (!log) {
509 		status = NVME_SC_INTERNAL;
510 		goto out;
511 	}
512 
513 	if (!req->ns->bdev)
514 		goto copy;
515 
516 	host_reads = part_stat_read(req->ns->bdev, ios[READ]);
517 	data_units_read =
518 		DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[READ]), 1000);
519 	host_writes = part_stat_read(req->ns->bdev, ios[WRITE]);
520 	data_units_written =
521 		DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[WRITE]), 1000);
522 
523 	put_unaligned_le64(host_reads, &log->hrc[0]);
524 	put_unaligned_le64(data_units_read, &log->dur[0]);
525 	put_unaligned_le64(host_writes, &log->hwc[0]);
526 	put_unaligned_le64(data_units_written, &log->duw[0]);
527 copy:
528 	status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
529 	kfree(log);
530 out:
531 	nvmet_req_complete(req, status);
532 }
533 
534 static void nvmet_execute_get_log_page_ana(struct nvmet_req *req)
535 {
536 	struct nvme_ana_rsp_hdr hdr = { 0, };
537 	struct nvme_ana_group_desc *desc;
538 	size_t offset = sizeof(struct nvme_ana_rsp_hdr); /* start beyond hdr */
539 	size_t len;
540 	u32 grpid;
541 	u16 ngrps = 0;
542 	u16 status;
543 
544 	status = NVME_SC_INTERNAL;
545 	desc = kmalloc(struct_size(desc, nsids, NVMET_MAX_NAMESPACES),
546 		       GFP_KERNEL);
547 	if (!desc)
548 		goto out;
549 
550 	down_read(&nvmet_ana_sem);
551 	for (grpid = 1; grpid <= NVMET_MAX_ANAGRPS; grpid++) {
552 		if (!nvmet_ana_group_enabled[grpid])
553 			continue;
554 		len = nvmet_format_ana_group(req, grpid, desc);
555 		status = nvmet_copy_to_sgl(req, offset, desc, len);
556 		if (status)
557 			break;
558 		offset += len;
559 		ngrps++;
560 	}
561 	for ( ; grpid <= NVMET_MAX_ANAGRPS; grpid++) {
562 		if (nvmet_ana_group_enabled[grpid])
563 			ngrps++;
564 	}
565 
566 	hdr.chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
567 	hdr.ngrps = cpu_to_le16(ngrps);
568 	nvmet_clear_aen_bit(req, NVME_AEN_BIT_ANA_CHANGE);
569 	up_read(&nvmet_ana_sem);
570 
571 	kfree(desc);
572 
573 	/* copy the header last once we know the number of groups */
574 	status = nvmet_copy_to_sgl(req, 0, &hdr, sizeof(hdr));
575 out:
576 	nvmet_req_complete(req, status);
577 }
578 
579 static void nvmet_execute_get_log_page_features(struct nvmet_req *req)
580 {
581 	struct nvme_supported_features_log *features;
582 	u16 status;
583 
584 	features = kzalloc(sizeof(*features), GFP_KERNEL);
585 	if (!features) {
586 		status = NVME_SC_INTERNAL;
587 		goto out;
588 	}
589 
590 	features->fis[NVME_FEAT_NUM_QUEUES] =
591 		cpu_to_le32(NVME_FIS_FSUPP | NVME_FIS_CSCPE);
592 	features->fis[NVME_FEAT_KATO] =
593 		cpu_to_le32(NVME_FIS_FSUPP | NVME_FIS_CSCPE);
594 	features->fis[NVME_FEAT_ASYNC_EVENT] =
595 		cpu_to_le32(NVME_FIS_FSUPP | NVME_FIS_CSCPE);
596 	features->fis[NVME_FEAT_HOST_ID] =
597 		cpu_to_le32(NVME_FIS_FSUPP | NVME_FIS_CSCPE);
598 	features->fis[NVME_FEAT_WRITE_PROTECT] =
599 		cpu_to_le32(NVME_FIS_FSUPP | NVME_FIS_NSCPE);
600 	features->fis[NVME_FEAT_RESV_MASK] =
601 		cpu_to_le32(NVME_FIS_FSUPP | NVME_FIS_NSCPE);
602 
603 	status = nvmet_copy_to_sgl(req, 0, features, sizeof(*features));
604 	kfree(features);
605 out:
606 	nvmet_req_complete(req, status);
607 }
608 
609 static void nvmet_execute_get_log_page(struct nvmet_req *req)
610 {
611 	if (!nvmet_check_transfer_len(req, nvmet_get_log_page_len(req->cmd)))
612 		return;
613 
614 	switch (req->cmd->get_log_page.lid) {
615 	case NVME_LOG_SUPPORTED:
616 		return nvmet_execute_get_supported_log_pages(req);
617 	case NVME_LOG_ERROR:
618 		return nvmet_execute_get_log_page_error(req);
619 	case NVME_LOG_SMART:
620 		return nvmet_execute_get_log_page_smart(req);
621 	case NVME_LOG_FW_SLOT:
622 		/*
623 		 * We only support a single firmware slot which always is
624 		 * active, so we can zero out the whole firmware slot log and
625 		 * still claim to fully implement this mandatory log page.
626 		 */
627 		return nvmet_execute_get_log_page_noop(req);
628 	case NVME_LOG_CHANGED_NS:
629 		return nvmet_execute_get_log_changed_ns(req);
630 	case NVME_LOG_CMD_EFFECTS:
631 		return nvmet_execute_get_log_cmd_effects_ns(req);
632 	case NVME_LOG_ENDURANCE_GROUP:
633 		return nvmet_execute_get_log_page_endgrp(req);
634 	case NVME_LOG_ANA:
635 		return nvmet_execute_get_log_page_ana(req);
636 	case NVME_LOG_FEATURES:
637 		return nvmet_execute_get_log_page_features(req);
638 	case NVME_LOG_RMI:
639 		return nvmet_execute_get_log_page_rmi(req);
640 	case NVME_LOG_RESERVATION:
641 		return nvmet_execute_get_log_page_resv(req);
642 	}
643 	pr_debug("unhandled lid %d on qid %d\n",
644 	       req->cmd->get_log_page.lid, req->sq->qid);
645 	req->error_loc = offsetof(struct nvme_get_log_page_command, lid);
646 	nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_STATUS_DNR);
647 }
648 
649 static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
650 {
651 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
652 	struct nvmet_subsys *subsys = ctrl->subsys;
653 	struct nvme_id_ctrl *id;
654 	u32 cmd_capsule_size, ctratt;
655 	u16 status = 0;
656 
657 	if (!subsys->subsys_discovered) {
658 		mutex_lock(&subsys->lock);
659 		subsys->subsys_discovered = true;
660 		mutex_unlock(&subsys->lock);
661 	}
662 
663 	id = kzalloc(sizeof(*id), GFP_KERNEL);
664 	if (!id) {
665 		status = NVME_SC_INTERNAL;
666 		goto out;
667 	}
668 
669 	id->vid = cpu_to_le16(subsys->vendor_id);
670 	id->ssvid = cpu_to_le16(subsys->subsys_vendor_id);
671 
672 	memcpy(id->sn, ctrl->subsys->serial, NVMET_SN_MAX_SIZE);
673 	memcpy_and_pad(id->mn, sizeof(id->mn), subsys->model_number,
674 		       strlen(subsys->model_number), ' ');
675 	memcpy_and_pad(id->fr, sizeof(id->fr),
676 		       subsys->firmware_rev, strlen(subsys->firmware_rev), ' ');
677 
678 	put_unaligned_le24(subsys->ieee_oui, id->ieee);
679 
680 	id->rab = 6;
681 
682 	if (nvmet_is_disc_subsys(ctrl->subsys))
683 		id->cntrltype = NVME_CTRL_DISC;
684 	else
685 		id->cntrltype = NVME_CTRL_IO;
686 
687 	/* we support multiple ports, multiple hosts and ANA: */
688 	id->cmic = NVME_CTRL_CMIC_MULTI_PORT | NVME_CTRL_CMIC_MULTI_CTRL |
689 		NVME_CTRL_CMIC_ANA;
690 
691 	/* Limit MDTS according to transport capability */
692 	if (ctrl->ops->get_mdts)
693 		id->mdts = ctrl->ops->get_mdts(ctrl);
694 	else
695 		id->mdts = 0;
696 
697 	id->cntlid = cpu_to_le16(ctrl->cntlid);
698 	id->ver = cpu_to_le32(ctrl->subsys->ver);
699 
700 	/* XXX: figure out what to do about RTD3R/RTD3 */
701 	id->oaes = cpu_to_le32(NVMET_AEN_CFG_OPTIONAL);
702 	ctratt = NVME_CTRL_ATTR_HID_128_BIT | NVME_CTRL_ATTR_TBKAS;
703 	if (nvmet_is_pci_ctrl(ctrl))
704 		ctratt |= NVME_CTRL_ATTR_RHII;
705 	id->ctratt = cpu_to_le32(ctratt);
706 
707 	id->oacs = 0;
708 
709 	/*
710 	 * We don't really have a practical limit on the number of abort
711 	 * commands.  But we don't do anything useful for abort either, so
712 	 * no point in allowing more abort commands than the spec requires.
713 	 */
714 	id->acl = 3;
715 
716 	id->aerl = NVMET_ASYNC_EVENTS - 1;
717 
718 	/* first slot is read-only, only one slot supported */
719 	id->frmw = (1 << 0) | (1 << 1);
720 	id->lpa = (1 << 0) | (1 << 1) | (1 << 2);
721 	id->elpe = NVMET_ERROR_LOG_SLOTS - 1;
722 	id->npss = 0;
723 
724 	/* We support keep-alive timeout in granularity of seconds */
725 	id->kas = cpu_to_le16(NVMET_KAS);
726 
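	/*
	 * SQES/CQES pack the maximum (upper nibble) and required (lower
	 * nibble) entry sizes as powers of two: 0x66 below means only
	 * 2^6 = 64-byte submission queue entries, 0x44 means only
	 * 2^4 = 16-byte completion queue entries.
	 */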
727 	id->sqes = (0x6 << 4) | 0x6;
728 	id->cqes = (0x4 << 4) | 0x4;
729 
730 	/* no enforced soft-limit for maxcmd - pick an arbitrary high value */
731 	id->maxcmd = cpu_to_le16(NVMET_MAX_CMD(ctrl));
732 
733 	id->nn = cpu_to_le32(NVMET_MAX_NAMESPACES);
734 	id->mnan = cpu_to_le32(NVMET_MAX_NAMESPACES);
735 	id->oncs = cpu_to_le16(NVME_CTRL_ONCS_DSM |
736 			NVME_CTRL_ONCS_WRITE_ZEROES |
737 			NVME_CTRL_ONCS_RESERVATIONS);
738 
739 	/* XXX: don't report vwc if the underlying device is write through */
740 	id->vwc = NVME_CTRL_VWC_PRESENT;
741 
742 	/*
743 	 * We can't support atomic writes bigger than an LBA without support
744 	 * from the backend device.
745 	 */
746 	id->awun = 0;
747 	id->awupf = 0;
748 
749 	/* we always support SGLs */
750 	id->sgls = cpu_to_le32(NVME_CTRL_SGLS_BYTE_ALIGNED);
751 	if (ctrl->ops->flags & NVMF_KEYED_SGLS)
752 		id->sgls |= cpu_to_le32(NVME_CTRL_SGLS_KSDBDS);
753 	if (req->port->inline_data_size)
754 		id->sgls |= cpu_to_le32(NVME_CTRL_SGLS_SAOS);
755 
756 	strscpy(id->subnqn, ctrl->subsys->subsysnqn, sizeof(id->subnqn));
757 
758 	/*
759 	 * Max command capsule size is sqe + in-capsule data size.
760 	 * Disable in-capsule data for Metadata capable controllers.
761 	 */
762 	cmd_capsule_size = sizeof(struct nvme_command);
763 	if (!ctrl->pi_support)
764 		cmd_capsule_size += req->port->inline_data_size;
765 	id->ioccsz = cpu_to_le32(cmd_capsule_size / 16);
766 
767 	/* Max response capsule size is cqe */
768 	id->iorcsz = cpu_to_le32(sizeof(struct nvme_completion) / 16);
769 
770 	id->msdbd = ctrl->ops->msdbd;
771 
772 	/*
773 	 * Endurance group identifier is 16 bits, so we can't let namespaces
774 	 * overflow that since we reuse the nsid
775 	 */
776 	BUILD_BUG_ON(NVMET_MAX_NAMESPACES > USHRT_MAX);
777 	id->endgidmax = cpu_to_le16(NVMET_MAX_NAMESPACES);
778 
779 	id->anacap = (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3) | (1 << 4);
780 	id->anatt = 10; /* random value */
781 	id->anagrpmax = cpu_to_le32(NVMET_MAX_ANAGRPS);
782 	id->nanagrpid = cpu_to_le32(NVMET_MAX_ANAGRPS);
783 
784 	/*
785 	 * Meh, we don't really support any power state.  Fake up the same
786 	 * values that qemu does.
787 	 */
788 	id->psd[0].max_power = cpu_to_le16(0x9c4);
789 	id->psd[0].entry_lat = cpu_to_le32(0x10);
790 	id->psd[0].exit_lat = cpu_to_le32(0x4);
791 
792 	id->nwpc = 1 << 0; /* write protect and no write protect */
793 
794 	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
795 
796 	kfree(id);
797 out:
798 	nvmet_req_complete(req, status);
799 }
800 
801 static void nvmet_execute_identify_ns(struct nvmet_req *req)
802 {
803 	struct nvme_id_ns *id;
804 	u16 status;
805 
806 	if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) {
807 		req->error_loc = offsetof(struct nvme_identify, nsid);
808 		status = NVME_SC_INVALID_NS | NVME_STATUS_DNR;
809 		goto out;
810 	}
811 
812 	id = kzalloc(sizeof(*id), GFP_KERNEL);
813 	if (!id) {
814 		status = NVME_SC_INTERNAL;
815 		goto out;
816 	}
817 
818 	/* return an all zeroed buffer if we can't find an active namespace */
819 	status = nvmet_req_find_ns(req);
820 	if (status) {
821 		status = 0;
822 		goto done;
823 	}
824 
825 	if (nvmet_ns_revalidate(req->ns)) {
826 		mutex_lock(&req->ns->subsys->lock);
827 		nvmet_ns_changed(req->ns->subsys, req->ns->nsid);
828 		mutex_unlock(&req->ns->subsys->lock);
829 	}
830 
831 	/*
832 	 * nuse = ncap = nsze isn't always true, but we have no way to find
833 	 * that out from the underlying device.
834 	 */
835 	id->ncap = id->nsze =
836 		cpu_to_le64(req->ns->size >> req->ns->blksize_shift);
837 	switch (req->port->ana_state[req->ns->anagrpid]) {
838 	case NVME_ANA_INACCESSIBLE:
839 	case NVME_ANA_PERSISTENT_LOSS:
840 		break;
841 	default:
842 		id->nuse = id->nsze;
843 		break;
844 	}
845 
846 	if (req->ns->bdev)
847 		nvmet_bdev_set_limits(req->ns->bdev, id);
848 
849 	/*
850 	 * We just provide a single LBA format that matches what the
851 	 * underlying device reports.
852 	 */
853 	id->nlbaf = 0;
854 	id->flbas = 0;
855 
856 	/*
857 	 * Our namespace might always be shared.  Not just with other
858 	 * controllers, but also with any other user of the block device.
859 	 */
860 	id->nmic = NVME_NS_NMIC_SHARED;
861 	id->anagrpid = cpu_to_le32(req->ns->anagrpid);
862 
863 	if (req->ns->pr.enable)
864 		id->rescap = NVME_PR_SUPPORT_WRITE_EXCLUSIVE |
865 			NVME_PR_SUPPORT_EXCLUSIVE_ACCESS |
866 			NVME_PR_SUPPORT_WRITE_EXCLUSIVE_REG_ONLY |
867 			NVME_PR_SUPPORT_EXCLUSIVE_ACCESS_REG_ONLY |
868 			NVME_PR_SUPPORT_WRITE_EXCLUSIVE_ALL_REGS |
869 			NVME_PR_SUPPORT_EXCLUSIVE_ACCESS_ALL_REGS |
870 			NVME_PR_SUPPORT_IEKEY_VER_1_3_DEF;
871 
872 	/*
873 	 * Since we don't know any better, every namespace is its own endurance
874 	 * group.
875 	 */
876 	id->endgid = cpu_to_le16(req->ns->nsid);
877 
878 	memcpy(&id->nguid, &req->ns->nguid, sizeof(id->nguid));
879 
880 	id->lbaf[0].ds = req->ns->blksize_shift;
881 
882 	if (req->sq->ctrl->pi_support && nvmet_ns_has_pi(req->ns)) {
883 		id->dpc = NVME_NS_DPC_PI_FIRST | NVME_NS_DPC_PI_LAST |
884 			  NVME_NS_DPC_PI_TYPE1 | NVME_NS_DPC_PI_TYPE2 |
885 			  NVME_NS_DPC_PI_TYPE3;
886 		id->mc = NVME_MC_EXTENDED_LBA;
887 		id->dps = req->ns->pi_type;
888 		id->flbas = NVME_NS_FLBAS_META_EXT;
889 		id->lbaf[0].ms = cpu_to_le16(req->ns->metadata_size);
890 	}
891 
892 	if (req->ns->readonly)
893 		id->nsattr |= NVME_NS_ATTR_RO;
894 done:
895 	if (!status)
896 		status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
897 
898 	kfree(id);
899 out:
900 	nvmet_req_complete(req, status);
901 }
902 
903 static void nvmet_execute_identify_endgrp_list(struct nvmet_req *req)
904 {
905 	u16 min_endgid = le16_to_cpu(req->cmd->identify.cnssid);
906 	static const int buf_size = NVME_IDENTIFY_DATA_SIZE;
907 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
908 	struct nvmet_ns *ns;
909 	unsigned long idx;
910 	__le16 *list;
911 	u16 status;
912 	int i = 1;
913 
914 	list = kzalloc(buf_size, GFP_KERNEL);
915 	if (!list) {
916 		status = NVME_SC_INTERNAL;
917 		goto out;
918 	}
919 
920 	nvmet_for_each_enabled_ns(&ctrl->subsys->namespaces, idx, ns) {
921 		if (ns->nsid <= min_endgid)
922 			continue;
923 
924 		list[i++] = cpu_to_le16(ns->nsid);
925 		if (i == buf_size / sizeof(__le16))
926 			break;
927 	}
928 
929 	list[0] = cpu_to_le16(i - 1);
930 	status = nvmet_copy_to_sgl(req, 0, list, buf_size);
931 	kfree(list);
932 out:
933 	nvmet_req_complete(req, status);
934 }
935 
936 static void nvmet_execute_identify_nslist(struct nvmet_req *req, bool match_css)
937 {
938 	static const int buf_size = NVME_IDENTIFY_DATA_SIZE;
939 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
940 	struct nvmet_ns *ns;
941 	unsigned long idx;
942 	u32 min_nsid = le32_to_cpu(req->cmd->identify.nsid);
943 	__le32 *list;
944 	u16 status = 0;
945 	int i = 0;
946 
947 	/*
948 	 * NSID values 0xFFFFFFFE and NVME_NSID_ALL are invalid.
949 	 * See NVMe Base Specification, Active Namespace ID list (CNS 02h).
950 	 */
951 	if (min_nsid == 0xFFFFFFFE || min_nsid == NVME_NSID_ALL) {
952 		req->error_loc = offsetof(struct nvme_identify, nsid);
953 		status = NVME_SC_INVALID_NS | NVME_STATUS_DNR;
954 		goto out;
955 	}
956 
957 	list = kzalloc(buf_size, GFP_KERNEL);
958 	if (!list) {
959 		status = NVME_SC_INTERNAL;
960 		goto out;
961 	}
962 
963 	nvmet_for_each_enabled_ns(&ctrl->subsys->namespaces, idx, ns) {
964 		if (ns->nsid <= min_nsid)
965 			continue;
966 		if (match_css && ns->csi != req->cmd->identify.csi)
967 			continue;
968 		list[i++] = cpu_to_le32(ns->nsid);
969 		if (i == buf_size / sizeof(__le32))
970 			break;
971 	}
972 
973 	status = nvmet_copy_to_sgl(req, 0, list, buf_size);
974 
975 	kfree(list);
976 out:
977 	nvmet_req_complete(req, status);
978 }
979 
980 static u16 nvmet_copy_ns_identifier(struct nvmet_req *req, u8 type, u8 len,
981 				    void *id, off_t *off)
982 {
983 	struct nvme_ns_id_desc desc = {
984 		.nidt = type,
985 		.nidl = len,
986 	};
987 	u16 status;
988 
989 	status = nvmet_copy_to_sgl(req, *off, &desc, sizeof(desc));
990 	if (status)
991 		return status;
992 	*off += sizeof(desc);
993 
994 	status = nvmet_copy_to_sgl(req, *off, id, len);
995 	if (status)
996 		return status;
997 	*off += len;
998 
999 	return 0;
1000 }
1001 
1002 static void nvmet_execute_identify_desclist(struct nvmet_req *req)
1003 {
1004 	off_t off = 0;
1005 	u16 status;
1006 
1007 	status = nvmet_req_find_ns(req);
1008 	if (status)
1009 		goto out;
1010 
1011 	if (memchr_inv(&req->ns->uuid, 0, sizeof(req->ns->uuid))) {
1012 		status = nvmet_copy_ns_identifier(req, NVME_NIDT_UUID,
1013 						  NVME_NIDT_UUID_LEN,
1014 						  &req->ns->uuid, &off);
1015 		if (status)
1016 			goto out;
1017 	}
1018 	if (memchr_inv(req->ns->nguid, 0, sizeof(req->ns->nguid))) {
1019 		status = nvmet_copy_ns_identifier(req, NVME_NIDT_NGUID,
1020 						  NVME_NIDT_NGUID_LEN,
1021 						  &req->ns->nguid, &off);
1022 		if (status)
1023 			goto out;
1024 	}
1025 
1026 	status = nvmet_copy_ns_identifier(req, NVME_NIDT_CSI,
1027 					  NVME_NIDT_CSI_LEN,
1028 					  &req->ns->csi, &off);
1029 	if (status)
1030 		goto out;
1031 
1032 	if (sg_zero_buffer(req->sg, req->sg_cnt, NVME_IDENTIFY_DATA_SIZE - off,
1033 			off) != NVME_IDENTIFY_DATA_SIZE - off)
1034 		status = NVME_SC_INTERNAL | NVME_STATUS_DNR;
1035 
1036 out:
1037 	nvmet_req_complete(req, status);
1038 }
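
/*
 * Layout produced above: each descriptor is a 4-byte nvme_ns_id_desc header
 * followed by nidl bytes of identifier, and the list is terminated by zeroes.
 * With a UUID, an NGUID and the CSI descriptor present that is
 * (4 + 16) + (4 + 16) + (4 + 1) = 45 bytes, and the remaining
 * NVME_IDENTIFY_DATA_SIZE - 45 bytes are cleared by sg_zero_buffer().
 */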
1039 
1040 static void nvmet_execute_identify_ctrl_nvm(struct nvmet_req *req)
1041 {
1042 	/* Not supported: return zeroes */
1043 	nvmet_req_complete(req,
1044 		   nvmet_zero_sgl(req, 0, sizeof(struct nvme_id_ctrl_nvm)));
1045 }
1046 
1047 static void nvme_execute_identify_ns_nvm(struct nvmet_req *req)
1048 {
1049 	u16 status;
1050 	struct nvme_id_ns_nvm *id;
1051 
1052 	status = nvmet_req_find_ns(req);
1053 	if (status)
1054 		goto out;
1055 
1056 	id = kzalloc(sizeof(*id), GFP_KERNEL);
1057 	if (!id) {
1058 		status = NVME_SC_INTERNAL;
1059 		goto out;
1060 	}
1061 	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
1062 	kfree(id);
1063 out:
1064 	nvmet_req_complete(req, status);
1065 }
1066 
1067 static void nvmet_execute_id_cs_indep(struct nvmet_req *req)
1068 {
1069 	struct nvme_id_ns_cs_indep *id;
1070 	u16 status;
1071 
1072 	status = nvmet_req_find_ns(req);
1073 	if (status)
1074 		goto out;
1075 
1076 	id = kzalloc(sizeof(*id), GFP_KERNEL);
1077 	if (!id) {
1078 		status = NVME_SC_INTERNAL;
1079 		goto out;
1080 	}
1081 
1082 	id->nstat = NVME_NSTAT_NRDY;
1083 	id->anagrpid = cpu_to_le32(req->ns->anagrpid);
1084 	id->nmic = NVME_NS_NMIC_SHARED;
1085 	if (req->ns->readonly)
1086 		id->nsattr |= NVME_NS_ATTR_RO;
1087 	if (req->ns->bdev && !bdev_nonrot(req->ns->bdev))
1088 		id->nsfeat |= NVME_NS_ROTATIONAL;
1089 	/*
1090 	 * We need the flush command to flush the file's metadata, so
1091 	 * report a volatile write cache (vwc) when the backend is a file,
1092 	 * even though buffered_io is disabled.
1093 	 */
1094 	if (req->ns->bdev && !bdev_write_cache(req->ns->bdev))
1095 		id->nsfeat |= NVME_NS_VWC_NOT_PRESENT;
1096 
1097 	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
1098 	kfree(id);
1099 out:
1100 	nvmet_req_complete(req, status);
1101 }
1102 
1103 static void nvmet_execute_identify(struct nvmet_req *req)
1104 {
1105 	if (!nvmet_check_transfer_len(req, NVME_IDENTIFY_DATA_SIZE))
1106 		return;
1107 
1108 	switch (req->cmd->identify.cns) {
1109 	case NVME_ID_CNS_NS:
1110 		nvmet_execute_identify_ns(req);
1111 		return;
1112 	case NVME_ID_CNS_CTRL:
1113 		nvmet_execute_identify_ctrl(req);
1114 		return;
1115 	case NVME_ID_CNS_NS_ACTIVE_LIST:
1116 		nvmet_execute_identify_nslist(req, false);
1117 		return;
1118 	case NVME_ID_CNS_NS_DESC_LIST:
1119 		nvmet_execute_identify_desclist(req);
1120 		return;
1121 	case NVME_ID_CNS_CS_NS:
1122 		switch (req->cmd->identify.csi) {
1123 		case NVME_CSI_NVM:
1124 			nvme_execute_identify_ns_nvm(req);
1125 			return;
1126 		case NVME_CSI_ZNS:
1127 			if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
1128 				nvmet_execute_identify_ns_zns(req);
1129 				return;
1130 			}
1131 			break;
1132 		}
1133 		break;
1134 	case NVME_ID_CNS_CS_CTRL:
1135 		switch (req->cmd->identify.csi) {
1136 		case NVME_CSI_NVM:
1137 			nvmet_execute_identify_ctrl_nvm(req);
1138 			return;
1139 		case NVME_CSI_ZNS:
1140 			if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
1141 				nvmet_execute_identify_ctrl_zns(req);
1142 				return;
1143 			}
1144 			break;
1145 		}
1146 		break;
1147 	case NVME_ID_CNS_NS_ACTIVE_LIST_CS:
1148 		nvmet_execute_identify_nslist(req, true);
1149 		return;
1150 	case NVME_ID_CNS_NS_CS_INDEP:
1151 		nvmet_execute_id_cs_indep(req);
1152 		return;
1153 	case NVME_ID_CNS_ENDGRP_LIST:
1154 		nvmet_execute_identify_endgrp_list(req);
1155 		return;
1156 	}
1157 
1158 	pr_debug("unhandled identify cns %d on qid %d\n",
1159 	       req->cmd->identify.cns, req->sq->qid);
1160 	req->error_loc = offsetof(struct nvme_identify, cns);
1161 	nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_STATUS_DNR);
1162 }
1163 
1164 /*
1165  * A "minimum viable" abort implementation: the command is mandatory in the
1166  * spec, but we are not required to do any useful work.  We couldn't really
1167  * do a useful abort, so don't bother even with waiting for the command
1168  * to be executed and return immediately telling the command to abort
1169  * wasn't found.
1170  */
1171 static void nvmet_execute_abort(struct nvmet_req *req)
1172 {
1173 	if (!nvmet_check_transfer_len(req, 0))
1174 		return;
1175 	nvmet_set_result(req, 1);
1176 	nvmet_req_complete(req, 0);
1177 }
1178 
1179 static u16 nvmet_write_protect_flush_sync(struct nvmet_req *req)
1180 {
1181 	u16 status;
1182 
1183 	if (req->ns->file)
1184 		status = nvmet_file_flush(req);
1185 	else
1186 		status = nvmet_bdev_flush(req);
1187 
1188 	if (status)
1189 		pr_err("write protect flush failed nsid: %u\n", req->ns->nsid);
1190 	return status;
1191 }
1192 
1193 static u16 nvmet_set_feat_write_protect(struct nvmet_req *req)
1194 {
1195 	u32 write_protect = le32_to_cpu(req->cmd->common.cdw11);
1196 	struct nvmet_subsys *subsys = nvmet_req_subsys(req);
1197 	u16 status;
1198 
1199 	status = nvmet_req_find_ns(req);
1200 	if (status)
1201 		return status;
1202 
1203 	mutex_lock(&subsys->lock);
1204 	switch (write_protect) {
1205 	case NVME_NS_WRITE_PROTECT:
1206 		req->ns->readonly = true;
1207 		status = nvmet_write_protect_flush_sync(req);
1208 		if (status)
1209 			req->ns->readonly = false;
1210 		break;
1211 	case NVME_NS_NO_WRITE_PROTECT:
1212 		req->ns->readonly = false;
1213 		status = 0;
1214 		break;
1215 	default:
1216 		break;
1217 	}
1218 
1219 	if (!status)
1220 		nvmet_ns_changed(subsys, req->ns->nsid);
1221 	mutex_unlock(&subsys->lock);
1222 	return status;
1223 }
1224 
1225 u16 nvmet_set_feat_kato(struct nvmet_req *req)
1226 {
1227 	u32 val32 = le32_to_cpu(req->cmd->common.cdw11);
1228 
1229 	nvmet_stop_keep_alive_timer(req->sq->ctrl);
1230 	req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000);
1231 	nvmet_start_keep_alive_timer(req->sq->ctrl);
1232 
1233 	nvmet_set_result(req, req->sq->ctrl->kato);
1234 
1235 	return 0;
1236 }
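
/*
 * Example of the conversion above: the host passes KATO in milliseconds in
 * cdw11, so a requested value of 2500 ms is stored as
 * DIV_ROUND_UP(2500, 1000) = 3 seconds; nvmet_get_feat_kato() later reports
 * it back as 3000 ms.
 */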
1237 
1238 u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask)
1239 {
1240 	u32 val32 = le32_to_cpu(req->cmd->common.cdw11);
1241 
1242 	if (val32 & ~mask) {
1243 		req->error_loc = offsetof(struct nvme_common_command, cdw11);
1244 		return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
1245 	}
1246 
1247 	WRITE_ONCE(req->sq->ctrl->aen_enabled, val32);
1248 	nvmet_set_result(req, val32);
1249 
1250 	return 0;
1251 }
1252 
1253 static u16 nvmet_set_feat_host_id(struct nvmet_req *req)
1254 {
1255 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
1256 
1257 	if (!nvmet_is_pci_ctrl(ctrl))
1258 		return NVME_SC_CMD_SEQ_ERROR | NVME_STATUS_DNR;
1259 
1260 	/*
1261 	 * The NVMe Base Specification v2.1 recommends supporting a 128-bit host
1262 	 * ID (section 5.1.25.1.28.1). However, that same section also says
1263 	 * that "The controller may support a 64-bit Host Identifier and/or an
1264 	 * extended 128-bit Host Identifier". So simplify this support and do
1265 	 * not support 64-bit host IDs, to avoid needing to check that all
1266 	 * controllers associated with the same subsystem all use the same host
1267 	 * ID size.
1268 	 */
1269 	if (!(req->cmd->common.cdw11 & cpu_to_le32(1 << 0))) {
1270 		req->error_loc = offsetof(struct nvme_common_command, cdw11);
1271 		return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
1272 	}
1273 
1274 	return nvmet_copy_from_sgl(req, 0, &req->sq->ctrl->hostid,
1275 				   sizeof(req->sq->ctrl->hostid));
1276 }
1277 
1278 static u16 nvmet_set_feat_irq_coalesce(struct nvmet_req *req)
1279 {
1280 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
1281 	u32 cdw11 = le32_to_cpu(req->cmd->common.cdw11);
1282 	struct nvmet_feat_irq_coalesce irqc = {
1283 		.time = (cdw11 >> 8) & 0xff,
1284 		.thr = cdw11 & 0xff,
1285 	};
1286 
1287 	/*
1288 	 * This feature is not supported for fabrics controllers and is mandatory
1289 	 * for PCI controllers.
1290 	 */
1291 	if (!nvmet_is_pci_ctrl(ctrl)) {
1292 		req->error_loc = offsetof(struct nvme_common_command, cdw10);
1293 		return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
1294 	}
1295 
1296 	return ctrl->ops->set_feature(ctrl, NVME_FEAT_IRQ_COALESCE, &irqc);
1297 }
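
/*
 * cdw11 decoding used above, with an illustrative value: cdw11 = 0x0a05
 * yields an aggregation threshold (thr) of 5 completions and an aggregation
 * time (time) of 0x0a, i.e. 10 increments of 100 microseconds per the
 * Interrupt Coalescing feature definition.
 */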
1298 
1299 static u16 nvmet_set_feat_irq_config(struct nvmet_req *req)
1300 {
1301 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
1302 	u32 cdw11 = le32_to_cpu(req->cmd->common.cdw11);
1303 	struct nvmet_feat_irq_config irqcfg = {
1304 		.iv = cdw11 & 0xffff,
1305 		.cd = (cdw11 >> 16) & 0x1,
1306 	};
1307 
1308 	/*
1309 	 * This feature is not supported for fabrics controllers and is mandatory
1310 	 * for PCI controllers.
1311 	 */
1312 	if (!nvmet_is_pci_ctrl(ctrl)) {
1313 		req->error_loc = offsetof(struct nvme_common_command, cdw10);
1314 		return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
1315 	}
1316 
1317 	return ctrl->ops->set_feature(ctrl, NVME_FEAT_IRQ_CONFIG, &irqcfg);
1318 }
1319 
1320 static u16 nvmet_set_feat_arbitration(struct nvmet_req *req)
1321 {
1322 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
1323 	u32 cdw11 = le32_to_cpu(req->cmd->common.cdw11);
1324 	struct nvmet_feat_arbitration arb = {
1325 		.hpw = (cdw11 >> 24) & 0xff,
1326 		.mpw = (cdw11 >> 16) & 0xff,
1327 		.lpw = (cdw11 >> 8) & 0xff,
1328 		.ab = cdw11 & 0x3,
1329 	};
1330 
1331 	if (!ctrl->ops->set_feature) {
1332 		req->error_loc = offsetof(struct nvme_common_command, cdw10);
1333 		return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
1334 	}
1335 
1336 	return ctrl->ops->set_feature(ctrl, NVME_FEAT_ARBITRATION, &arb);
1337 }
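
/*
 * cdw11 decoding used above, with an illustrative value: cdw11 = 0x0f080402
 * yields hpw = 15, mpw = 8, lpw = 4 (all 0's based weights) and ab = 2,
 * i.e. an arbitration burst of 2^2 = 4 commands.
 */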
1338 
1339 void nvmet_execute_set_features(struct nvmet_req *req)
1340 {
1341 	struct nvmet_subsys *subsys = nvmet_req_subsys(req);
1342 	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
1343 	u32 cdw11 = le32_to_cpu(req->cmd->common.cdw11);
1344 	u16 status = 0;
1345 	u16 nsqr;
1346 	u16 ncqr;
1347 
1348 	if (!nvmet_check_data_len_lte(req, 0))
1349 		return;
1350 
1351 	switch (cdw10 & 0xff) {
1352 	case NVME_FEAT_ARBITRATION:
1353 		status = nvmet_set_feat_arbitration(req);
1354 		break;
1355 	case NVME_FEAT_NUM_QUEUES:
1356 		ncqr = (cdw11 >> 16) & 0xffff;
1357 		nsqr = cdw11 & 0xffff;
1358 		if (ncqr == 0xffff || nsqr == 0xffff) {
1359 			status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
1360 			break;
1361 		}
1362 		nvmet_set_result(req,
1363 			(subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
1364 		break;
1365 	case NVME_FEAT_IRQ_COALESCE:
1366 		status = nvmet_set_feat_irq_coalesce(req);
1367 		break;
1368 	case NVME_FEAT_IRQ_CONFIG:
1369 		status = nvmet_set_feat_irq_config(req);
1370 		break;
1371 	case NVME_FEAT_KATO:
1372 		status = nvmet_set_feat_kato(req);
1373 		break;
1374 	case NVME_FEAT_ASYNC_EVENT:
1375 		status = nvmet_set_feat_async_event(req, NVMET_AEN_CFG_ALL);
1376 		break;
1377 	case NVME_FEAT_HOST_ID:
1378 		status = nvmet_set_feat_host_id(req);
1379 		break;
1380 	case NVME_FEAT_WRITE_PROTECT:
1381 		status = nvmet_set_feat_write_protect(req);
1382 		break;
1383 	case NVME_FEAT_RESV_MASK:
1384 		status = nvmet_set_feat_resv_notif_mask(req, cdw11);
1385 		break;
1386 	default:
1387 		req->error_loc = offsetof(struct nvme_common_command, cdw10);
1388 		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
1389 		break;
1390 	}
1391 
1392 	nvmet_req_complete(req, status);
1393 }
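
/*
 * Number of Queues result packing used above: the completion dword carries
 * 0's based counts, NCQA in bits 31:16 and NSQA in bits 15:0. For example,
 * with subsys->max_qid == 128 the result is (127 << 16) | 127, i.e. 127 I/O
 * submission and completion queues are granted.
 */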
1394 
1395 static u16 nvmet_get_feat_write_protect(struct nvmet_req *req)
1396 {
1397 	struct nvmet_subsys *subsys = nvmet_req_subsys(req);
1398 	u32 result;
1399 
1400 	result = nvmet_req_find_ns(req);
1401 	if (result)
1402 		return result;
1403 
1404 	mutex_lock(&subsys->lock);
1405 	if (req->ns->readonly == true)
1406 		result = NVME_NS_WRITE_PROTECT;
1407 	else
1408 		result = NVME_NS_NO_WRITE_PROTECT;
1409 	nvmet_set_result(req, result);
1410 	mutex_unlock(&subsys->lock);
1411 
1412 	return 0;
1413 }
1414 
1415 static u16 nvmet_get_feat_irq_coalesce(struct nvmet_req *req)
1416 {
1417 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
1418 	struct nvmet_feat_irq_coalesce irqc = { };
1419 	u16 status;
1420 
1421 	/*
1422 	 * This feature is not supported for fabrics controllers and is mandatory
1423 	 * for PCI controllers.
1424 	 */
1425 	if (!nvmet_is_pci_ctrl(ctrl)) {
1426 		req->error_loc = offsetof(struct nvme_common_command, cdw10);
1427 		return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
1428 	}
1429 
1430 	status = ctrl->ops->get_feature(ctrl, NVME_FEAT_IRQ_COALESCE, &irqc);
1431 	if (status != NVME_SC_SUCCESS)
1432 		return status;
1433 
1434 	nvmet_set_result(req, ((u32)irqc.time << 8) | (u32)irqc.thr);
1435 
1436 	return NVME_SC_SUCCESS;
1437 }
1438 
1439 static u16 nvmet_get_feat_irq_config(struct nvmet_req *req)
1440 {
1441 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
1442 	u32 iv = le32_to_cpu(req->cmd->common.cdw11) & 0xffff;
1443 	struct nvmet_feat_irq_config irqcfg = { .iv = iv };
1444 	u16 status;
1445 
1446 	/*
1447 	 * This feature is not supported for fabrics controllers and is mandatory
1448 	 * for PCI controllers.
1449 	 */
1450 	if (!nvmet_is_pci_ctrl(ctrl)) {
1451 		req->error_loc = offsetof(struct nvme_common_command, cdw10);
1452 		return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
1453 	}
1454 
1455 	status = ctrl->ops->get_feature(ctrl, NVME_FEAT_IRQ_CONFIG, &irqcfg);
1456 	if (status != NVME_SC_SUCCESS)
1457 		return status;
1458 
1459 	nvmet_set_result(req, ((u32)irqcfg.cd << 16) | iv);
1460 
1461 	return NVME_SC_SUCCESS;
1462 }
1463 
1464 static u16 nvmet_get_feat_arbitration(struct nvmet_req *req)
1465 {
1466 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
1467 	struct nvmet_feat_arbitration arb = { };
1468 	u16 status;
1469 
1470 	if (!ctrl->ops->get_feature) {
1471 		req->error_loc = offsetof(struct nvme_common_command, cdw10);
1472 		return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
1473 	}
1474 
1475 	status = ctrl->ops->get_feature(ctrl, NVME_FEAT_ARBITRATION, &arb);
1476 	if (status != NVME_SC_SUCCESS)
1477 		return status;
1478 
1479 	nvmet_set_result(req,
1480 			 ((u32)arb.hpw << 24) |
1481 			 ((u32)arb.mpw << 16) |
1482 			 ((u32)arb.lpw << 8) |
1483 			 (arb.ab & 0x3));
1484 
1485 	return NVME_SC_SUCCESS;
1486 }
1487 
1488 void nvmet_get_feat_kato(struct nvmet_req *req)
1489 {
1490 	nvmet_set_result(req, req->sq->ctrl->kato * 1000);
1491 }
1492 
1493 void nvmet_get_feat_async_event(struct nvmet_req *req)
1494 {
1495 	nvmet_set_result(req, READ_ONCE(req->sq->ctrl->aen_enabled));
1496 }
1497 
1498 void nvmet_execute_get_features(struct nvmet_req *req)
1499 {
1500 	struct nvmet_subsys *subsys = nvmet_req_subsys(req);
1501 	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
1502 	u16 status = 0;
1503 
1504 	if (!nvmet_check_transfer_len(req, nvmet_feat_data_len(req, cdw10)))
1505 		return;
1506 
1507 	switch (cdw10 & 0xff) {
1508 	/*
1509 	 * These features are mandatory in the spec, but we don't
1510 	 * have a useful way to implement them.  We'll eventually
1511 	 * need to come up with some fake values for these.
1512 	 */
1513 #if 0
1514 	case NVME_FEAT_POWER_MGMT:
1515 		break;
1516 	case NVME_FEAT_TEMP_THRESH:
1517 		break;
1518 	case NVME_FEAT_ERR_RECOVERY:
1519 		break;
1520 	case NVME_FEAT_WRITE_ATOMIC:
1521 		break;
1522 #endif
1523 	case NVME_FEAT_ARBITRATION:
1524 		status = nvmet_get_feat_arbitration(req);
1525 		break;
1526 	case NVME_FEAT_IRQ_COALESCE:
1527 		status = nvmet_get_feat_irq_coalesce(req);
1528 		break;
1529 	case NVME_FEAT_IRQ_CONFIG:
1530 		status = nvmet_get_feat_irq_config(req);
1531 		break;
1532 	case NVME_FEAT_ASYNC_EVENT:
1533 		nvmet_get_feat_async_event(req);
1534 		break;
1535 	case NVME_FEAT_VOLATILE_WC:
1536 		nvmet_set_result(req, 1);
1537 		break;
1538 	case NVME_FEAT_NUM_QUEUES:
1539 		nvmet_set_result(req,
1540 			(subsys->max_qid-1) | ((subsys->max_qid-1) << 16));
1541 		break;
1542 	case NVME_FEAT_KATO:
1543 		nvmet_get_feat_kato(req);
1544 		break;
1545 	case NVME_FEAT_HOST_ID:
1546 		/* need 128-bit host identifier flag */
1547 		if (!(req->cmd->common.cdw11 & cpu_to_le32(1 << 0))) {
1548 			req->error_loc =
1549 				offsetof(struct nvme_common_command, cdw11);
1550 			status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
1551 			break;
1552 		}
1553 
1554 		status = nvmet_copy_to_sgl(req, 0, &req->sq->ctrl->hostid,
1555 				sizeof(req->sq->ctrl->hostid));
1556 		break;
1557 	case NVME_FEAT_WRITE_PROTECT:
1558 		status = nvmet_get_feat_write_protect(req);
1559 		break;
1560 	case NVME_FEAT_RESV_MASK:
1561 		status = nvmet_get_feat_resv_notif_mask(req);
1562 		break;
1563 	default:
1564 		req->error_loc =
1565 			offsetof(struct nvme_common_command, cdw10);
1566 		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
1567 		break;
1568 	}
1569 
1570 	nvmet_req_complete(req, status);
1571 }
1572 
1573 void nvmet_execute_async_event(struct nvmet_req *req)
1574 {
1575 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
1576 
1577 	if (!nvmet_check_transfer_len(req, 0))
1578 		return;
1579 
1580 	mutex_lock(&ctrl->lock);
1581 	if (ctrl->nr_async_event_cmds >= NVMET_ASYNC_EVENTS) {
1582 		mutex_unlock(&ctrl->lock);
1583 		nvmet_req_complete(req, NVME_SC_ASYNC_LIMIT | NVME_STATUS_DNR);
1584 		return;
1585 	}
1586 	ctrl->async_event_cmds[ctrl->nr_async_event_cmds++] = req;
1587 	mutex_unlock(&ctrl->lock);
1588 
1589 	queue_work(nvmet_wq, &ctrl->async_event_work);
1590 }
1591 
1592 void nvmet_execute_keep_alive(struct nvmet_req *req)
1593 {
1594 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
1595 	u16 status = 0;
1596 
1597 	if (!nvmet_check_transfer_len(req, 0))
1598 		return;
1599 
1600 	if (!ctrl->kato) {
1601 		status = NVME_SC_KA_TIMEOUT_INVALID;
1602 		goto out;
1603 	}
1604 
1605 	pr_debug("ctrl %d update keep-alive timer for %d secs\n",
1606 		ctrl->cntlid, ctrl->kato);
1607 	mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
1608 out:
1609 	nvmet_req_complete(req, status);
1610 }
1611 
1612 u32 nvmet_admin_cmd_data_len(struct nvmet_req *req)
1613 {
1614 	struct nvme_command *cmd = req->cmd;
1615 
1616 	if (nvme_is_fabrics(cmd))
1617 		return nvmet_fabrics_admin_cmd_data_len(req);
1618 	if (nvmet_is_disc_subsys(nvmet_req_subsys(req)))
1619 		return nvmet_discovery_cmd_data_len(req);
1620 
1621 	switch (cmd->common.opcode) {
1622 	case nvme_admin_get_log_page:
1623 		return nvmet_get_log_page_len(cmd);
1624 	case nvme_admin_identify:
1625 		return NVME_IDENTIFY_DATA_SIZE;
1626 	case nvme_admin_get_features:
1627 		return nvmet_feat_data_len(req, le32_to_cpu(cmd->common.cdw10));
1628 	default:
1629 		return 0;
1630 	}
1631 }
1632 
1633 u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
1634 {
1635 	struct nvme_command *cmd = req->cmd;
1636 	u16 ret;
1637 
1638 	if (nvme_is_fabrics(cmd))
1639 		return nvmet_parse_fabrics_admin_cmd(req);
1640 	if (nvmet_is_disc_subsys(nvmet_req_subsys(req)))
1641 		return nvmet_parse_discovery_cmd(req);
1642 
1643 	ret = nvmet_check_ctrl_status(req);
1644 	if (unlikely(ret))
1645 		return ret;
1646 
1647 	/* For PCI controllers, admin commands shall not use SGL. */
1648 	if (nvmet_is_pci_ctrl(req->sq->ctrl) && !req->sq->qid &&
1649 	    cmd->common.flags & NVME_CMD_SGL_ALL)
1650 		return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
1651 
1652 	if (nvmet_is_passthru_req(req))
1653 		return nvmet_parse_passthru_admin_cmd(req);
1654 
1655 	switch (cmd->common.opcode) {
1656 	case nvme_admin_delete_sq:
1657 		req->execute = nvmet_execute_delete_sq;
1658 		return 0;
1659 	case nvme_admin_create_sq:
1660 		req->execute = nvmet_execute_create_sq;
1661 		return 0;
1662 	case nvme_admin_get_log_page:
1663 		req->execute = nvmet_execute_get_log_page;
1664 		return 0;
1665 	case nvme_admin_delete_cq:
1666 		req->execute = nvmet_execute_delete_cq;
1667 		return 0;
1668 	case nvme_admin_create_cq:
1669 		req->execute = nvmet_execute_create_cq;
1670 		return 0;
1671 	case nvme_admin_identify:
1672 		req->execute = nvmet_execute_identify;
1673 		return 0;
1674 	case nvme_admin_abort_cmd:
1675 		req->execute = nvmet_execute_abort;
1676 		return 0;
1677 	case nvme_admin_set_features:
1678 		req->execute = nvmet_execute_set_features;
1679 		return 0;
1680 	case nvme_admin_get_features:
1681 		req->execute = nvmet_execute_get_features;
1682 		return 0;
1683 	case nvme_admin_async_event:
1684 		req->execute = nvmet_execute_async_event;
1685 		return 0;
1686 	case nvme_admin_keep_alive:
1687 		req->execute = nvmet_execute_keep_alive;
1688 		return 0;
1689 	default:
1690 		return nvmet_report_invalid_opcode(req);
1691 	}
1692 }
1693