// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe admin command implementation.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/rculist.h>
#include <linux/part_stat.h>

#include <generated/utsrelease.h>
#include <linux/unaligned.h>
#include "nvmet.h"

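/*
 * Compute the Get Log Page transfer length in bytes: NUMDU and NUMDL
 * together form a single zero's based dword count.  For example,
 * NUMDU=0x0001 and NUMDL=0x0000 yield (0x10000 + 1) dwords, i.e.
 * 262148 bytes.
 */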
u32 nvmet_get_log_page_len(struct nvme_command *cmd)
{
	u32 len = le16_to_cpu(cmd->get_log_page.numdu);

	len <<= 16;
	len += le16_to_cpu(cmd->get_log_page.numdl);
	/* NUMD is a 0's based value */
	len += 1;
	len *= sizeof(u32);

	return len;
}

static u32 nvmet_feat_data_len(struct nvmet_req *req, u32 cdw10)
{
	switch (cdw10 & 0xff) {
	case NVME_FEAT_HOST_ID:
		return sizeof(req->sq->ctrl->hostid);
	default:
		return 0;
	}
}

u64 nvmet_get_log_page_offset(struct nvme_command *cmd)
{
	return le64_to_cpu(cmd->get_log_page.lpo);
}

static void nvmet_execute_get_log_page_noop(struct nvmet_req *req)
{
	nvmet_req_complete(req, nvmet_zero_sgl(req, 0, req->transfer_len));
}

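/*
 * Return the Error Information log.  The slots form a ring indexed by
 * err_counter; walk it backwards from the most recently used slot so
 * the newest entries are returned first.
 */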
static void nvmet_execute_get_log_page_error(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	unsigned long flags;
	off_t offset = 0;
	u64 slot;
	u64 i;

	spin_lock_irqsave(&ctrl->error_lock, flags);
	slot = ctrl->err_counter % NVMET_ERROR_LOG_SLOTS;

	for (i = 0; i < NVMET_ERROR_LOG_SLOTS; i++) {
		if (nvmet_copy_to_sgl(req, offset, &ctrl->slots[slot],
				sizeof(struct nvme_error_slot)))
			break;

		if (slot == 0)
			slot = NVMET_ERROR_LOG_SLOTS - 1;
		else
			slot--;
		offset += sizeof(struct nvme_error_slot);
	}
	spin_unlock_irqrestore(&ctrl->error_lock, flags);
	nvmet_req_complete(req, 0);
}

static void nvmet_execute_get_supported_log_pages(struct nvmet_req *req)
{
	struct nvme_supported_log *logs;
	u16 status;

	logs = kzalloc(sizeof(*logs), GFP_KERNEL);
	if (!logs) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	logs->lids[NVME_LOG_SUPPORTED] = cpu_to_le32(NVME_LIDS_LSUPP);
	logs->lids[NVME_LOG_ERROR] = cpu_to_le32(NVME_LIDS_LSUPP);
	logs->lids[NVME_LOG_SMART] = cpu_to_le32(NVME_LIDS_LSUPP);
	logs->lids[NVME_LOG_FW_SLOT] = cpu_to_le32(NVME_LIDS_LSUPP);
	logs->lids[NVME_LOG_CHANGED_NS] = cpu_to_le32(NVME_LIDS_LSUPP);
	logs->lids[NVME_LOG_CMD_EFFECTS] = cpu_to_le32(NVME_LIDS_LSUPP);
	logs->lids[NVME_LOG_ENDURANCE_GROUP] = cpu_to_le32(NVME_LIDS_LSUPP);
	logs->lids[NVME_LOG_ANA] = cpu_to_le32(NVME_LIDS_LSUPP);
	logs->lids[NVME_LOG_FEATURES] = cpu_to_le32(NVME_LIDS_LSUPP);
	logs->lids[NVME_LOG_RMI] = cpu_to_le32(NVME_LIDS_LSUPP);
	logs->lids[NVME_LOG_RESERVATION] = cpu_to_le32(NVME_LIDS_LSUPP);

	status = nvmet_copy_to_sgl(req, 0, logs, sizeof(*logs));
	kfree(logs);
out:
	nvmet_req_complete(req, status);
}

static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
		struct nvme_smart_log *slog)
{
	u64 host_reads, host_writes, data_units_read, data_units_written;
	u16 status;

	status = nvmet_req_find_ns(req);
	if (status)
		return status;

	/* we don't have the right data for file backed ns */
	if (!req->ns->bdev)
		return NVME_SC_SUCCESS;

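	/*
	 * SMART data units are reported in thousands of 512-byte units,
	 * hence the DIV_ROUND_UP(sectors, 1000) conversions below.
	 */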
	host_reads = part_stat_read(req->ns->bdev, ios[READ]);
	data_units_read =
		DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[READ]), 1000);
	host_writes = part_stat_read(req->ns->bdev, ios[WRITE]);
	data_units_written =
		DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[WRITE]), 1000);

	put_unaligned_le64(host_reads, &slog->host_reads[0]);
	put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
	put_unaligned_le64(host_writes, &slog->host_writes[0]);
	put_unaligned_le64(data_units_written, &slog->data_units_written[0]);

	return NVME_SC_SUCCESS;
}

static u16 nvmet_get_smart_log_all(struct nvmet_req *req,
		struct nvme_smart_log *slog)
{
	u64 host_reads = 0, host_writes = 0;
	u64 data_units_read = 0, data_units_written = 0;
	struct nvmet_ns *ns;
	struct nvmet_ctrl *ctrl;
	unsigned long idx;

	ctrl = req->sq->ctrl;
	xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
		/* we don't have the right data for file backed ns */
		if (!ns->bdev)
			continue;
		host_reads += part_stat_read(ns->bdev, ios[READ]);
		data_units_read += DIV_ROUND_UP(
			part_stat_read(ns->bdev, sectors[READ]), 1000);
		host_writes += part_stat_read(ns->bdev, ios[WRITE]);
		data_units_written += DIV_ROUND_UP(
			part_stat_read(ns->bdev, sectors[WRITE]), 1000);
	}

	put_unaligned_le64(host_reads, &slog->host_reads[0]);
	put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
	put_unaligned_le64(host_writes, &slog->host_writes[0]);
	put_unaligned_le64(data_units_written, &slog->data_units_written[0]);

	return NVME_SC_SUCCESS;
}

static void nvmet_execute_get_log_page_rmi(struct nvmet_req *req)
{
	struct nvme_rotational_media_log *log;
	struct gendisk *disk;
	u16 status;

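	/*
	 * Endurance groups are emulated per-namespace, with the nsid doubling
	 * as the endurance group identifier, so map the LSI-supplied group
	 * back to its namespace.
	 */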
	req->cmd->common.nsid = cpu_to_le32(le16_to_cpu(
					    req->cmd->get_log_page.lsi));
	status = nvmet_req_find_ns(req);
	if (status)
		goto out;

	if (!req->ns->bdev || bdev_nonrot(req->ns->bdev)) {
		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
		goto out;
	}

	if (req->transfer_len != sizeof(*log)) {
		status = NVME_SC_SGL_INVALID_DATA | NVME_STATUS_DNR;
		goto out;
	}

	log = kzalloc(sizeof(*log), GFP_KERNEL);
	if (!log) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	log->endgid = req->cmd->get_log_page.lsi;
	disk = req->ns->bdev->bd_disk;
	if (disk && disk->ia_ranges)
		log->numa = cpu_to_le16(disk->ia_ranges->nr_ia_ranges);
	else
		log->numa = cpu_to_le16(1);

	status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
	kfree(log);
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_get_log_page_smart(struct nvmet_req *req)
{
	struct nvme_smart_log *log;
	u16 status = NVME_SC_INTERNAL;
	unsigned long flags;

	if (req->transfer_len != sizeof(*log))
		goto out;

	log = kzalloc(sizeof(*log), GFP_KERNEL);
	if (!log)
		goto out;

	if (req->cmd->get_log_page.nsid == cpu_to_le32(NVME_NSID_ALL))
		status = nvmet_get_smart_log_all(req, log);
	else
		status = nvmet_get_smart_log_nsid(req, log);
	if (status)
		goto out_free_log;

	spin_lock_irqsave(&req->sq->ctrl->error_lock, flags);
	put_unaligned_le64(req->sq->ctrl->err_counter,
			&log->num_err_log_entries);
	spin_unlock_irqrestore(&req->sq->ctrl->error_lock, flags);

	status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
out_free_log:
	kfree(log);
out:
	nvmet_req_complete(req, status);
}

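/*
 * Commands Supported and Effects log helpers: every supported opcode is
 * marked CSUPP, and commands that may change LBA contents also set LBCC.
 */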
static void nvmet_get_cmd_effects_nvm(struct nvme_effects_log *log)
{
	log->acs[nvme_admin_get_log_page] =
	log->acs[nvme_admin_identify] =
	log->acs[nvme_admin_abort_cmd] =
	log->acs[nvme_admin_set_features] =
	log->acs[nvme_admin_get_features] =
	log->acs[nvme_admin_async_event] =
	log->acs[nvme_admin_keep_alive] =
		cpu_to_le32(NVME_CMD_EFFECTS_CSUPP);

	log->iocs[nvme_cmd_read] =
	log->iocs[nvme_cmd_flush] =
	log->iocs[nvme_cmd_dsm] =
	log->iocs[nvme_cmd_resv_acquire] =
	log->iocs[nvme_cmd_resv_register] =
	log->iocs[nvme_cmd_resv_release] =
	log->iocs[nvme_cmd_resv_report] =
		cpu_to_le32(NVME_CMD_EFFECTS_CSUPP);
	log->iocs[nvme_cmd_write] =
	log->iocs[nvme_cmd_write_zeroes] =
		cpu_to_le32(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC);
}

static void nvmet_get_cmd_effects_zns(struct nvme_effects_log *log)
{
	log->iocs[nvme_cmd_zone_append] =
	log->iocs[nvme_cmd_zone_mgmt_send] =
		cpu_to_le32(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC);
	log->iocs[nvme_cmd_zone_mgmt_recv] =
		cpu_to_le32(NVME_CMD_EFFECTS_CSUPP);
}

static void nvmet_execute_get_log_cmd_effects_ns(struct nvmet_req *req)
{
	struct nvme_effects_log *log;
	u16 status = NVME_SC_SUCCESS;

	log = kzalloc(sizeof(*log), GFP_KERNEL);
	if (!log) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	switch (req->cmd->get_log_page.csi) {
	case NVME_CSI_NVM:
		nvmet_get_cmd_effects_nvm(log);
		break;
	case NVME_CSI_ZNS:
		if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
			status = NVME_SC_INVALID_IO_CMD_SET;
			goto free;
		}
		nvmet_get_cmd_effects_nvm(log);
		nvmet_get_cmd_effects_zns(log);
		break;
	default:
		status = NVME_SC_INVALID_LOG_PAGE;
		goto free;
	}

	status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
free:
	kfree(log);
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_get_log_changed_ns(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	u16 status = NVME_SC_INTERNAL;
	size_t len;

	if (req->transfer_len != NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32))
		goto out;

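	/*
	 * nvmet_add_to_changed_ns_log() pins nr_changed_ns to U32_MAX once
	 * the log overflows, leaving the single 0xffffffff entry the spec
	 * defines for that case as the whole list.
	 */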
	mutex_lock(&ctrl->lock);
	if (ctrl->nr_changed_ns == U32_MAX)
		len = sizeof(__le32);
	else
		len = ctrl->nr_changed_ns * sizeof(__le32);
	status = nvmet_copy_to_sgl(req, 0, ctrl->changed_ns_list, len);
	if (!status)
		status = nvmet_zero_sgl(req, len, req->transfer_len - len);
	ctrl->nr_changed_ns = 0;
	nvmet_clear_aen_bit(req, NVME_AEN_BIT_NS_ATTR);
	mutex_unlock(&ctrl->lock);
out:
	nvmet_req_complete(req, status);
}

static u32 nvmet_format_ana_group(struct nvmet_req *req, u32 grpid,
		struct nvme_ana_group_desc *desc)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmet_ns *ns;
	unsigned long idx;
	u32 count = 0;

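	/* RGO (Return Groups Only) set means the host wants no NSID lists. */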
	if (!(req->cmd->get_log_page.lsp & NVME_ANA_LOG_RGO)) {
		xa_for_each(&ctrl->subsys->namespaces, idx, ns)
			if (ns->anagrpid == grpid)
				desc->nsids[count++] = cpu_to_le32(ns->nsid);
	}

	desc->grpid = cpu_to_le32(grpid);
	desc->nnsids = cpu_to_le32(count);
	desc->chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
	desc->state = req->port->ana_state[grpid];
	memset(desc->rsvd17, 0, sizeof(desc->rsvd17));
	return struct_size(desc, nsids, count);
}

static void nvmet_execute_get_log_page_endgrp(struct nvmet_req *req)
{
	u64 host_reads, host_writes, data_units_read, data_units_written;
	struct nvme_endurance_group_log *log;
	u16 status;

	/*
	 * The target driver emulates each endurance group as its own
	 * namespace, reusing the nsid as the endurance group identifier.
	 */
	req->cmd->common.nsid = cpu_to_le32(le16_to_cpu(
					    req->cmd->get_log_page.lsi));
	status = nvmet_req_find_ns(req);
	if (status)
		goto out;

	log = kzalloc(sizeof(*log), GFP_KERNEL);
	if (!log) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	if (!req->ns->bdev)
		goto copy;

	host_reads = part_stat_read(req->ns->bdev, ios[READ]);
	data_units_read =
		DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[READ]), 1000);
	host_writes = part_stat_read(req->ns->bdev, ios[WRITE]);
	data_units_written =
		DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[WRITE]), 1000);

	put_unaligned_le64(host_reads, &log->hrc[0]);
	put_unaligned_le64(data_units_read, &log->dur[0]);
	put_unaligned_le64(host_writes, &log->hwc[0]);
	put_unaligned_le64(data_units_written, &log->duw[0]);
copy:
	status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
	kfree(log);
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_get_log_page_ana(struct nvmet_req *req)
{
	struct nvme_ana_rsp_hdr hdr = { 0, };
	struct nvme_ana_group_desc *desc;
	size_t offset = sizeof(struct nvme_ana_rsp_hdr); /* start beyond hdr */
	size_t len;
	u32 grpid;
	u16 ngrps = 0;
	u16 status;

	status = NVME_SC_INTERNAL;
	desc = kmalloc(struct_size(desc, nsids, NVMET_MAX_NAMESPACES),
		       GFP_KERNEL);
	if (!desc)
		goto out;

	down_read(&nvmet_ana_sem);
	for (grpid = 1; grpid <= NVMET_MAX_ANAGRPS; grpid++) {
		if (!nvmet_ana_group_enabled[grpid])
			continue;
		len = nvmet_format_ana_group(req, grpid, desc);
		status = nvmet_copy_to_sgl(req, offset, desc, len);
		if (status)
			break;
		offset += len;
		ngrps++;
	}
	for ( ; grpid <= NVMET_MAX_ANAGRPS; grpid++) {
		if (nvmet_ana_group_enabled[grpid])
			ngrps++;
	}

	hdr.chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
	hdr.ngrps = cpu_to_le16(ngrps);
	nvmet_clear_aen_bit(req, NVME_AEN_BIT_ANA_CHANGE);
	up_read(&nvmet_ana_sem);

	kfree(desc);

	/* copy the header last once we know the number of groups */
	status = nvmet_copy_to_sgl(req, 0, &hdr, sizeof(hdr));
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_get_log_page_features(struct nvmet_req *req)
{
	struct nvme_supported_features_log *features;
	u16 status;

	features = kzalloc(sizeof(*features), GFP_KERNEL);
	if (!features) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	features->fis[NVME_FEAT_NUM_QUEUES] =
		cpu_to_le32(NVME_FIS_FSUPP | NVME_FIS_CSCPE);
	features->fis[NVME_FEAT_KATO] =
		cpu_to_le32(NVME_FIS_FSUPP | NVME_FIS_CSCPE);
	features->fis[NVME_FEAT_ASYNC_EVENT] =
		cpu_to_le32(NVME_FIS_FSUPP | NVME_FIS_CSCPE);
	features->fis[NVME_FEAT_HOST_ID] =
		cpu_to_le32(NVME_FIS_FSUPP | NVME_FIS_CSCPE);
	features->fis[NVME_FEAT_WRITE_PROTECT] =
		cpu_to_le32(NVME_FIS_FSUPP | NVME_FIS_NSCPE);
	features->fis[NVME_FEAT_RESV_MASK] =
		cpu_to_le32(NVME_FIS_FSUPP | NVME_FIS_NSCPE);

	status = nvmet_copy_to_sgl(req, 0, features, sizeof(*features));
	kfree(features);
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_get_log_page(struct nvmet_req *req)
{
	if (!nvmet_check_transfer_len(req, nvmet_get_log_page_len(req->cmd)))
		return;

	switch (req->cmd->get_log_page.lid) {
	case NVME_LOG_SUPPORTED:
		return nvmet_execute_get_supported_log_pages(req);
	case NVME_LOG_ERROR:
		return nvmet_execute_get_log_page_error(req);
	case NVME_LOG_SMART:
		return nvmet_execute_get_log_page_smart(req);
	case NVME_LOG_FW_SLOT:
		/*
		 * We only support a single firmware slot which always is
		 * active, so we can zero out the whole firmware slot log and
		 * still claim to fully implement this mandatory log page.
		 */
		return nvmet_execute_get_log_page_noop(req);
	case NVME_LOG_CHANGED_NS:
		return nvmet_execute_get_log_changed_ns(req);
	case NVME_LOG_CMD_EFFECTS:
		return nvmet_execute_get_log_cmd_effects_ns(req);
	case NVME_LOG_ENDURANCE_GROUP:
		return nvmet_execute_get_log_page_endgrp(req);
	case NVME_LOG_ANA:
		return nvmet_execute_get_log_page_ana(req);
	case NVME_LOG_FEATURES:
		return nvmet_execute_get_log_page_features(req);
	case NVME_LOG_RMI:
		return nvmet_execute_get_log_page_rmi(req);
	case NVME_LOG_RESERVATION:
		return nvmet_execute_get_log_page_resv(req);
	}
	pr_debug("unhandled lid %d on qid %d\n",
	       req->cmd->get_log_page.lid, req->sq->qid);
	req->error_loc = offsetof(struct nvme_get_log_page_command, lid);
	nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_STATUS_DNR);
}

static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmet_subsys *subsys = ctrl->subsys;
	struct nvme_id_ctrl *id;
	u32 cmd_capsule_size;
	u16 status = 0;

	if (!subsys->subsys_discovered) {
		mutex_lock(&subsys->lock);
		subsys->subsys_discovered = true;
		mutex_unlock(&subsys->lock);
	}

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	/* XXX: figure out how to assign real vendor IDs. */
	id->vid = 0;
	id->ssvid = 0;

	memcpy(id->sn, ctrl->subsys->serial, NVMET_SN_MAX_SIZE);
	memcpy_and_pad(id->mn, sizeof(id->mn), subsys->model_number,
		       strlen(subsys->model_number), ' ');
	memcpy_and_pad(id->fr, sizeof(id->fr),
		       subsys->firmware_rev, strlen(subsys->firmware_rev), ' ');

	put_unaligned_le24(subsys->ieee_oui, id->ieee);

	id->rab = 6;

	if (nvmet_is_disc_subsys(ctrl->subsys))
		id->cntrltype = NVME_CTRL_DISC;
	else
		id->cntrltype = NVME_CTRL_IO;

	/* we support multiple ports, multiple hosts and ANA: */
	id->cmic = NVME_CTRL_CMIC_MULTI_PORT | NVME_CTRL_CMIC_MULTI_CTRL |
		NVME_CTRL_CMIC_ANA;

	/* Limit MDTS according to transport capability */
	if (ctrl->ops->get_mdts)
		id->mdts = ctrl->ops->get_mdts(ctrl);
	else
		id->mdts = 0;

	id->cntlid = cpu_to_le16(ctrl->cntlid);
	id->ver = cpu_to_le32(ctrl->subsys->ver);

	/* XXX: figure out what to do about RTD3R/RTD3 */
	id->oaes = cpu_to_le32(NVMET_AEN_CFG_OPTIONAL);
	id->ctratt = cpu_to_le32(NVME_CTRL_ATTR_HID_128_BIT |
		NVME_CTRL_ATTR_TBKAS);

	id->oacs = 0;

	/*
	 * We don't really have a practical limit on the number of abort
	 * commands.  But we don't do anything useful for abort either, so
	 * no point in allowing more abort commands than the spec requires.
	 */
	id->acl = 3;

	id->aerl = NVMET_ASYNC_EVENTS - 1;

	/* first slot is read-only, only one slot supported */
	id->frmw = (1 << 0) | (1 << 1);
	id->lpa = (1 << 0) | (1 << 1) | (1 << 2);
	id->elpe = NVMET_ERROR_LOG_SLOTS - 1;
	id->npss = 0;

	/* We support keep-alive timeout in granularity of seconds */
	id->kas = cpu_to_le16(NVMET_KAS);

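	/* Fixed queue entry sizes: 2^6 = 64 byte SQEs, 2^4 = 16 byte CQEs. */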
	id->sqes = (0x6 << 4) | 0x6;
	id->cqes = (0x4 << 4) | 0x4;

	/* no enforcement soft-limit for maxcmd - pick arbitrary high value */
	id->maxcmd = cpu_to_le16(NVMET_MAX_CMD(ctrl));

	id->nn = cpu_to_le32(NVMET_MAX_NAMESPACES);
	id->mnan = cpu_to_le32(NVMET_MAX_NAMESPACES);
	id->oncs = cpu_to_le16(NVME_CTRL_ONCS_DSM |
			NVME_CTRL_ONCS_WRITE_ZEROES |
			NVME_CTRL_ONCS_RESERVATIONS);

	/* XXX: don't report vwc if the underlying device is write through */
	id->vwc = NVME_CTRL_VWC_PRESENT;

	/*
	 * We can't support atomic writes bigger than an LBA without support
	 * from the backend device.
	 */
	id->awun = 0;
	id->awupf = 0;

	/* we always support SGLs */
	id->sgls = cpu_to_le32(NVME_CTRL_SGLS_BYTE_ALIGNED);
	if (ctrl->ops->flags & NVMF_KEYED_SGLS)
		id->sgls |= cpu_to_le32(NVME_CTRL_SGLS_KSDBDS);
	if (req->port->inline_data_size)
		id->sgls |= cpu_to_le32(NVME_CTRL_SGLS_SAOS);

	strscpy(id->subnqn, ctrl->subsys->subsysnqn, sizeof(id->subnqn));

	/*
	 * Max command capsule size is sqe + in-capsule data size.
	 * Disable in-capsule data for Metadata capable controllers.
	 */
	cmd_capsule_size = sizeof(struct nvme_command);
	if (!ctrl->pi_support)
		cmd_capsule_size += req->port->inline_data_size;
	id->ioccsz = cpu_to_le32(cmd_capsule_size / 16);

	/* Max response capsule size is cqe */
	id->iorcsz = cpu_to_le32(sizeof(struct nvme_completion) / 16);

	id->msdbd = ctrl->ops->msdbd;

	/*
	 * Endurance group identifier is 16 bits, so we can't let namespaces
	 * overflow that since we reuse the nsid
	 */
	BUILD_BUG_ON(NVMET_MAX_NAMESPACES > USHRT_MAX);
	id->endgidmax = cpu_to_le16(NVMET_MAX_NAMESPACES);

	id->anacap = (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3) | (1 << 4);
	id->anatt = 10; /* random value */
	id->anagrpmax = cpu_to_le32(NVMET_MAX_ANAGRPS);
	id->nanagrpid = cpu_to_le32(NVMET_MAX_ANAGRPS);

	/*
	 * Meh, we don't really support any power state.  Fake up the same
	 * values that qemu does.
	 */
	id->psd[0].max_power = cpu_to_le16(0x9c4);
	id->psd[0].entry_lat = cpu_to_le32(0x10);
	id->psd[0].exit_lat = cpu_to_le32(0x4);

	id->nwpc = 1 << 0; /* write protect and no write protect */

	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

	kfree(id);
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_identify_ns(struct nvmet_req *req)
{
	struct nvme_id_ns *id;
	u16 status;

	if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) {
		req->error_loc = offsetof(struct nvme_identify, nsid);
		status = NVME_SC_INVALID_NS | NVME_STATUS_DNR;
		goto out;
	}

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	/* return an all zeroed buffer if we can't find an active namespace */
	status = nvmet_req_find_ns(req);
	if (status) {
		status = 0;
		goto done;
	}

	if (nvmet_ns_revalidate(req->ns)) {
		mutex_lock(&req->ns->subsys->lock);
		nvmet_ns_changed(req->ns->subsys, req->ns->nsid);
		mutex_unlock(&req->ns->subsys->lock);
	}

	/*
	 * nuse = ncap = nsze isn't always true, but we have no way to find
	 * that out from the underlying device.
	 */
	id->ncap = id->nsze =
		cpu_to_le64(req->ns->size >> req->ns->blksize_shift);
	switch (req->port->ana_state[req->ns->anagrpid]) {
	case NVME_ANA_INACCESSIBLE:
	case NVME_ANA_PERSISTENT_LOSS:
		break;
	default:
		id->nuse = id->nsze;
		break;
	}

	if (req->ns->bdev)
		nvmet_bdev_set_limits(req->ns->bdev, id);

	/*
	 * We just provide a single LBA format that matches what the
	 * underlying device reports.
	 */
	id->nlbaf = 0;
	id->flbas = 0;

	/*
	 * Our namespace might always be shared.  Not just with other
	 * controllers, but also with any other user of the block device.
	 */
	id->nmic = NVME_NS_NMIC_SHARED;
	id->anagrpid = cpu_to_le32(req->ns->anagrpid);

	if (req->ns->pr.enable)
		id->rescap = NVME_PR_SUPPORT_WRITE_EXCLUSIVE |
			NVME_PR_SUPPORT_EXCLUSIVE_ACCESS |
			NVME_PR_SUPPORT_WRITE_EXCLUSIVE_REG_ONLY |
			NVME_PR_SUPPORT_EXCLUSIVE_ACCESS_REG_ONLY |
			NVME_PR_SUPPORT_WRITE_EXCLUSIVE_ALL_REGS |
			NVME_PR_SUPPORT_EXCLUSIVE_ACCESS_ALL_REGS |
			NVME_PR_SUPPORT_IEKEY_VER_1_3_DEF;

	/*
	 * Since we don't know any better, every namespace is its own endurance
	 * group.
	 */
	id->endgid = cpu_to_le16(req->ns->nsid);

	memcpy(&id->nguid, &req->ns->nguid, sizeof(id->nguid));

	id->lbaf[0].ds = req->ns->blksize_shift;

	if (req->sq->ctrl->pi_support && nvmet_ns_has_pi(req->ns)) {
		id->dpc = NVME_NS_DPC_PI_FIRST | NVME_NS_DPC_PI_LAST |
			  NVME_NS_DPC_PI_TYPE1 | NVME_NS_DPC_PI_TYPE2 |
			  NVME_NS_DPC_PI_TYPE3;
		id->mc = NVME_MC_EXTENDED_LBA;
		id->dps = req->ns->pi_type;
		id->flbas = NVME_NS_FLBAS_META_EXT;
		id->lbaf[0].ms = cpu_to_le16(req->ns->metadata_size);
	}

	if (req->ns->readonly)
		id->nsattr |= NVME_NS_ATTR_RO;
done:
	if (!status)
		status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

	kfree(id);
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_identify_endgrp_list(struct nvmet_req *req)
{
	u16 min_endgid = le16_to_cpu(req->cmd->identify.cnssid);
	static const int buf_size = NVME_IDENTIFY_DATA_SIZE;
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmet_ns *ns;
	unsigned long idx;
	__le16 *list;
	u16 status;
	int i = 1;

	list = kzalloc(buf_size, GFP_KERNEL);
	if (!list) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
		if (ns->nsid <= min_endgid)
			continue;

		list[i++] = cpu_to_le16(ns->nsid);
		if (i == buf_size / sizeof(__le16))
			break;
	}

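	/* The first __le16 of the list carries the number of identifiers. */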
	list[0] = cpu_to_le16(i - 1);
	status = nvmet_copy_to_sgl(req, 0, list, buf_size);
	kfree(list);
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_identify_nslist(struct nvmet_req *req, bool match_css)
{
	static const int buf_size = NVME_IDENTIFY_DATA_SIZE;
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmet_ns *ns;
	unsigned long idx;
	u32 min_nsid = le32_to_cpu(req->cmd->identify.nsid);
	__le32 *list;
	u16 status = 0;
	int i = 0;

	/*
	 * NSID values 0xFFFFFFFE and NVME_NSID_ALL are invalid
	 * See NVMe Base Specification, Active Namespace ID list (CNS 02h).
	 */
	if (min_nsid == 0xFFFFFFFE || min_nsid == NVME_NSID_ALL) {
		req->error_loc = offsetof(struct nvme_identify, nsid);
		status = NVME_SC_INVALID_NS | NVME_STATUS_DNR;
		goto out;
	}

	list = kzalloc(buf_size, GFP_KERNEL);
	if (!list) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
		if (ns->nsid <= min_nsid)
			continue;
		if (match_css && ns->csi != req->cmd->identify.csi)
			continue;
		list[i++] = cpu_to_le32(ns->nsid);
		if (i == buf_size / sizeof(__le32))
			break;
	}

	status = nvmet_copy_to_sgl(req, 0, list, buf_size);

	kfree(list);
out:
	nvmet_req_complete(req, status);
}

static u16 nvmet_copy_ns_identifier(struct nvmet_req *req, u8 type, u8 len,
				    void *id, off_t *off)
{
	struct nvme_ns_id_desc desc = {
		.nidt = type,
		.nidl = len,
	};
	u16 status;

	status = nvmet_copy_to_sgl(req, *off, &desc, sizeof(desc));
	if (status)
		return status;
	*off += sizeof(desc);

	status = nvmet_copy_to_sgl(req, *off, id, len);
	if (status)
		return status;
	*off += len;

	return 0;
}

static void nvmet_execute_identify_desclist(struct nvmet_req *req)
{
	off_t off = 0;
	u16 status;

	status = nvmet_req_find_ns(req);
	if (status)
		goto out;

	if (memchr_inv(&req->ns->uuid, 0, sizeof(req->ns->uuid))) {
		status = nvmet_copy_ns_identifier(req, NVME_NIDT_UUID,
						  NVME_NIDT_UUID_LEN,
						  &req->ns->uuid, &off);
		if (status)
			goto out;
	}
	if (memchr_inv(req->ns->nguid, 0, sizeof(req->ns->nguid))) {
		status = nvmet_copy_ns_identifier(req, NVME_NIDT_NGUID,
						  NVME_NIDT_NGUID_LEN,
						  &req->ns->nguid, &off);
		if (status)
			goto out;
	}

	status = nvmet_copy_ns_identifier(req, NVME_NIDT_CSI,
					  NVME_NIDT_CSI_LEN,
					  &req->ns->csi, &off);
	if (status)
		goto out;

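	/* Zero the remainder so no stale descriptors leak to the host. */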
	if (sg_zero_buffer(req->sg, req->sg_cnt, NVME_IDENTIFY_DATA_SIZE - off,
			off) != NVME_IDENTIFY_DATA_SIZE - off)
		status = NVME_SC_INTERNAL | NVME_STATUS_DNR;

out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_identify_ctrl_nvm(struct nvmet_req *req)
{
	/* Not supported: return zeroes */
	nvmet_req_complete(req,
		   nvmet_zero_sgl(req, 0, sizeof(struct nvme_id_ctrl_nvm)));
}

static void nvme_execute_identify_ns_nvm(struct nvmet_req *req)
{
	u16 status;

	status = nvmet_req_find_ns(req);
	if (status)
		goto out;

	status = nvmet_copy_to_sgl(req, 0, ZERO_PAGE(0),
				   NVME_IDENTIFY_DATA_SIZE);
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_id_cs_indep(struct nvmet_req *req)
{
	struct nvme_id_ns_cs_indep *id;
	u16 status;

	status = nvmet_req_find_ns(req);
	if (status)
		goto out;

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	id->nstat = NVME_NSTAT_NRDY;
	id->anagrpid = cpu_to_le32(req->ns->anagrpid);
	id->nmic = NVME_NS_NMIC_SHARED;
	if (req->ns->readonly)
		id->nsattr |= NVME_NS_ATTR_RO;
	if (req->ns->bdev && !bdev_nonrot(req->ns->bdev))
		id->nsfeat |= NVME_NS_ROTATIONAL;
	/*
	 * We need the flush command to flush the file's metadata, so keep
	 * reporting a volatile write cache for file-backed namespaces even
	 * when buffered_io is disabled; only a block device without a
	 * write cache gets VWC_NOT_PRESENT.
	 */
	if (req->ns->bdev && !bdev_write_cache(req->ns->bdev))
		id->nsfeat |= NVME_NS_VWC_NOT_PRESENT;

	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
	kfree(id);
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_identify(struct nvmet_req *req)
{
	if (!nvmet_check_transfer_len(req, NVME_IDENTIFY_DATA_SIZE))
		return;

	switch (req->cmd->identify.cns) {
	case NVME_ID_CNS_NS:
		nvmet_execute_identify_ns(req);
		return;
	case NVME_ID_CNS_CTRL:
		nvmet_execute_identify_ctrl(req);
		return;
	case NVME_ID_CNS_NS_ACTIVE_LIST:
		nvmet_execute_identify_nslist(req, false);
		return;
	case NVME_ID_CNS_NS_DESC_LIST:
		nvmet_execute_identify_desclist(req);
		return;
	case NVME_ID_CNS_CS_NS:
		switch (req->cmd->identify.csi) {
		case NVME_CSI_NVM:
			nvme_execute_identify_ns_nvm(req);
			return;
		case NVME_CSI_ZNS:
			if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
				nvmet_execute_identify_ns_zns(req);
				return;
			}
			break;
		}
		break;
	case NVME_ID_CNS_CS_CTRL:
		switch (req->cmd->identify.csi) {
		case NVME_CSI_NVM:
			nvmet_execute_identify_ctrl_nvm(req);
			return;
		case NVME_CSI_ZNS:
			if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
				nvmet_execute_identify_ctrl_zns(req);
				return;
			}
			break;
		}
		break;
	case NVME_ID_CNS_NS_ACTIVE_LIST_CS:
		nvmet_execute_identify_nslist(req, true);
		return;
	case NVME_ID_CNS_NS_CS_INDEP:
		nvmet_execute_id_cs_indep(req);
		return;
	case NVME_ID_CNS_ENDGRP_LIST:
		nvmet_execute_identify_endgrp_list(req);
		return;
	}

	pr_debug("unhandled identify cns %d on qid %d\n",
	       req->cmd->identify.cns, req->sq->qid);
	req->error_loc = offsetof(struct nvme_identify, cns);
	nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_STATUS_DNR);
}

/*
 * A "minimum viable" abort implementation: the command is mandatory in the
 * spec, but we are not required to do any useful work.  We couldn't really
 * do a useful abort, so don't even bother waiting for the command to be
 * executed; return immediately, indicating that the command to abort was
 * not found.
 */
static void nvmet_execute_abort(struct nvmet_req *req)
{
	if (!nvmet_check_transfer_len(req, 0))
		return;
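	/* Completion dword 0 bit 0 set: the specified command was not aborted. */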
	nvmet_set_result(req, 1);
	nvmet_req_complete(req, 0);
}

static u16 nvmet_write_protect_flush_sync(struct nvmet_req *req)
{
	u16 status;

	if (req->ns->file)
		status = nvmet_file_flush(req);
	else
		status = nvmet_bdev_flush(req);

	if (status)
		pr_err("write protect flush failed nsid: %u\n", req->ns->nsid);
	return status;
}

static u16 nvmet_set_feat_write_protect(struct nvmet_req *req)
{
	u32 write_protect = le32_to_cpu(req->cmd->common.cdw11);
	struct nvmet_subsys *subsys = nvmet_req_subsys(req);
	u16 status;

	status = nvmet_req_find_ns(req);
	if (status)
		return status;

	mutex_lock(&subsys->lock);
	switch (write_protect) {
	case NVME_NS_WRITE_PROTECT:
		req->ns->readonly = true;
		status = nvmet_write_protect_flush_sync(req);
		if (status)
			req->ns->readonly = false;
		break;
	case NVME_NS_NO_WRITE_PROTECT:
		req->ns->readonly = false;
		status = 0;
		break;
	default:
		break;
	}

	if (!status)
		nvmet_ns_changed(subsys, req->ns->nsid);
	mutex_unlock(&subsys->lock);
	return status;
}

u16 nvmet_set_feat_kato(struct nvmet_req *req)
{
	u32 val32 = le32_to_cpu(req->cmd->common.cdw11);

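	/* cdw11 carries KATO in milliseconds; the target tracks whole seconds. */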
	nvmet_stop_keep_alive_timer(req->sq->ctrl);
	req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000);
	nvmet_start_keep_alive_timer(req->sq->ctrl);

	nvmet_set_result(req, req->sq->ctrl->kato);

	return 0;
}

u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask)
{
	u32 val32 = le32_to_cpu(req->cmd->common.cdw11);

	if (val32 & ~mask) {
		req->error_loc = offsetof(struct nvme_common_command, cdw11);
		return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
	}

	WRITE_ONCE(req->sq->ctrl->aen_enabled, val32);
	nvmet_set_result(req, val32);

	return 0;
}

void nvmet_execute_set_features(struct nvmet_req *req)
{
	struct nvmet_subsys *subsys = nvmet_req_subsys(req);
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
	u32 cdw11 = le32_to_cpu(req->cmd->common.cdw11);
	u16 status = 0;
	u16 nsqr;
	u16 ncqr;

	if (!nvmet_check_data_len_lte(req, 0))
		return;

	switch (cdw10 & 0xff) {
	case NVME_FEAT_NUM_QUEUES:
		ncqr = (cdw11 >> 16) & 0xffff;
		nsqr = cdw11 & 0xffff;
		if (ncqr == 0xffff || nsqr == 0xffff) {
			status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
			break;
		}
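		/* NCQR/NSQR are zero's based; report max_qid - 1 for both. */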
		nvmet_set_result(req,
			(subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
		break;
	case NVME_FEAT_KATO:
		status = nvmet_set_feat_kato(req);
		break;
	case NVME_FEAT_ASYNC_EVENT:
		status = nvmet_set_feat_async_event(req, NVMET_AEN_CFG_ALL);
		break;
	case NVME_FEAT_HOST_ID:
		status = NVME_SC_CMD_SEQ_ERROR | NVME_STATUS_DNR;
		break;
	case NVME_FEAT_WRITE_PROTECT:
		status = nvmet_set_feat_write_protect(req);
		break;
	case NVME_FEAT_RESV_MASK:
		status = nvmet_set_feat_resv_notif_mask(req, cdw11);
		break;
	default:
		req->error_loc = offsetof(struct nvme_common_command, cdw10);
		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
		break;
	}

	nvmet_req_complete(req, status);
}

static u16 nvmet_get_feat_write_protect(struct nvmet_req *req)
{
	struct nvmet_subsys *subsys = nvmet_req_subsys(req);
	u32 result;

	result = nvmet_req_find_ns(req);
	if (result)
		return result;

	mutex_lock(&subsys->lock);
	if (req->ns->readonly)
		result = NVME_NS_WRITE_PROTECT;
	else
		result = NVME_NS_NO_WRITE_PROTECT;
	nvmet_set_result(req, result);
	mutex_unlock(&subsys->lock);

	return 0;
}

void nvmet_get_feat_kato(struct nvmet_req *req)
{
	nvmet_set_result(req, req->sq->ctrl->kato * 1000);
}

void nvmet_get_feat_async_event(struct nvmet_req *req)
{
	nvmet_set_result(req, READ_ONCE(req->sq->ctrl->aen_enabled));
}

void nvmet_execute_get_features(struct nvmet_req *req)
{
	struct nvmet_subsys *subsys = nvmet_req_subsys(req);
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
	u16 status = 0;

	if (!nvmet_check_transfer_len(req, nvmet_feat_data_len(req, cdw10)))
		return;

	switch (cdw10 & 0xff) {
	/*
	 * These features are mandatory in the spec, but we don't
	 * have a useful way to implement them.  We'll eventually
	 * need to come up with some fake values for these.
	 */
#if 0
	case NVME_FEAT_ARBITRATION:
		break;
	case NVME_FEAT_POWER_MGMT:
		break;
	case NVME_FEAT_TEMP_THRESH:
		break;
	case NVME_FEAT_ERR_RECOVERY:
		break;
	case NVME_FEAT_IRQ_COALESCE:
		break;
	case NVME_FEAT_IRQ_CONFIG:
		break;
	case NVME_FEAT_WRITE_ATOMIC:
		break;
#endif
	case NVME_FEAT_ASYNC_EVENT:
		nvmet_get_feat_async_event(req);
		break;
	case NVME_FEAT_VOLATILE_WC:
		nvmet_set_result(req, 1);
		break;
	case NVME_FEAT_NUM_QUEUES:
		nvmet_set_result(req,
			(subsys->max_qid-1) | ((subsys->max_qid-1) << 16));
		break;
	case NVME_FEAT_KATO:
		nvmet_get_feat_kato(req);
		break;
	case NVME_FEAT_HOST_ID:
		/* need 128-bit host identifier flag */
		if (!(req->cmd->common.cdw11 & cpu_to_le32(1 << 0))) {
			req->error_loc =
				offsetof(struct nvme_common_command, cdw11);
			status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
			break;
		}

		status = nvmet_copy_to_sgl(req, 0, &req->sq->ctrl->hostid,
				sizeof(req->sq->ctrl->hostid));
		break;
	case NVME_FEAT_WRITE_PROTECT:
		status = nvmet_get_feat_write_protect(req);
		break;
	case NVME_FEAT_RESV_MASK:
		status = nvmet_get_feat_resv_notif_mask(req);
		break;
	default:
		req->error_loc =
			offsetof(struct nvme_common_command, cdw10);
		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
		break;
	}

	nvmet_req_complete(req, status);
}

void nvmet_execute_async_event(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;

	if (!nvmet_check_transfer_len(req, 0))
		return;

	mutex_lock(&ctrl->lock);
	if (ctrl->nr_async_event_cmds >= NVMET_ASYNC_EVENTS) {
		mutex_unlock(&ctrl->lock);
		nvmet_req_complete(req, NVME_SC_ASYNC_LIMIT | NVME_STATUS_DNR);
		return;
	}
	ctrl->async_event_cmds[ctrl->nr_async_event_cmds++] = req;
	mutex_unlock(&ctrl->lock);

	queue_work(nvmet_wq, &ctrl->async_event_work);
}

void nvmet_execute_keep_alive(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	u16 status = 0;

	if (!nvmet_check_transfer_len(req, 0))
		return;

	if (!ctrl->kato) {
		status = NVME_SC_KA_TIMEOUT_INVALID;
		goto out;
	}

	pr_debug("ctrl %d update keep-alive timer for %d secs\n",
		ctrl->cntlid, ctrl->kato);
	mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
out:
	nvmet_req_complete(req, status);
}

u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;
	u16 ret;

	if (nvme_is_fabrics(cmd))
		return nvmet_parse_fabrics_admin_cmd(req);
	if (nvmet_is_disc_subsys(nvmet_req_subsys(req)))
		return nvmet_parse_discovery_cmd(req);

	ret = nvmet_check_ctrl_status(req);
	if (unlikely(ret))
		return ret;

	if (nvmet_is_passthru_req(req))
		return nvmet_parse_passthru_admin_cmd(req);

	switch (cmd->common.opcode) {
	case nvme_admin_get_log_page:
		req->execute = nvmet_execute_get_log_page;
		return 0;
	case nvme_admin_identify:
		req->execute = nvmet_execute_identify;
		return 0;
	case nvme_admin_abort_cmd:
		req->execute = nvmet_execute_abort;
		return 0;
	case nvme_admin_set_features:
		req->execute = nvmet_execute_set_features;
		return 0;
	case nvme_admin_get_features:
		req->execute = nvmet_execute_get_features;
		return 0;
	case nvme_admin_async_event:
		req->execute = nvmet_execute_async_event;
		return 0;
	case nvme_admin_keep_alive:
		req->execute = nvmet_execute_keep_alive;
		return 0;
	default:
		return nvmet_report_invalid_opcode(req);
	}
}