1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * NVMe admin command implementation.
4 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
5 */
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7 #include <linux/module.h>
8 #include <linux/rculist.h>
9 #include <linux/part_stat.h>
10
11 #include <generated/utsrelease.h>
12 #include <linux/unaligned.h>
13 #include "nvmet.h"
14
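/*
 * Transfer length in bytes for a Get Log Page command: NUMDU:NUMDL form a
 * zero-based dword count, so add one before converting to bytes.
 */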
15 u32 nvmet_get_log_page_len(struct nvme_command *cmd)
16 {
17 u32 len = le16_to_cpu(cmd->get_log_page.numdu);
18
19 len <<= 16;
20 len += le16_to_cpu(cmd->get_log_page.numdl);
21 /* NUMD is a 0's based value */
22 len += 1;
23 len *= sizeof(u32);
24
25 return len;
26 }
27
28 static u32 nvmet_feat_data_len(struct nvmet_req *req, u32 cdw10)
29 {
30 switch (cdw10 & 0xff) {
31 case NVME_FEAT_HOST_ID:
32 return sizeof(req->sq->ctrl->hostid);
33 default:
34 return 0;
35 }
36 }
37
38 u64 nvmet_get_log_page_offset(struct nvme_command *cmd)
39 {
40 return le64_to_cpu(cmd->get_log_page.lpo);
41 }
42
43 static void nvmet_execute_get_log_page_noop(struct nvmet_req *req)
44 {
45 nvmet_req_complete(req, nvmet_zero_sgl(req, 0, req->transfer_len));
46 }
47
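/*
 * Error Information log: copy out the error slots starting at the slot
 * indexed by the current error counter and walking backwards through the
 * ring of NVMET_ERROR_LOG_SLOTS entries, all under error_lock.
 */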
48 static void nvmet_execute_get_log_page_error(struct nvmet_req *req)
49 {
50 struct nvmet_ctrl *ctrl = req->sq->ctrl;
51 unsigned long flags;
52 off_t offset = 0;
53 u64 slot;
54 u64 i;
55
56 spin_lock_irqsave(&ctrl->error_lock, flags);
57 slot = ctrl->err_counter % NVMET_ERROR_LOG_SLOTS;
58
59 for (i = 0; i < NVMET_ERROR_LOG_SLOTS; i++) {
60 if (nvmet_copy_to_sgl(req, offset, &ctrl->slots[slot],
61 sizeof(struct nvme_error_slot)))
62 break;
63
64 if (slot == 0)
65 slot = NVMET_ERROR_LOG_SLOTS - 1;
66 else
67 slot--;
68 offset += sizeof(struct nvme_error_slot);
69 }
70 spin_unlock_irqrestore(&ctrl->error_lock, flags);
71 nvmet_req_complete(req, 0);
72 }
73
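/*
 * Supported Log Pages log: flag every log identifier this target
 * implements as supported (LSUPP).
 */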
74 static void nvmet_execute_get_supported_log_pages(struct nvmet_req *req)
75 {
76 struct nvme_supported_log *logs;
77 u16 status;
78
79 logs = kzalloc(sizeof(*logs), GFP_KERNEL);
80 if (!logs) {
81 status = NVME_SC_INTERNAL;
82 goto out;
83 }
84
85 logs->lids[NVME_LOG_SUPPORTED] = cpu_to_le32(NVME_LIDS_LSUPP);
86 logs->lids[NVME_LOG_ERROR] = cpu_to_le32(NVME_LIDS_LSUPP);
87 logs->lids[NVME_LOG_SMART] = cpu_to_le32(NVME_LIDS_LSUPP);
88 logs->lids[NVME_LOG_FW_SLOT] = cpu_to_le32(NVME_LIDS_LSUPP);
89 logs->lids[NVME_LOG_CHANGED_NS] = cpu_to_le32(NVME_LIDS_LSUPP);
90 logs->lids[NVME_LOG_CMD_EFFECTS] = cpu_to_le32(NVME_LIDS_LSUPP);
91 logs->lids[NVME_LOG_ENDURANCE_GROUP] = cpu_to_le32(NVME_LIDS_LSUPP);
92 logs->lids[NVME_LOG_ANA] = cpu_to_le32(NVME_LIDS_LSUPP);
93 logs->lids[NVME_LOG_FEATURES] = cpu_to_le32(NVME_LIDS_LSUPP);
94 logs->lids[NVME_LOG_RMI] = cpu_to_le32(NVME_LIDS_LSUPP);
95 logs->lids[NVME_LOG_RESERVATION] = cpu_to_le32(NVME_LIDS_LSUPP);
96
97 status = nvmet_copy_to_sgl(req, 0, logs, sizeof(*logs));
98 kfree(logs);
99 out:
100 nvmet_req_complete(req, status);
101 }
102
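/*
 * Fill the per-namespace I/O counters of the SMART log from the backing
 * block device statistics; data units are reported in thousands of
 * sectors, rounded up. File-backed namespaces have no such statistics and
 * are left at zero.
 */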
103 static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
104 struct nvme_smart_log *slog)
105 {
106 u64 host_reads, host_writes, data_units_read, data_units_written;
107 u16 status;
108
109 status = nvmet_req_find_ns(req);
110 if (status)
111 return status;
112
113 /* we don't have the right data for file backed ns */
114 if (!req->ns->bdev)
115 return NVME_SC_SUCCESS;
116
117 host_reads = part_stat_read(req->ns->bdev, ios[READ]);
118 data_units_read =
119 DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[READ]), 1000);
120 host_writes = part_stat_read(req->ns->bdev, ios[WRITE]);
121 data_units_written =
122 DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[WRITE]), 1000);
123
124 put_unaligned_le64(host_reads, &slog->host_reads[0]);
125 put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
126 put_unaligned_le64(host_writes, &slog->host_writes[0]);
127 put_unaligned_le64(data_units_written, &slog->data_units_written[0]);
128
129 return NVME_SC_SUCCESS;
130 }
131
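/*
 * Controller-wide SMART counters: sum the block device statistics of all
 * enabled block-device-backed namespaces in the subsystem.
 */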
132 static u16 nvmet_get_smart_log_all(struct nvmet_req *req,
133 struct nvme_smart_log *slog)
134 {
135 u64 host_reads = 0, host_writes = 0;
136 u64 data_units_read = 0, data_units_written = 0;
137 struct nvmet_ns *ns;
138 struct nvmet_ctrl *ctrl;
139 unsigned long idx;
140
141 ctrl = req->sq->ctrl;
142 nvmet_for_each_enabled_ns(&ctrl->subsys->namespaces, idx, ns) {
143 /* we don't have the right data for file backed ns */
144 if (!ns->bdev)
145 continue;
146 host_reads += part_stat_read(ns->bdev, ios[READ]);
147 data_units_read += DIV_ROUND_UP(
148 part_stat_read(ns->bdev, sectors[READ]), 1000);
149 host_writes += part_stat_read(ns->bdev, ios[WRITE]);
150 data_units_written += DIV_ROUND_UP(
151 part_stat_read(ns->bdev, sectors[WRITE]), 1000);
152 }
153
154 put_unaligned_le64(host_reads, &slog->host_reads[0]);
155 put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
156 put_unaligned_le64(host_writes, &slog->host_writes[0]);
157 put_unaligned_le64(data_units_written, &slog->data_units_written[0]);
158
159 return NVME_SC_SUCCESS;
160 }
161
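/*
 * Rotational Media Information log. The endurance group identifier passed
 * in LSI is reused as the namespace ID, since this target exposes each
 * namespace as its own endurance group.
 */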
162 static void nvmet_execute_get_log_page_rmi(struct nvmet_req *req)
163 {
164 struct nvme_rotational_media_log *log;
165 struct gendisk *disk;
166 u16 status;
167
168 req->cmd->common.nsid = cpu_to_le32(le16_to_cpu(
169 req->cmd->get_log_page.lsi));
170 status = nvmet_req_find_ns(req);
171 if (status)
172 goto out;
173
174 if (!req->ns->bdev || bdev_nonrot(req->ns->bdev)) {
175 status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
176 goto out;
177 }
178
179 if (req->transfer_len != sizeof(*log)) {
180 status = NVME_SC_SGL_INVALID_DATA | NVME_STATUS_DNR;
181 goto out;
182 }
183
184 log = kzalloc(sizeof(*log), GFP_KERNEL);
185 if (!log) {
186 status = NVME_SC_INTERNAL;
goto out;
}
187
188 log->endgid = req->cmd->get_log_page.lsi;
189 disk = req->ns->bdev->bd_disk;
190 if (disk && disk->ia_ranges)
191 log->numa = cpu_to_le16(disk->ia_ranges->nr_ia_ranges);
192 else
193 log->numa = cpu_to_le16(1);
194
195 status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
196 kfree(log);
197 out:
198 nvmet_req_complete(req, status);
199 }
200
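/*
 * SMART / Health Information log: per-namespace or controller-wide I/O
 * counters plus the number of error log entries recorded so far.
 */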
201 static void nvmet_execute_get_log_page_smart(struct nvmet_req *req)
202 {
203 struct nvme_smart_log *log;
204 u16 status = NVME_SC_INTERNAL;
205 unsigned long flags;
206
207 if (req->transfer_len != sizeof(*log))
208 goto out;
209
210 log = kzalloc(sizeof(*log), GFP_KERNEL);
211 if (!log)
212 goto out;
213
214 if (req->cmd->get_log_page.nsid == cpu_to_le32(NVME_NSID_ALL))
215 status = nvmet_get_smart_log_all(req, log);
216 else
217 status = nvmet_get_smart_log_nsid(req, log);
218 if (status)
219 goto out_free_log;
220
221 spin_lock_irqsave(&req->sq->ctrl->error_lock, flags);
222 put_unaligned_le64(req->sq->ctrl->err_counter,
223 &log->num_err_log_entries);
224 spin_unlock_irqrestore(&req->sq->ctrl->error_lock, flags);
225
226 status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
227 out_free_log:
228 kfree(log);
229 out:
230 nvmet_req_complete(req, status);
231 }
232
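/*
 * Commands Supported and Effects log: mark the admin and NVM I/O commands
 * this target implements, flagging the ones that change logical block
 * content (LBCC) accordingly.
 */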
233 static void nvmet_get_cmd_effects_nvm(struct nvme_effects_log *log)
234 {
235 log->acs[nvme_admin_get_log_page] =
236 log->acs[nvme_admin_identify] =
237 log->acs[nvme_admin_abort_cmd] =
238 log->acs[nvme_admin_set_features] =
239 log->acs[nvme_admin_get_features] =
240 log->acs[nvme_admin_async_event] =
241 log->acs[nvme_admin_keep_alive] =
242 cpu_to_le32(NVME_CMD_EFFECTS_CSUPP);
243
244 log->iocs[nvme_cmd_read] =
245 log->iocs[nvme_cmd_flush] =
246 log->iocs[nvme_cmd_dsm] =
247 log->iocs[nvme_cmd_resv_acquire] =
248 log->iocs[nvme_cmd_resv_register] =
249 log->iocs[nvme_cmd_resv_release] =
250 log->iocs[nvme_cmd_resv_report] =
251 cpu_to_le32(NVME_CMD_EFFECTS_CSUPP);
252 log->iocs[nvme_cmd_write] =
253 log->iocs[nvme_cmd_write_zeroes] =
254 cpu_to_le32(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC);
255 }
256
257 static void nvmet_get_cmd_effects_zns(struct nvme_effects_log *log)
258 {
259 log->iocs[nvme_cmd_zone_append] =
260 log->iocs[nvme_cmd_zone_mgmt_send] =
261 cpu_to_le32(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC);
262 log->iocs[nvme_cmd_zone_mgmt_recv] =
263 cpu_to_le32(NVME_CMD_EFFECTS_CSUPP);
264 }
265
266 static void nvmet_execute_get_log_cmd_effects_ns(struct nvmet_req *req)
267 {
268 struct nvme_effects_log *log;
269 u16 status = NVME_SC_SUCCESS;
270
271 log = kzalloc(sizeof(*log), GFP_KERNEL);
272 if (!log) {
273 status = NVME_SC_INTERNAL;
274 goto out;
275 }
276
277 switch (req->cmd->get_log_page.csi) {
278 case NVME_CSI_NVM:
279 nvmet_get_cmd_effects_nvm(log);
280 break;
281 case NVME_CSI_ZNS:
282 if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
283 status = NVME_SC_INVALID_IO_CMD_SET;
284 goto free;
285 }
286 nvmet_get_cmd_effects_nvm(log);
287 nvmet_get_cmd_effects_zns(log);
288 break;
289 default:
290 status = NVME_SC_INVALID_LOG_PAGE;
291 goto free;
292 }
293
294 status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
295 free:
296 kfree(log);
297 out:
298 nvmet_req_complete(req, status);
299 }
300
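/*
 * Changed Namespace List log: return the NSIDs recorded since the last
 * read (or a single entry when the tracked count indicates overflow), zero
 * the remainder of the buffer, then reset the list and clear the pending
 * NS_ATTR AEN.
 */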
301 static void nvmet_execute_get_log_changed_ns(struct nvmet_req *req)
302 {
303 struct nvmet_ctrl *ctrl = req->sq->ctrl;
304 u16 status = NVME_SC_INTERNAL;
305 size_t len;
306
307 if (req->transfer_len != NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32))
308 goto out;
309
310 mutex_lock(&ctrl->lock);
311 if (ctrl->nr_changed_ns == U32_MAX)
312 len = sizeof(__le32);
313 else
314 len = ctrl->nr_changed_ns * sizeof(__le32);
315 status = nvmet_copy_to_sgl(req, 0, ctrl->changed_ns_list, len);
316 if (!status)
317 status = nvmet_zero_sgl(req, len, req->transfer_len - len);
318 ctrl->nr_changed_ns = 0;
319 nvmet_clear_aen_bit(req, NVME_AEN_BIT_NS_ATTR);
320 mutex_unlock(&ctrl->lock);
321 out:
322 nvmet_req_complete(req, status);
323 }
324
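/*
 * Format one ANA group descriptor: unless the host asked for groups only
 * (RGO), list the NSIDs of all enabled namespaces in the group. Returns
 * the descriptor size in bytes.
 */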
325 static u32 nvmet_format_ana_group(struct nvmet_req *req, u32 grpid,
326 struct nvme_ana_group_desc *desc)
327 {
328 struct nvmet_ctrl *ctrl = req->sq->ctrl;
329 struct nvmet_ns *ns;
330 unsigned long idx;
331 u32 count = 0;
332
333 if (!(req->cmd->get_log_page.lsp & NVME_ANA_LOG_RGO)) {
334 nvmet_for_each_enabled_ns(&ctrl->subsys->namespaces, idx, ns) {
335 if (ns->anagrpid == grpid)
336 desc->nsids[count++] = cpu_to_le32(ns->nsid);
337 }
338 }
339
340 desc->grpid = cpu_to_le32(grpid);
341 desc->nnsids = cpu_to_le32(count);
342 desc->chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
343 desc->state = req->port->ana_state[grpid];
344 memset(desc->rsvd17, 0, sizeof(desc->rsvd17));
345 return struct_size(desc, nsids, count);
346 }
347
348 static void nvmet_execute_get_log_page_endgrp(struct nvmet_req *req)
349 {
350 u64 host_reads, host_writes, data_units_read, data_units_written;
351 struct nvme_endurance_group_log *log;
352 u16 status;
353
354 /*
355 * The target driver emulates each endurance group as its own
356 * namespace, reusing the nsid as the endurance group identifier.
357 */
358 req->cmd->common.nsid = cpu_to_le32(le16_to_cpu(
359 req->cmd->get_log_page.lsi));
360 status = nvmet_req_find_ns(req);
361 if (status)
362 goto out;
363
364 log = kzalloc(sizeof(*log), GFP_KERNEL);
365 if (!log) {
366 status = NVME_SC_INTERNAL;
367 goto out;
368 }
369
370 if (!req->ns->bdev)
371 goto copy;
372
373 host_reads = part_stat_read(req->ns->bdev, ios[READ]);
374 data_units_read =
375 DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[READ]), 1000);
376 host_writes = part_stat_read(req->ns->bdev, ios[WRITE]);
377 data_units_written =
378 DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[WRITE]), 1000);
379
380 put_unaligned_le64(host_reads, &log->hrc[0]);
381 put_unaligned_le64(data_units_read, &log->dur[0]);
382 put_unaligned_le64(host_writes, &log->hwc[0]);
383 put_unaligned_le64(data_units_written, &log->duw[0]);
384 copy:
385 status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
386 kfree(log);
387 out:
388 nvmet_req_complete(req, status);
389 }
390
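/*
 * ANA log page: write one descriptor per enabled ANA group after the
 * header, then count any remaining enabled groups so ngrps reflects all of
 * them, and copy the header (change count and number of groups) last.
 * Reading the log clears the pending ANA change AEN.
 */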
391 static void nvmet_execute_get_log_page_ana(struct nvmet_req *req)
392 {
393 struct nvme_ana_rsp_hdr hdr = { 0, };
394 struct nvme_ana_group_desc *desc;
395 size_t offset = sizeof(struct nvme_ana_rsp_hdr); /* start beyond hdr */
396 size_t len;
397 u32 grpid;
398 u16 ngrps = 0;
399 u16 status;
400
401 status = NVME_SC_INTERNAL;
402 desc = kmalloc(struct_size(desc, nsids, NVMET_MAX_NAMESPACES),
403 GFP_KERNEL);
404 if (!desc)
405 goto out;
406
407 down_read(&nvmet_ana_sem);
408 for (grpid = 1; grpid <= NVMET_MAX_ANAGRPS; grpid++) {
409 if (!nvmet_ana_group_enabled[grpid])
410 continue;
411 len = nvmet_format_ana_group(req, grpid, desc);
412 status = nvmet_copy_to_sgl(req, offset, desc, len);
413 if (status)
414 break;
415 offset += len;
416 ngrps++;
417 }
418 for ( ; grpid <= NVMET_MAX_ANAGRPS; grpid++) {
419 if (nvmet_ana_group_enabled[grpid])
420 ngrps++;
421 }
422
423 hdr.chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
424 hdr.ngrps = cpu_to_le16(ngrps);
425 nvmet_clear_aen_bit(req, NVME_AEN_BIT_ANA_CHANGE);
426 up_read(&nvmet_ana_sem);
427
428 kfree(desc);
429
430 /* copy the header last once we know the number of groups */
431 status = nvmet_copy_to_sgl(req, 0, &hdr, sizeof(hdr));
432 out:
433 nvmet_req_complete(req, status);
434 }
435
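/*
 * Supported features log: flag the feature identifiers handled by this
 * target's Get/Set Features implementation.
 */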
436 static void nvmet_execute_get_log_page_features(struct nvmet_req *req)
437 {
438 struct nvme_supported_features_log *features;
439 u16 status;
440
441 features = kzalloc(sizeof(*features), GFP_KERNEL);
442 if (!features) {
443 status = NVME_SC_INTERNAL;
444 goto out;
445 }
446
447 features->fis[NVME_FEAT_NUM_QUEUES] =
448 cpu_to_le32(NVME_FIS_FSUPP | NVME_FIS_CSCPE);
449 features->fis[NVME_FEAT_KATO] =
450 cpu_to_le32(NVME_FIS_FSUPP | NVME_FIS_CSCPE);
451 features->fis[NVME_FEAT_ASYNC_EVENT] =
452 cpu_to_le32(NVME_FIS_FSUPP | NVME_FIS_CSCPE);
453 features->fis[NVME_FEAT_HOST_ID] =
454 cpu_to_le32(NVME_FIS_FSUPP | NVME_FIS_CSCPE);
455 features->fis[NVME_FEAT_WRITE_PROTECT] =
456 cpu_to_le32(NVME_FIS_FSUPP | NVME_FIS_NSCPE);
457 features->fis[NVME_FEAT_RESV_MASK] =
458 cpu_to_le32(NVME_FIS_FSUPP | NVME_FIS_NSCPE);
459
460 status = nvmet_copy_to_sgl(req, 0, features, sizeof(*features));
461 kfree(features);
462 out:
463 nvmet_req_complete(req, status);
464 }
465
466 static void nvmet_execute_get_log_page(struct nvmet_req *req)
467 {
468 if (!nvmet_check_transfer_len(req, nvmet_get_log_page_len(req->cmd)))
469 return;
470
471 switch (req->cmd->get_log_page.lid) {
472 case NVME_LOG_SUPPORTED:
473 return nvmet_execute_get_supported_log_pages(req);
474 case NVME_LOG_ERROR:
475 return nvmet_execute_get_log_page_error(req);
476 case NVME_LOG_SMART:
477 return nvmet_execute_get_log_page_smart(req);
478 case NVME_LOG_FW_SLOT:
479 /*
480 * We only support a single firmware slot which is always
481 * active, so we can zero out the whole firmware slot log and
482 * still claim to fully implement this mandatory log page.
483 */
484 return nvmet_execute_get_log_page_noop(req);
485 case NVME_LOG_CHANGED_NS:
486 return nvmet_execute_get_log_changed_ns(req);
487 case NVME_LOG_CMD_EFFECTS:
488 return nvmet_execute_get_log_cmd_effects_ns(req);
489 case NVME_LOG_ENDURANCE_GROUP:
490 return nvmet_execute_get_log_page_endgrp(req);
491 case NVME_LOG_ANA:
492 return nvmet_execute_get_log_page_ana(req);
493 case NVME_LOG_FEATURES:
494 return nvmet_execute_get_log_page_features(req);
495 case NVME_LOG_RMI:
496 return nvmet_execute_get_log_page_rmi(req);
497 case NVME_LOG_RESERVATION:
498 return nvmet_execute_get_log_page_resv(req);
499 }
500 pr_debug("unhandled lid %d on qid %d\n",
501 req->cmd->get_log_page.lid, req->sq->qid);
502 req->error_loc = offsetof(struct nvme_get_log_page_command, lid);
503 nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_STATUS_DNR);
504 }
505
506 static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
507 {
508 struct nvmet_ctrl *ctrl = req->sq->ctrl;
509 struct nvmet_subsys *subsys = ctrl->subsys;
510 struct nvme_id_ctrl *id;
511 u32 cmd_capsule_size;
512 u16 status = 0;
513
514 if (!subsys->subsys_discovered) {
515 mutex_lock(&subsys->lock);
516 subsys->subsys_discovered = true;
517 mutex_unlock(&subsys->lock);
518 }
519
520 id = kzalloc(sizeof(*id), GFP_KERNEL);
521 if (!id) {
522 status = NVME_SC_INTERNAL;
523 goto out;
524 }
525
526 /* XXX: figure out how to assign real vendor IDs. */
527 id->vid = 0;
528 id->ssvid = 0;
529
530 memcpy(id->sn, ctrl->subsys->serial, NVMET_SN_MAX_SIZE);
531 memcpy_and_pad(id->mn, sizeof(id->mn), subsys->model_number,
532 strlen(subsys->model_number), ' ');
533 memcpy_and_pad(id->fr, sizeof(id->fr),
534 subsys->firmware_rev, strlen(subsys->firmware_rev), ' ');
535
536 put_unaligned_le24(subsys->ieee_oui, id->ieee);
537
538 id->rab = 6;
539
540 if (nvmet_is_disc_subsys(ctrl->subsys))
541 id->cntrltype = NVME_CTRL_DISC;
542 else
543 id->cntrltype = NVME_CTRL_IO;
544
545 /* we support multiple ports, multiple hosts and ANA: */
546 id->cmic = NVME_CTRL_CMIC_MULTI_PORT | NVME_CTRL_CMIC_MULTI_CTRL |
547 NVME_CTRL_CMIC_ANA;
548
549 /* Limit MDTS according to transport capability */
550 if (ctrl->ops->get_mdts)
551 id->mdts = ctrl->ops->get_mdts(ctrl);
552 else
553 id->mdts = 0;
554
555 id->cntlid = cpu_to_le16(ctrl->cntlid);
556 id->ver = cpu_to_le32(ctrl->subsys->ver);
557
558 /* XXX: figure out what to do about RTD3R/RTD3 */
559 id->oaes = cpu_to_le32(NVMET_AEN_CFG_OPTIONAL);
560 id->ctratt = cpu_to_le32(NVME_CTRL_ATTR_HID_128_BIT |
561 NVME_CTRL_ATTR_TBKAS);
562
563 id->oacs = 0;
564
565 /*
566 * We don't really have a practical limit on the number of abort
567 * commands. But we don't do anything useful for abort either, so
568 * no point in allowing more abort commands than the spec requires.
569 */
570 id->acl = 3;
571
572 id->aerl = NVMET_ASYNC_EVENTS - 1;
573
574 /* first slot is read-only, only one slot supported */
575 id->frmw = (1 << 0) | (1 << 1);
576 id->lpa = (1 << 0) | (1 << 1) | (1 << 2);
577 id->elpe = NVMET_ERROR_LOG_SLOTS - 1;
578 id->npss = 0;
579
580 /* We support keep-alive timeout in granularity of seconds */
581 id->kas = cpu_to_le16(NVMET_KAS);
582
583 id->sqes = (0x6 << 4) | 0x6;
584 id->cqes = (0x4 << 4) | 0x4;
585
586 /* no enforcement soft-limit for maxcmd - pick arbitrary high value */
587 id->maxcmd = cpu_to_le16(NVMET_MAX_CMD(ctrl));
588
589 id->nn = cpu_to_le32(NVMET_MAX_NAMESPACES);
590 id->mnan = cpu_to_le32(NVMET_MAX_NAMESPACES);
591 id->oncs = cpu_to_le16(NVME_CTRL_ONCS_DSM |
592 NVME_CTRL_ONCS_WRITE_ZEROES |
593 NVME_CTRL_ONCS_RESERVATIONS);
594
595 /* XXX: don't report vwc if the underlying device is write through */
596 id->vwc = NVME_CTRL_VWC_PRESENT;
597
598 /*
599 * We can't support atomic writes bigger than an LBA without support
600 * from the backend device.
601 */
602 id->awun = 0;
603 id->awupf = 0;
604
605 /* we always support SGLs */
606 id->sgls = cpu_to_le32(NVME_CTRL_SGLS_BYTE_ALIGNED);
607 if (ctrl->ops->flags & NVMF_KEYED_SGLS)
608 id->sgls |= cpu_to_le32(NVME_CTRL_SGLS_KSDBDS);
609 if (req->port->inline_data_size)
610 id->sgls |= cpu_to_le32(NVME_CTRL_SGLS_SAOS);
611
612 strscpy(id->subnqn, ctrl->subsys->subsysnqn, sizeof(id->subnqn));
613
614 /*
615 * Max command capsule size is sqe + in-capsule data size.
616 * Disable in-capsule data for Metadata capable controllers.
617 */
618 cmd_capsule_size = sizeof(struct nvme_command);
619 if (!ctrl->pi_support)
620 cmd_capsule_size += req->port->inline_data_size;
621 id->ioccsz = cpu_to_le32(cmd_capsule_size / 16);
622
623 /* Max response capsule size is cqe */
624 id->iorcsz = cpu_to_le32(sizeof(struct nvme_completion) / 16);
625
626 id->msdbd = ctrl->ops->msdbd;
627
628 /*
629 * Endurance group identifier is 16 bits, so we can't let namespaces
630 * overflow that since we reuse the nsid
631 */
632 BUILD_BUG_ON(NVMET_MAX_NAMESPACES > USHRT_MAX);
633 id->endgidmax = cpu_to_le16(NVMET_MAX_NAMESPACES);
634
635 id->anacap = (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3) | (1 << 4);
636 id->anatt = 10; /* random value */
637 id->anagrpmax = cpu_to_le32(NVMET_MAX_ANAGRPS);
638 id->nanagrpid = cpu_to_le32(NVMET_MAX_ANAGRPS);
639
640 /*
641 * Meh, we don't really support any power state. Fake up the same
642 * values that qemu does.
643 */
644 id->psd[0].max_power = cpu_to_le16(0x9c4);
645 id->psd[0].entry_lat = cpu_to_le32(0x10);
646 id->psd[0].exit_lat = cpu_to_le32(0x4);
647
648 id->nwpc = 1 << 0; /* write protect and no write protect */
649
650 status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
651
652 kfree(id);
653 out:
654 nvmet_req_complete(req, status);
655 }
656
657 static void nvmet_execute_identify_ns(struct nvmet_req *req)
658 {
659 struct nvme_id_ns *id;
660 u16 status;
661
662 if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) {
663 req->error_loc = offsetof(struct nvme_identify, nsid);
664 status = NVME_SC_INVALID_NS | NVME_STATUS_DNR;
665 goto out;
666 }
667
668 id = kzalloc(sizeof(*id), GFP_KERNEL);
669 if (!id) {
670 status = NVME_SC_INTERNAL;
671 goto out;
672 }
673
674 /* return an all zeroed buffer if we can't find an active namespace */
675 status = nvmet_req_find_ns(req);
676 if (status) {
677 status = 0;
678 goto done;
679 }
680
681 if (nvmet_ns_revalidate(req->ns)) {
682 mutex_lock(&req->ns->subsys->lock);
683 nvmet_ns_changed(req->ns->subsys, req->ns->nsid);
684 mutex_unlock(&req->ns->subsys->lock);
685 }
686
687 /*
688 * nuse = ncap = nsze isn't always true, but we have no way to find
689 * that out from the underlying device.
690 */
691 id->ncap = id->nsze =
692 cpu_to_le64(req->ns->size >> req->ns->blksize_shift);
693 switch (req->port->ana_state[req->ns->anagrpid]) {
694 case NVME_ANA_INACCESSIBLE:
695 case NVME_ANA_PERSISTENT_LOSS:
696 break;
697 default:
698 id->nuse = id->nsze;
699 break;
700 }
701
702 if (req->ns->bdev)
703 nvmet_bdev_set_limits(req->ns->bdev, id);
704
705 /*
706 * We just provide a single LBA format that matches what the
707 * underlying device reports.
708 */
709 id->nlbaf = 0;
710 id->flbas = 0;
711
712 /*
713 * Our namespace might always be shared. Not just with other
714 * controllers, but also with any other user of the block device.
715 */
716 id->nmic = NVME_NS_NMIC_SHARED;
717 id->anagrpid = cpu_to_le32(req->ns->anagrpid);
718
719 if (req->ns->pr.enable)
720 id->rescap = NVME_PR_SUPPORT_WRITE_EXCLUSIVE |
721 NVME_PR_SUPPORT_EXCLUSIVE_ACCESS |
722 NVME_PR_SUPPORT_WRITE_EXCLUSIVE_REG_ONLY |
723 NVME_PR_SUPPORT_EXCLUSIVE_ACCESS_REG_ONLY |
724 NVME_PR_SUPPORT_WRITE_EXCLUSIVE_ALL_REGS |
725 NVME_PR_SUPPORT_EXCLUSIVE_ACCESS_ALL_REGS |
726 NVME_PR_SUPPORT_IEKEY_VER_1_3_DEF;
727
728 /*
729 * Since we don't know any better, every namespace is its own endurance
730 * group.
731 */
732 id->endgid = cpu_to_le16(req->ns->nsid);
733
734 memcpy(&id->nguid, &req->ns->nguid, sizeof(id->nguid));
735
736 id->lbaf[0].ds = req->ns->blksize_shift;
737
738 if (req->sq->ctrl->pi_support && nvmet_ns_has_pi(req->ns)) {
739 id->dpc = NVME_NS_DPC_PI_FIRST | NVME_NS_DPC_PI_LAST |
740 NVME_NS_DPC_PI_TYPE1 | NVME_NS_DPC_PI_TYPE2 |
741 NVME_NS_DPC_PI_TYPE3;
742 id->mc = NVME_MC_EXTENDED_LBA;
743 id->dps = req->ns->pi_type;
744 id->flbas = NVME_NS_FLBAS_META_EXT;
745 id->lbaf[0].ms = cpu_to_le16(req->ns->metadata_size);
746 }
747
748 if (req->ns->readonly)
749 id->nsattr |= NVME_NS_ATTR_RO;
750 done:
751 if (!status)
752 status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
753
754 kfree(id);
755 out:
756 nvmet_req_complete(req, status);
757 }
758
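/*
 * Endurance Group List: return the identifiers greater than the starting
 * value in CNSSID, with the number of entries reported in the first list
 * slot. Each enabled namespace doubles as its own endurance group.
 */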
759 static void nvmet_execute_identify_endgrp_list(struct nvmet_req *req)
760 {
761 u16 min_endgid = le16_to_cpu(req->cmd->identify.cnssid);
762 static const int buf_size = NVME_IDENTIFY_DATA_SIZE;
763 struct nvmet_ctrl *ctrl = req->sq->ctrl;
764 struct nvmet_ns *ns;
765 unsigned long idx;
766 __le16 *list;
767 u16 status;
768 int i = 1;
769
770 list = kzalloc(buf_size, GFP_KERNEL);
771 if (!list) {
772 status = NVME_SC_INTERNAL;
773 goto out;
774 }
775
776 nvmet_for_each_enabled_ns(&ctrl->subsys->namespaces, idx, ns) {
777 if (ns->nsid <= min_endgid)
778 continue;
779
780 list[i++] = cpu_to_le16(ns->nsid);
781 if (i == buf_size / sizeof(__le16))
782 break;
783 }
784
785 list[0] = cpu_to_le16(i - 1);
786 status = nvmet_copy_to_sgl(req, 0, list, buf_size);
787 kfree(list);
788 out:
789 nvmet_req_complete(req, status);
790 }
791
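/*
 * Active Namespace ID list: return up to a page of NSIDs greater than the
 * NSID given in the command; with match_css set, entries are additionally
 * filtered on the command set identifier.
 */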
792 static void nvmet_execute_identify_nslist(struct nvmet_req *req, bool match_css)
793 {
794 static const int buf_size = NVME_IDENTIFY_DATA_SIZE;
795 struct nvmet_ctrl *ctrl = req->sq->ctrl;
796 struct nvmet_ns *ns;
797 unsigned long idx;
798 u32 min_nsid = le32_to_cpu(req->cmd->identify.nsid);
799 __le32 *list;
800 u16 status = 0;
801 int i = 0;
802
803 /*
804 * NSID values 0xFFFFFFFE and NVME_NSID_ALL are invalid
805 * See NVMe Base Specification, Active Namespace ID list (CNS 02h).
806 */
807 if (min_nsid == 0xFFFFFFFE || min_nsid == NVME_NSID_ALL) {
808 req->error_loc = offsetof(struct nvme_identify, nsid);
809 status = NVME_SC_INVALID_NS | NVME_STATUS_DNR;
810 goto out;
811 }
812
813 list = kzalloc(buf_size, GFP_KERNEL);
814 if (!list) {
815 status = NVME_SC_INTERNAL;
816 goto out;
817 }
818
819 nvmet_for_each_enabled_ns(&ctrl->subsys->namespaces, idx, ns) {
820 if (ns->nsid <= min_nsid)
821 continue;
822 if (match_css && req->ns->csi != req->cmd->identify.csi)
823 continue;
824 list[i++] = cpu_to_le32(ns->nsid);
825 if (i == buf_size / sizeof(__le32))
826 break;
827 }
828
829 status = nvmet_copy_to_sgl(req, 0, list, buf_size);
830
831 kfree(list);
832 out:
833 nvmet_req_complete(req, status);
834 }
835
836 static u16 nvmet_copy_ns_identifier(struct nvmet_req *req, u8 type, u8 len,
837 void *id, off_t *off)
838 {
839 struct nvme_ns_id_desc desc = {
840 .nidt = type,
841 .nidl = len,
842 };
843 u16 status;
844
845 status = nvmet_copy_to_sgl(req, *off, &desc, sizeof(desc));
846 if (status)
847 return status;
848 *off += sizeof(desc);
849
850 status = nvmet_copy_to_sgl(req, *off, id, len);
851 if (status)
852 return status;
853 *off += len;
854
855 return 0;
856 }
857
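/*
 * Namespace Identification Descriptor list: emit UUID and NGUID descriptors
 * when they are non-zero, always emit the CSI descriptor, and zero-fill the
 * rest of the buffer.
 */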
858 static void nvmet_execute_identify_desclist(struct nvmet_req *req)
859 {
860 off_t off = 0;
861 u16 status;
862
863 status = nvmet_req_find_ns(req);
864 if (status)
865 goto out;
866
867 if (memchr_inv(&req->ns->uuid, 0, sizeof(req->ns->uuid))) {
868 status = nvmet_copy_ns_identifier(req, NVME_NIDT_UUID,
869 NVME_NIDT_UUID_LEN,
870 &req->ns->uuid, &off);
871 if (status)
872 goto out;
873 }
874 if (memchr_inv(req->ns->nguid, 0, sizeof(req->ns->nguid))) {
875 status = nvmet_copy_ns_identifier(req, NVME_NIDT_NGUID,
876 NVME_NIDT_NGUID_LEN,
877 &req->ns->nguid, &off);
878 if (status)
879 goto out;
880 }
881
882 status = nvmet_copy_ns_identifier(req, NVME_NIDT_CSI,
883 NVME_NIDT_CSI_LEN,
884 &req->ns->csi, &off);
885 if (status)
886 goto out;
887
888 if (sg_zero_buffer(req->sg, req->sg_cnt, NVME_IDENTIFY_DATA_SIZE - off,
889 off) != NVME_IDENTIFY_DATA_SIZE - off)
890 status = NVME_SC_INTERNAL | NVME_STATUS_DNR;
891
892 out:
893 nvmet_req_complete(req, status);
894 }
895
896 static void nvmet_execute_identify_ctrl_nvm(struct nvmet_req *req)
897 {
898 /* Not supported: return zeroes */
899 nvmet_req_complete(req,
900 nvmet_zero_sgl(req, 0, sizeof(struct nvme_id_ctrl_nvm)));
901 }
902
903 static void nvme_execute_identify_ns_nvm(struct nvmet_req *req)
904 {
905 u16 status;
906 struct nvme_id_ns_nvm *id;
907
908 status = nvmet_req_find_ns(req);
909 if (status)
910 goto out;
911
912 id = kzalloc(sizeof(*id), GFP_KERNEL);
913 if (!id) {
914 status = NVME_SC_INTERNAL;
915 goto out;
916 }
917 status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
918 out:
919 nvmet_req_complete(req, status);
920 }
921
922 static void nvmet_execute_id_cs_indep(struct nvmet_req *req)
923 {
924 struct nvme_id_ns_cs_indep *id;
925 u16 status;
926
927 status = nvmet_req_find_ns(req);
928 if (status)
929 goto out;
930
931 id = kzalloc(sizeof(*id), GFP_KERNEL);
932 if (!id) {
933 status = NVME_SC_INTERNAL;
934 goto out;
935 }
936
937 id->nstat = NVME_NSTAT_NRDY;
938 id->anagrpid = cpu_to_le32(req->ns->anagrpid);
939 id->nmic = NVME_NS_NMIC_SHARED;
940 if (req->ns->readonly)
941 id->nsattr |= NVME_NS_ATTR_RO;
942 if (req->ns->bdev && !bdev_nonrot(req->ns->bdev))
943 id->nsfeat |= NVME_NS_ROTATIONAL;
944 /*
945 * We need the flush command to flush the file's metadata,
946 * so report VWC as supported if the backend is a file, even
947 * though buffered_io is disabled.
948 */
949 if (req->ns->bdev && !bdev_write_cache(req->ns->bdev))
950 id->nsfeat |= NVME_NS_VWC_NOT_PRESENT;
951
952 status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
953 kfree(id);
954 out:
955 nvmet_req_complete(req, status);
956 }
957
958 static void nvmet_execute_identify(struct nvmet_req *req)
959 {
960 if (!nvmet_check_transfer_len(req, NVME_IDENTIFY_DATA_SIZE))
961 return;
962
963 switch (req->cmd->identify.cns) {
964 case NVME_ID_CNS_NS:
965 nvmet_execute_identify_ns(req);
966 return;
967 case NVME_ID_CNS_CTRL:
968 nvmet_execute_identify_ctrl(req);
969 return;
970 case NVME_ID_CNS_NS_ACTIVE_LIST:
971 nvmet_execute_identify_nslist(req, false);
972 return;
973 case NVME_ID_CNS_NS_DESC_LIST:
974 nvmet_execute_identify_desclist(req);
975 return;
976 case NVME_ID_CNS_CS_NS:
977 switch (req->cmd->identify.csi) {
978 case NVME_CSI_NVM:
979 nvme_execute_identify_ns_nvm(req);
980 return;
981 case NVME_CSI_ZNS:
982 if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
983 nvmet_execute_identify_ns_zns(req);
984 return;
985 }
986 break;
987 }
988 break;
989 case NVME_ID_CNS_CS_CTRL:
990 switch (req->cmd->identify.csi) {
991 case NVME_CSI_NVM:
992 nvmet_execute_identify_ctrl_nvm(req);
993 return;
994 case NVME_CSI_ZNS:
995 if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
996 nvmet_execute_identify_ctrl_zns(req);
997 return;
998 }
999 break;
1000 }
1001 break;
1002 case NVME_ID_CNS_NS_ACTIVE_LIST_CS:
1003 nvmet_execute_identify_nslist(req, true);
1004 return;
1005 case NVME_ID_CNS_NS_CS_INDEP:
1006 nvmet_execute_id_cs_indep(req);
1007 return;
1008 case NVME_ID_CNS_ENDGRP_LIST:
1009 nvmet_execute_identify_endgrp_list(req);
1010 return;
1011 }
1012
1013 pr_debug("unhandled identify cns %d on qid %d\n",
1014 req->cmd->identify.cns, req->sq->qid);
1015 req->error_loc = offsetof(struct nvme_identify, cns);
1016 nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_STATUS_DNR);
1017 }
1018
1019 /*
1020 * A "minimum viable" abort implementation: the command is mandatory in the
1021 * spec, but we are not required to do any useful work. We couldn't really
1022 * do a useful abort, so don't even bother waiting for the command
1023 * to be executed; return immediately, reporting that the command to
1024 * abort wasn't found.
1025 */
1026 static void nvmet_execute_abort(struct nvmet_req *req)
1027 {
1028 if (!nvmet_check_transfer_len(req, 0))
1029 return;
1030 nvmet_set_result(req, 1);
1031 nvmet_req_complete(req, 0);
1032 }
1033
1034 static u16 nvmet_write_protect_flush_sync(struct nvmet_req *req)
1035 {
1036 u16 status;
1037
1038 if (req->ns->file)
1039 status = nvmet_file_flush(req);
1040 else
1041 status = nvmet_bdev_flush(req);
1042
1043 if (status)
1044 pr_err("write protect flush failed nsid: %u\n", req->ns->nsid);
1045 return status;
1046 }
1047
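/*
 * Set Features, Write Protect: the namespace is flushed after being marked
 * read-only so the protected state is durable; the flag is reverted if the
 * flush fails.
 */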
1048 static u16 nvmet_set_feat_write_protect(struct nvmet_req *req)
1049 {
1050 u32 write_protect = le32_to_cpu(req->cmd->common.cdw11);
1051 struct nvmet_subsys *subsys = nvmet_req_subsys(req);
1052 u16 status;
1053
1054 status = nvmet_req_find_ns(req);
1055 if (status)
1056 return status;
1057
1058 mutex_lock(&subsys->lock);
1059 switch (write_protect) {
1060 case NVME_NS_WRITE_PROTECT:
1061 req->ns->readonly = true;
1062 status = nvmet_write_protect_flush_sync(req);
1063 if (status)
1064 req->ns->readonly = false;
1065 break;
1066 case NVME_NS_NO_WRITE_PROTECT:
1067 req->ns->readonly = false;
1068 status = 0;
1069 break;
1070 default:
1071 break;
1072 }
1073
1074 if (!status)
1075 nvmet_ns_changed(subsys, req->ns->nsid);
1076 mutex_unlock(&subsys->lock);
1077 return status;
1078 }
1079
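/*
 * Set Features, Keep Alive Timer: CDW11 carries the timeout in
 * milliseconds; it is stored rounded up to whole seconds and the keep-alive
 * timer is restarted with the new value.
 */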
1080 u16 nvmet_set_feat_kato(struct nvmet_req *req)
1081 {
1082 u32 val32 = le32_to_cpu(req->cmd->common.cdw11);
1083
1084 nvmet_stop_keep_alive_timer(req->sq->ctrl);
1085 req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000);
1086 nvmet_start_keep_alive_timer(req->sq->ctrl);
1087
1088 nvmet_set_result(req, req->sq->ctrl->kato);
1089
1090 return 0;
1091 }
1092
1093 u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask)
1094 {
1095 u32 val32 = le32_to_cpu(req->cmd->common.cdw11);
1096
1097 if (val32 & ~mask) {
1098 req->error_loc = offsetof(struct nvme_common_command, cdw11);
1099 return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
1100 }
1101
1102 WRITE_ONCE(req->sq->ctrl->aen_enabled, val32);
1103 nvmet_set_result(req, val32);
1104
1105 return 0;
1106 }
1107
1108 void nvmet_execute_set_features(struct nvmet_req *req)
1109 {
1110 struct nvmet_subsys *subsys = nvmet_req_subsys(req);
1111 u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
1112 u32 cdw11 = le32_to_cpu(req->cmd->common.cdw11);
1113 u16 status = 0;
1114 u16 nsqr;
1115 u16 ncqr;
1116
1117 if (!nvmet_check_data_len_lte(req, 0))
1118 return;
1119
1120 switch (cdw10 & 0xff) {
1121 case NVME_FEAT_NUM_QUEUES:
1122 ncqr = (cdw11 >> 16) & 0xffff;
1123 nsqr = cdw11 & 0xffff;
1124 if (ncqr == 0xffff || nsqr == 0xffff) {
1125 status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
1126 break;
1127 }
1128 nvmet_set_result(req,
1129 (subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
1130 break;
1131 case NVME_FEAT_KATO:
1132 status = nvmet_set_feat_kato(req);
1133 break;
1134 case NVME_FEAT_ASYNC_EVENT:
1135 status = nvmet_set_feat_async_event(req, NVMET_AEN_CFG_ALL);
1136 break;
1137 case NVME_FEAT_HOST_ID:
1138 status = NVME_SC_CMD_SEQ_ERROR | NVME_STATUS_DNR;
1139 break;
1140 case NVME_FEAT_WRITE_PROTECT:
1141 status = nvmet_set_feat_write_protect(req);
1142 break;
1143 case NVME_FEAT_RESV_MASK:
1144 status = nvmet_set_feat_resv_notif_mask(req, cdw11);
1145 break;
1146 default:
1147 req->error_loc = offsetof(struct nvme_common_command, cdw10);
1148 status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
1149 break;
1150 }
1151
1152 nvmet_req_complete(req, status);
1153 }
1154
1155 static u16 nvmet_get_feat_write_protect(struct nvmet_req *req)
1156 {
1157 struct nvmet_subsys *subsys = nvmet_req_subsys(req);
1158 u32 result;
1159
1160 result = nvmet_req_find_ns(req);
1161 if (result)
1162 return result;
1163
1164 mutex_lock(&subsys->lock);
1165 if (req->ns->readonly == true)
1166 result = NVME_NS_WRITE_PROTECT;
1167 else
1168 result = NVME_NS_NO_WRITE_PROTECT;
1169 nvmet_set_result(req, result);
1170 mutex_unlock(&subsys->lock);
1171
1172 return 0;
1173 }
1174
1175 void nvmet_get_feat_kato(struct nvmet_req *req)
1176 {
1177 nvmet_set_result(req, req->sq->ctrl->kato * 1000);
1178 }
1179
1180 void nvmet_get_feat_async_event(struct nvmet_req *req)
1181 {
1182 nvmet_set_result(req, READ_ONCE(req->sq->ctrl->aen_enabled));
1183 }
1184
1185 void nvmet_execute_get_features(struct nvmet_req *req)
1186 {
1187 struct nvmet_subsys *subsys = nvmet_req_subsys(req);
1188 u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
1189 u16 status = 0;
1190
1191 if (!nvmet_check_transfer_len(req, nvmet_feat_data_len(req, cdw10)))
1192 return;
1193
1194 switch (cdw10 & 0xff) {
1195 /*
1196 * These features are mandatory in the spec, but we don't
1197 * have a useful way to implement them. We'll eventually
1198 * need to come up with some fake values for these.
1199 */
1200 #if 0
1201 case NVME_FEAT_ARBITRATION:
1202 break;
1203 case NVME_FEAT_POWER_MGMT:
1204 break;
1205 case NVME_FEAT_TEMP_THRESH:
1206 break;
1207 case NVME_FEAT_ERR_RECOVERY:
1208 break;
1209 case NVME_FEAT_IRQ_COALESCE:
1210 break;
1211 case NVME_FEAT_IRQ_CONFIG:
1212 break;
1213 case NVME_FEAT_WRITE_ATOMIC:
1214 break;
1215 #endif
1216 case NVME_FEAT_ASYNC_EVENT:
1217 nvmet_get_feat_async_event(req);
1218 break;
1219 case NVME_FEAT_VOLATILE_WC:
1220 nvmet_set_result(req, 1);
1221 break;
1222 case NVME_FEAT_NUM_QUEUES:
1223 nvmet_set_result(req,
1224 (subsys->max_qid-1) | ((subsys->max_qid-1) << 16));
1225 break;
1226 case NVME_FEAT_KATO:
1227 nvmet_get_feat_kato(req);
1228 break;
1229 case NVME_FEAT_HOST_ID:
1230 /* need 128-bit host identifier flag */
1231 if (!(req->cmd->common.cdw11 & cpu_to_le32(1 << 0))) {
1232 req->error_loc =
1233 offsetof(struct nvme_common_command, cdw11);
1234 status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
1235 break;
1236 }
1237
1238 status = nvmet_copy_to_sgl(req, 0, &req->sq->ctrl->hostid,
1239 sizeof(req->sq->ctrl->hostid));
1240 break;
1241 case NVME_FEAT_WRITE_PROTECT:
1242 status = nvmet_get_feat_write_protect(req);
1243 break;
1244 case NVME_FEAT_RESV_MASK:
1245 status = nvmet_get_feat_resv_notif_mask(req);
1246 break;
1247 default:
1248 req->error_loc =
1249 offsetof(struct nvme_common_command, cdw10);
1250 status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
1251 break;
1252 }
1253
1254 nvmet_req_complete(req, status);
1255 }
1256
1257 void nvmet_execute_async_event(struct nvmet_req *req)
1258 {
1259 struct nvmet_ctrl *ctrl = req->sq->ctrl;
1260
1261 if (!nvmet_check_transfer_len(req, 0))
1262 return;
1263
1264 mutex_lock(&ctrl->lock);
1265 if (ctrl->nr_async_event_cmds >= NVMET_ASYNC_EVENTS) {
1266 mutex_unlock(&ctrl->lock);
1267 nvmet_req_complete(req, NVME_SC_ASYNC_LIMIT | NVME_STATUS_DNR);
1268 return;
1269 }
1270 ctrl->async_event_cmds[ctrl->nr_async_event_cmds++] = req;
1271 mutex_unlock(&ctrl->lock);
1272
1273 queue_work(nvmet_wq, &ctrl->async_event_work);
1274 }
1275
1276 void nvmet_execute_keep_alive(struct nvmet_req *req)
1277 {
1278 struct nvmet_ctrl *ctrl = req->sq->ctrl;
1279 u16 status = 0;
1280
1281 if (!nvmet_check_transfer_len(req, 0))
1282 return;
1283
1284 if (!ctrl->kato) {
1285 status = NVME_SC_KA_TIMEOUT_INVALID;
1286 goto out;
1287 }
1288
1289 pr_debug("ctrl %d update keep-alive timer for %d secs\n",
1290 ctrl->cntlid, ctrl->kato);
1291 mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
1292 out:
1293 nvmet_req_complete(req, status);
1294 }
1295
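/*
 * Admin command dispatch: fabrics, discovery and passthru commands are
 * handed to their own parsers; everything else is matched against the
 * admin opcodes implemented above.
 */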
1296 u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
1297 {
1298 struct nvme_command *cmd = req->cmd;
1299 u16 ret;
1300
1301 if (nvme_is_fabrics(cmd))
1302 return nvmet_parse_fabrics_admin_cmd(req);
1303 if (nvmet_is_disc_subsys(nvmet_req_subsys(req)))
1304 return nvmet_parse_discovery_cmd(req);
1305
1306 ret = nvmet_check_ctrl_status(req);
1307 if (unlikely(ret))
1308 return ret;
1309
1310 if (nvmet_is_passthru_req(req))
1311 return nvmet_parse_passthru_admin_cmd(req);
1312
1313 switch (cmd->common.opcode) {
1314 case nvme_admin_get_log_page:
1315 req->execute = nvmet_execute_get_log_page;
1316 return 0;
1317 case nvme_admin_identify:
1318 req->execute = nvmet_execute_identify;
1319 return 0;
1320 case nvme_admin_abort_cmd:
1321 req->execute = nvmet_execute_abort;
1322 return 0;
1323 case nvme_admin_set_features:
1324 req->execute = nvmet_execute_set_features;
1325 return 0;
1326 case nvme_admin_get_features:
1327 req->execute = nvmet_execute_get_features;
1328 return 0;
1329 case nvme_admin_async_event:
1330 req->execute = nvmet_execute_async_event;
1331 return 0;
1332 case nvme_admin_keep_alive:
1333 req->execute = nvmet_execute_keep_alive;
1334 return 0;
1335 default:
1336 return nvmet_report_invalid_opcode(req);
1337 }
1338 }
1339