// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe admin command implementation.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/rculist.h>
#include <linux/part_stat.h>

#include <generated/utsrelease.h>
#include <linux/unaligned.h>
#include "nvmet.h"

u32 nvmet_get_log_page_len(struct nvme_command *cmd)
{
	u32 len = le16_to_cpu(cmd->get_log_page.numdu);

	len <<= 16;
	len += le16_to_cpu(cmd->get_log_page.numdl);
	/* NUMD is a 0's based value */
	len += 1;
	len *= sizeof(u32);

	return len;
}

static u32 nvmet_feat_data_len(struct nvmet_req *req, u32 cdw10)
{
	switch (cdw10 & 0xff) {
	case NVME_FEAT_HOST_ID:
		return sizeof(req->sq->ctrl->hostid);
	default:
		return 0;
	}
}

u64 nvmet_get_log_page_offset(struct nvme_command *cmd)
{
	return le64_to_cpu(cmd->get_log_page.lpo);
}

static void nvmet_execute_get_log_page_noop(struct nvmet_req *req)
{
	nvmet_req_complete(req, nvmet_zero_sgl(req, 0, req->transfer_len));
}

static void nvmet_execute_get_log_page_error(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	unsigned long flags;
	off_t offset = 0;
	u64 slot;
	u64 i;

	spin_lock_irqsave(&ctrl->error_lock, flags);
	slot = ctrl->err_counter % NVMET_ERROR_LOG_SLOTS;

	for (i = 0; i < NVMET_ERROR_LOG_SLOTS; i++) {
		if (nvmet_copy_to_sgl(req, offset, &ctrl->slots[slot],
				sizeof(struct nvme_error_slot)))
			break;

		if (slot == 0)
			slot = NVMET_ERROR_LOG_SLOTS - 1;
		else
			slot--;
		offset += sizeof(struct nvme_error_slot);
	}
	spin_unlock_irqrestore(&ctrl->error_lock, flags);
	nvmet_req_complete(req, 0);
}

static void nvmet_execute_get_supported_log_pages(struct nvmet_req *req)
{
	struct nvme_supported_log *logs;
	u16 status;

	logs = kzalloc(sizeof(*logs), GFP_KERNEL);
	if (!logs) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	logs->lids[NVME_LOG_SUPPORTED] = cpu_to_le32(NVME_LIDS_LSUPP);
	logs->lids[NVME_LOG_ERROR] = cpu_to_le32(NVME_LIDS_LSUPP);
	logs->lids[NVME_LOG_SMART] = cpu_to_le32(NVME_LIDS_LSUPP);
	logs->lids[NVME_LOG_FW_SLOT] = cpu_to_le32(NVME_LIDS_LSUPP);
	logs->lids[NVME_LOG_CHANGED_NS] = cpu_to_le32(NVME_LIDS_LSUPP);
	logs->lids[NVME_LOG_CMD_EFFECTS] = cpu_to_le32(NVME_LIDS_LSUPP);
	logs->lids[NVME_LOG_ENDURANCE_GROUP] = cpu_to_le32(NVME_LIDS_LSUPP);
	logs->lids[NVME_LOG_ANA] = cpu_to_le32(NVME_LIDS_LSUPP);
	logs->lids[NVME_LOG_FEATURES] = cpu_to_le32(NVME_LIDS_LSUPP);
	logs->lids[NVME_LOG_RMI] = cpu_to_le32(NVME_LIDS_LSUPP);
	logs->lids[NVME_LOG_RESERVATION] = cpu_to_le32(NVME_LIDS_LSUPP);

	status = nvmet_copy_to_sgl(req, 0, logs, sizeof(*logs));
	kfree(logs);
out:
	nvmet_req_complete(req, status);
}

static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
		struct nvme_smart_log *slog)
{
	u64 host_reads, host_writes, data_units_read, data_units_written;
	u16 status;

	status = nvmet_req_find_ns(req);
	if (status)
		return status;

	/* we don't have the right data for file backed ns */
	if (!req->ns->bdev)
		return NVME_SC_SUCCESS;

	host_reads = part_stat_read(req->ns->bdev, ios[READ]);
	data_units_read =
		DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[READ]), 1000);
	host_writes = part_stat_read(req->ns->bdev, ios[WRITE]);
	data_units_written =
		DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[WRITE]), 1000);

	put_unaligned_le64(host_reads, &slog->host_reads[0]);
	put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
	put_unaligned_le64(host_writes, &slog->host_writes[0]);
	put_unaligned_le64(data_units_written, &slog->data_units_written[0]);

	return NVME_SC_SUCCESS;
}

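/*
 * SMART data units follow the NVMe convention: one unit accounts for
 * 1000 512-byte sectors, hence the DIV_ROUND_UP(sectors, 1000) above and
 * below. This variant aggregates the counters of every block-device-backed
 * namespace in the subsystem; file-backed namespaces have no part_stat
 * counters and are skipped.
 */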
static u16 nvmet_get_smart_log_all(struct nvmet_req *req,
		struct nvme_smart_log *slog)
{
	u64 host_reads = 0, host_writes = 0;
	u64 data_units_read = 0, data_units_written = 0;
	struct nvmet_ns *ns;
	struct nvmet_ctrl *ctrl;
	unsigned long idx;

	ctrl = req->sq->ctrl;
	xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
		/* we don't have the right data for file backed ns */
		if (!ns->bdev)
			continue;
		host_reads += part_stat_read(ns->bdev, ios[READ]);
		data_units_read += DIV_ROUND_UP(
			part_stat_read(ns->bdev, sectors[READ]), 1000);
		host_writes += part_stat_read(ns->bdev, ios[WRITE]);
		data_units_written += DIV_ROUND_UP(
			part_stat_read(ns->bdev, sectors[WRITE]), 1000);
	}

	put_unaligned_le64(host_reads, &slog->host_reads[0]);
	put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
	put_unaligned_le64(host_writes, &slog->host_writes[0]);
	put_unaligned_le64(data_units_written, &slog->data_units_written[0]);

	return NVME_SC_SUCCESS;
}

static void nvmet_execute_get_log_page_rmi(struct nvmet_req *req)
{
	struct nvme_rotational_media_log *log;
	struct gendisk *disk;
	u16 status;

	req->cmd->common.nsid = cpu_to_le32(le16_to_cpu(
					    req->cmd->get_log_page.lsi));
	status = nvmet_req_find_ns(req);
	if (status)
		goto out;

	if (!req->ns->bdev || bdev_nonrot(req->ns->bdev)) {
		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
		goto out;
	}

	if (req->transfer_len != sizeof(*log)) {
		status = NVME_SC_SGL_INVALID_DATA | NVME_STATUS_DNR;
		goto out;
	}

	log = kzalloc(sizeof(*log), GFP_KERNEL);
	if (!log) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	log->endgid = req->cmd->get_log_page.lsi;
	disk = req->ns->bdev->bd_disk;
	if (disk && disk->ia_ranges)
		log->numa = cpu_to_le16(disk->ia_ranges->nr_ia_ranges);
	else
		log->numa = cpu_to_le16(1);

	status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
	kfree(log);
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_get_log_page_smart(struct nvmet_req *req)
{
	struct nvme_smart_log *log;
	u16 status = NVME_SC_INTERNAL;
	unsigned long flags;

	if (req->transfer_len != sizeof(*log))
		goto out;

	log = kzalloc(sizeof(*log), GFP_KERNEL);
	if (!log)
		goto out;

	if (req->cmd->get_log_page.nsid == cpu_to_le32(NVME_NSID_ALL))
		status = nvmet_get_smart_log_all(req, log);
	else
		status = nvmet_get_smart_log_nsid(req, log);
	if (status)
		goto out_free_log;

	spin_lock_irqsave(&req->sq->ctrl->error_lock, flags);
	put_unaligned_le64(req->sq->ctrl->err_counter,
			&log->num_err_log_entries);
	spin_unlock_irqrestore(&req->sq->ctrl->error_lock, flags);

	status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
out_free_log:
	kfree(log);
out:
	nvmet_req_complete(req, status);
}

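/*
 * Commands and Supported Effects log. CSUPP marks an opcode as supported;
 * LBCC additionally flags commands that may change logical block contents,
 * which here is every data-modifying I/O opcode.
 */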
static void nvmet_get_cmd_effects_nvm(struct nvme_effects_log *log)
{
	log->acs[nvme_admin_get_log_page] =
	log->acs[nvme_admin_identify] =
	log->acs[nvme_admin_abort_cmd] =
	log->acs[nvme_admin_set_features] =
	log->acs[nvme_admin_get_features] =
	log->acs[nvme_admin_async_event] =
	log->acs[nvme_admin_keep_alive] =
		cpu_to_le32(NVME_CMD_EFFECTS_CSUPP);

	log->iocs[nvme_cmd_read] =
	log->iocs[nvme_cmd_flush] =
	log->iocs[nvme_cmd_dsm] =
	log->iocs[nvme_cmd_resv_acquire] =
	log->iocs[nvme_cmd_resv_register] =
	log->iocs[nvme_cmd_resv_release] =
	log->iocs[nvme_cmd_resv_report] =
		cpu_to_le32(NVME_CMD_EFFECTS_CSUPP);
	log->iocs[nvme_cmd_write] =
	log->iocs[nvme_cmd_write_zeroes] =
		cpu_to_le32(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC);
}

static void nvmet_get_cmd_effects_zns(struct nvme_effects_log *log)
{
	log->iocs[nvme_cmd_zone_append] =
	log->iocs[nvme_cmd_zone_mgmt_send] =
		cpu_to_le32(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC);
	log->iocs[nvme_cmd_zone_mgmt_recv] =
		cpu_to_le32(NVME_CMD_EFFECTS_CSUPP);
}

static void nvmet_execute_get_log_cmd_effects_ns(struct nvmet_req *req)
{
	struct nvme_effects_log *log;
	u16 status = NVME_SC_SUCCESS;

	log = kzalloc(sizeof(*log), GFP_KERNEL);
	if (!log) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	switch (req->cmd->get_log_page.csi) {
	case NVME_CSI_NVM:
		nvmet_get_cmd_effects_nvm(log);
		break;
	case NVME_CSI_ZNS:
		if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
			status = NVME_SC_INVALID_IO_CMD_SET;
			goto free;
		}
		nvmet_get_cmd_effects_nvm(log);
		nvmet_get_cmd_effects_zns(log);
		break;
	default:
		status = NVME_SC_INVALID_LOG_PAGE;
		goto free;
	}

	status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
free:
	kfree(log);
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_get_log_changed_ns(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	u16 status = NVME_SC_INTERNAL;
	size_t len;

	if (req->transfer_len != NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32))
		goto out;

	mutex_lock(&ctrl->lock);
	if (ctrl->nr_changed_ns == U32_MAX)
		len = sizeof(__le32);
	else
		len = ctrl->nr_changed_ns * sizeof(__le32);
	status = nvmet_copy_to_sgl(req, 0, ctrl->changed_ns_list, len);
	if (!status)
		status = nvmet_zero_sgl(req, len, req->transfer_len - len);
	ctrl->nr_changed_ns = 0;
	nvmet_clear_aen_bit(req, NVME_AEN_BIT_NS_ATTR);
	mutex_unlock(&ctrl->lock);
out:
	nvmet_req_complete(req, status);
}

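/*
 * Fill one ANA group descriptor. If the host set the RGO (Return Groups
 * Only) bit in LSP the descriptor carries no NSIDs, so the returned length
 * is just the descriptor header; otherwise it grows by one __le32 per
 * namespace in the group.
 */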
static u32 nvmet_format_ana_group(struct nvmet_req *req, u32 grpid,
		struct nvme_ana_group_desc *desc)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmet_ns *ns;
	unsigned long idx;
	u32 count = 0;

	if (!(req->cmd->get_log_page.lsp & NVME_ANA_LOG_RGO)) {
		xa_for_each(&ctrl->subsys->namespaces, idx, ns)
			if (ns->anagrpid == grpid)
				desc->nsids[count++] = cpu_to_le32(ns->nsid);
	}

	desc->grpid = cpu_to_le32(grpid);
	desc->nnsids = cpu_to_le32(count);
	desc->chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
	desc->state = req->port->ana_state[grpid];
	memset(desc->rsvd17, 0, sizeof(desc->rsvd17));
	return struct_size(desc, nsids, count);
}

static void nvmet_execute_get_log_page_endgrp(struct nvmet_req *req)
{
	u64 host_reads, host_writes, data_units_read, data_units_written;
	struct nvme_endurance_group_log *log;
	u16 status;

	/*
	 * The target driver emulates each endurance group as its own
	 * namespace, reusing the nsid as the endurance group identifier.
	 */
	req->cmd->common.nsid = cpu_to_le32(le16_to_cpu(
					    req->cmd->get_log_page.lsi));
	status = nvmet_req_find_ns(req);
	if (status)
		goto out;

	log = kzalloc(sizeof(*log), GFP_KERNEL);
	if (!log) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	if (!req->ns->bdev)
		goto copy;

	host_reads = part_stat_read(req->ns->bdev, ios[READ]);
	data_units_read =
		DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[READ]), 1000);
	host_writes = part_stat_read(req->ns->bdev, ios[WRITE]);
	data_units_written =
		DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[WRITE]), 1000);

	put_unaligned_le64(host_reads, &log->hrc[0]);
	put_unaligned_le64(data_units_read, &log->dur[0]);
	put_unaligned_le64(host_writes, &log->hwc[0]);
	put_unaligned_le64(data_units_written, &log->duw[0]);
copy:
	status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
	kfree(log);
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_get_log_page_ana(struct nvmet_req *req)
{
	struct nvme_ana_rsp_hdr hdr = { 0, };
	struct nvme_ana_group_desc *desc;
	size_t offset = sizeof(struct nvme_ana_rsp_hdr); /* start beyond hdr */
	size_t len;
	u32 grpid;
	u16 ngrps = 0;
	u16 status;

	status = NVME_SC_INTERNAL;
	desc = kmalloc(struct_size(desc, nsids, NVMET_MAX_NAMESPACES),
		       GFP_KERNEL);
	if (!desc)
		goto out;

	down_read(&nvmet_ana_sem);
	for (grpid = 1; grpid <= NVMET_MAX_ANAGRPS; grpid++) {
		if (!nvmet_ana_group_enabled[grpid])
			continue;
		len = nvmet_format_ana_group(req, grpid, desc);
		status = nvmet_copy_to_sgl(req, offset, desc, len);
		if (status)
			break;
		offset += len;
		ngrps++;
	}
	for ( ; grpid <= NVMET_MAX_ANAGRPS; grpid++) {
		if (nvmet_ana_group_enabled[grpid])
			ngrps++;
	}

	hdr.chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
	hdr.ngrps = cpu_to_le16(ngrps);
	nvmet_clear_aen_bit(req, NVME_AEN_BIT_ANA_CHANGE);
	up_read(&nvmet_ana_sem);

	kfree(desc);

	/* copy the header last once we know the number of groups */
	status = nvmet_copy_to_sgl(req, 0, &hdr, sizeof(hdr));
out:
	nvmet_req_complete(req, status);
}

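/*
 * Supported Features log. FSUPP marks a feature identifier as implemented;
 * CSCPE and NSCPE indicate whether the feature is controller or namespace
 * scoped, matching how the Set/Get Features handlers below treat each FID.
 */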
static void nvmet_execute_get_log_page_features(struct nvmet_req *req)
{
	struct nvme_supported_features_log *features;
	u16 status;

	features = kzalloc(sizeof(*features), GFP_KERNEL);
	if (!features) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	features->fis[NVME_FEAT_NUM_QUEUES] =
		cpu_to_le32(NVME_FIS_FSUPP | NVME_FIS_CSCPE);
	features->fis[NVME_FEAT_KATO] =
		cpu_to_le32(NVME_FIS_FSUPP | NVME_FIS_CSCPE);
	features->fis[NVME_FEAT_ASYNC_EVENT] =
		cpu_to_le32(NVME_FIS_FSUPP | NVME_FIS_CSCPE);
	features->fis[NVME_FEAT_HOST_ID] =
		cpu_to_le32(NVME_FIS_FSUPP | NVME_FIS_CSCPE);
	features->fis[NVME_FEAT_WRITE_PROTECT] =
		cpu_to_le32(NVME_FIS_FSUPP | NVME_FIS_NSCPE);
	features->fis[NVME_FEAT_RESV_MASK] =
		cpu_to_le32(NVME_FIS_FSUPP | NVME_FIS_NSCPE);

	status = nvmet_copy_to_sgl(req, 0, features, sizeof(*features));
	kfree(features);
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_get_log_page(struct nvmet_req *req)
{
	if (!nvmet_check_transfer_len(req, nvmet_get_log_page_len(req->cmd)))
		return;

	switch (req->cmd->get_log_page.lid) {
	case NVME_LOG_SUPPORTED:
		return nvmet_execute_get_supported_log_pages(req);
	case NVME_LOG_ERROR:
		return nvmet_execute_get_log_page_error(req);
	case NVME_LOG_SMART:
		return nvmet_execute_get_log_page_smart(req);
	case NVME_LOG_FW_SLOT:
		/*
		 * We only support a single firmware slot which always is
		 * active, so we can zero out the whole firmware slot log and
		 * still claim to fully implement this mandatory log page.
		 */
		return nvmet_execute_get_log_page_noop(req);
	case NVME_LOG_CHANGED_NS:
		return nvmet_execute_get_log_changed_ns(req);
	case NVME_LOG_CMD_EFFECTS:
		return nvmet_execute_get_log_cmd_effects_ns(req);
	case NVME_LOG_ENDURANCE_GROUP:
		return nvmet_execute_get_log_page_endgrp(req);
	case NVME_LOG_ANA:
		return nvmet_execute_get_log_page_ana(req);
	case NVME_LOG_FEATURES:
		return nvmet_execute_get_log_page_features(req);
	case NVME_LOG_RMI:
		return nvmet_execute_get_log_page_rmi(req);
	case NVME_LOG_RESERVATION:
		return nvmet_execute_get_log_page_resv(req);
	}
	pr_debug("unhandled lid %d on qid %d\n",
		 req->cmd->get_log_page.lid, req->sq->qid);
	req->error_loc = offsetof(struct nvme_get_log_page_command, lid);
	nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_STATUS_DNR);
}

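/*
 * Identify Controller (CNS 01h). The fabrics-specific capsule size fields
 * at the end (ioccsz, iorcsz) are expressed in 16-byte units per the NVMe
 * over Fabrics specification, hence the divisions by 16 below.
 */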
static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmet_subsys *subsys = ctrl->subsys;
	struct nvme_id_ctrl *id;
	u32 cmd_capsule_size;
	u16 status = 0;

	if (!subsys->subsys_discovered) {
		mutex_lock(&subsys->lock);
		subsys->subsys_discovered = true;
		mutex_unlock(&subsys->lock);
	}

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	/* XXX: figure out how to assign real vendor IDs. */
	id->vid = 0;
	id->ssvid = 0;

	memcpy(id->sn, ctrl->subsys->serial, NVMET_SN_MAX_SIZE);
	memcpy_and_pad(id->mn, sizeof(id->mn), subsys->model_number,
		       strlen(subsys->model_number), ' ');
	memcpy_and_pad(id->fr, sizeof(id->fr),
		       subsys->firmware_rev, strlen(subsys->firmware_rev), ' ');

	put_unaligned_le24(subsys->ieee_oui, id->ieee);

	id->rab = 6;

	if (nvmet_is_disc_subsys(ctrl->subsys))
		id->cntrltype = NVME_CTRL_DISC;
	else
		id->cntrltype = NVME_CTRL_IO;

	/* we support multiple ports, multiple hosts and ANA: */
	id->cmic = NVME_CTRL_CMIC_MULTI_PORT | NVME_CTRL_CMIC_MULTI_CTRL |
		NVME_CTRL_CMIC_ANA;

	/* Limit MDTS according to transport capability */
	if (ctrl->ops->get_mdts)
		id->mdts = ctrl->ops->get_mdts(ctrl);
	else
		id->mdts = 0;

	id->cntlid = cpu_to_le16(ctrl->cntlid);
	id->ver = cpu_to_le32(ctrl->subsys->ver);

	/* XXX: figure out what to do about RTD3R/RTD3 */
	id->oaes = cpu_to_le32(NVMET_AEN_CFG_OPTIONAL);
	id->ctratt = cpu_to_le32(NVME_CTRL_ATTR_HID_128_BIT |
		NVME_CTRL_ATTR_TBKAS);

	id->oacs = 0;

	/*
	 * We don't really have a practical limit on the number of abort
	 * commands. But we don't do anything useful for abort either, so
	 * no point in allowing more abort commands than the spec requires.
	 */
	id->acl = 3;

	id->aerl = NVMET_ASYNC_EVENTS - 1;

	/* first slot is read-only, only one slot supported */
	id->frmw = (1 << 0) | (1 << 1);
	id->lpa = (1 << 0) | (1 << 1) | (1 << 2);
	id->elpe = NVMET_ERROR_LOG_SLOTS - 1;
	id->npss = 0;

	/* We support keep-alive timeout in granularity of seconds */
	id->kas = cpu_to_le16(NVMET_KAS);

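	/*
	 * SQES/CQES encode the maximum (bits 7:4) and required minimum
	 * (bits 3:0) entry sizes as powers of two: 0x6 is the 64-byte SQE,
	 * 0x4 the 16-byte CQE mandated by the base specification.
	 */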
	id->sqes = (0x6 << 4) | 0x6;
	id->cqes = (0x4 << 4) | 0x4;

	/* no enforcement soft-limit for maxcmd - pick arbitrary high value */
	id->maxcmd = cpu_to_le16(NVMET_MAX_CMD(ctrl));

	id->nn = cpu_to_le32(NVMET_MAX_NAMESPACES);
	id->mnan = cpu_to_le32(NVMET_MAX_NAMESPACES);
	id->oncs = cpu_to_le16(NVME_CTRL_ONCS_DSM |
			NVME_CTRL_ONCS_WRITE_ZEROES |
			NVME_CTRL_ONCS_RESERVATIONS);

	/* XXX: don't report vwc if the underlying device is write through */
	id->vwc = NVME_CTRL_VWC_PRESENT;

	/*
	 * We can't support atomic writes bigger than an LBA without support
	 * from the backend device.
	 */
	id->awun = 0;
	id->awupf = 0;

	id->sgls = cpu_to_le32(1 << 0); /* we always support SGLs */
	if (ctrl->ops->flags & NVMF_KEYED_SGLS)
		id->sgls |= cpu_to_le32(1 << 2);
	if (req->port->inline_data_size)
		id->sgls |= cpu_to_le32(1 << 20);

	strscpy(id->subnqn, ctrl->subsys->subsysnqn, sizeof(id->subnqn));

	/*
	 * Max command capsule size is sqe + in-capsule data size.
	 * Disable in-capsule data for Metadata capable controllers.
	 */
	cmd_capsule_size = sizeof(struct nvme_command);
	if (!ctrl->pi_support)
		cmd_capsule_size += req->port->inline_data_size;
	id->ioccsz = cpu_to_le32(cmd_capsule_size / 16);

	/* Max response capsule size is cqe */
	id->iorcsz = cpu_to_le32(sizeof(struct nvme_completion) / 16);

	id->msdbd = ctrl->ops->msdbd;

	/*
	 * Endurance group identifier is 16 bits, so we can't let namespaces
	 * overflow that since we reuse the nsid
	 */
	BUILD_BUG_ON(NVMET_MAX_NAMESPACES > USHRT_MAX);
	id->endgidmax = cpu_to_le16(NVMET_MAX_NAMESPACES);

	id->anacap = (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3) | (1 << 4);
	id->anatt = 10; /* random value */
	id->anagrpmax = cpu_to_le32(NVMET_MAX_ANAGRPS);
	id->nanagrpid = cpu_to_le32(NVMET_MAX_ANAGRPS);

	/*
	 * Meh, we don't really support any power state. Fake up the same
	 * values that qemu does.
	 */
	id->psd[0].max_power = cpu_to_le16(0x9c4);
	id->psd[0].entry_lat = cpu_to_le32(0x10);
	id->psd[0].exit_lat = cpu_to_le32(0x4);

	id->nwpc = 1 << 0; /* write protect and no write protect */

	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

	kfree(id);
out:
	nvmet_req_complete(req, status);
}

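/*
 * Identify Namespace (CNS 00h). An NSID that does not map to an active
 * namespace is not an error: the specification requires returning a
 * zero-filled data structure instead, which is why a failed namespace
 * lookup below clears the status and falls through to copying the
 * zeroed buffer.
 */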
static void nvmet_execute_identify_ns(struct nvmet_req *req)
{
	struct nvme_id_ns *id;
	u16 status;

	if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) {
		req->error_loc = offsetof(struct nvme_identify, nsid);
		status = NVME_SC_INVALID_NS | NVME_STATUS_DNR;
		goto out;
	}

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	/* return an all zeroed buffer if we can't find an active namespace */
	status = nvmet_req_find_ns(req);
	if (status) {
		status = 0;
		goto done;
	}

	if (nvmet_ns_revalidate(req->ns)) {
		mutex_lock(&req->ns->subsys->lock);
		nvmet_ns_changed(req->ns->subsys, req->ns->nsid);
		mutex_unlock(&req->ns->subsys->lock);
	}

	/*
	 * nuse = ncap = nsze isn't always true, but we have no way to find
	 * that out from the underlying device.
	 */
	id->ncap = id->nsze =
		cpu_to_le64(req->ns->size >> req->ns->blksize_shift);
	switch (req->port->ana_state[req->ns->anagrpid]) {
	case NVME_ANA_INACCESSIBLE:
	case NVME_ANA_PERSISTENT_LOSS:
		break;
	default:
		id->nuse = id->nsze;
		break;
	}

	if (req->ns->bdev)
		nvmet_bdev_set_limits(req->ns->bdev, id);

	/*
	 * We just provide a single LBA format that matches what the
	 * underlying device reports.
	 */
	id->nlbaf = 0;
	id->flbas = 0;

	/*
	 * Our namespace might always be shared. Not just with other
	 * controllers, but also with any other user of the block device.
	 */
	id->nmic = NVME_NS_NMIC_SHARED;
	id->anagrpid = cpu_to_le32(req->ns->anagrpid);

	if (req->ns->pr.enable)
		id->rescap = NVME_PR_SUPPORT_WRITE_EXCLUSIVE |
			NVME_PR_SUPPORT_EXCLUSIVE_ACCESS |
			NVME_PR_SUPPORT_WRITE_EXCLUSIVE_REG_ONLY |
			NVME_PR_SUPPORT_EXCLUSIVE_ACCESS_REG_ONLY |
			NVME_PR_SUPPORT_WRITE_EXCLUSIVE_ALL_REGS |
			NVME_PR_SUPPORT_EXCLUSIVE_ACCESS_ALL_REGS |
			NVME_PR_SUPPORT_IEKEY_VER_1_3_DEF;

	/*
	 * Since we don't know any better, every namespace is its own endurance
	 * group.
	 */
	id->endgid = cpu_to_le16(req->ns->nsid);

	memcpy(&id->nguid, &req->ns->nguid, sizeof(id->nguid));

	id->lbaf[0].ds = req->ns->blksize_shift;

	if (req->sq->ctrl->pi_support && nvmet_ns_has_pi(req->ns)) {
		id->dpc = NVME_NS_DPC_PI_FIRST | NVME_NS_DPC_PI_LAST |
			  NVME_NS_DPC_PI_TYPE1 | NVME_NS_DPC_PI_TYPE2 |
			  NVME_NS_DPC_PI_TYPE3;
		id->mc = NVME_MC_EXTENDED_LBA;
		id->dps = req->ns->pi_type;
		id->flbas = NVME_NS_FLBAS_META_EXT;
		id->lbaf[0].ms = cpu_to_le16(req->ns->metadata_size);
	}

	if (req->ns->readonly)
		id->nsattr |= NVME_NS_ATTR_RO;
done:
	if (!status)
		status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

	kfree(id);
out:
	nvmet_req_complete(req, status);
}

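/*
 * Endurance Group List (CNS 19h). The returned buffer starts with the
 * number of identifiers, followed by the endurance group identifiers
 * greater than CNSSID in ascending order; since every namespace is its
 * own endurance group here, those are simply the NSIDs.
 */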
static void nvmet_execute_identify_endgrp_list(struct nvmet_req *req)
{
	u16 min_endgid = le16_to_cpu(req->cmd->identify.cnssid);
	static const int buf_size = NVME_IDENTIFY_DATA_SIZE;
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmet_ns *ns;
	unsigned long idx;
	__le16 *list;
	u16 status;
	int i = 1;

	list = kzalloc(buf_size, GFP_KERNEL);
	if (!list) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
		if (ns->nsid <= min_endgid)
			continue;

		list[i++] = cpu_to_le16(ns->nsid);
		if (i == buf_size / sizeof(__le16))
			break;
	}

	list[0] = cpu_to_le16(i - 1);
	status = nvmet_copy_to_sgl(req, 0, list, buf_size);
	kfree(list);
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_identify_nslist(struct nvmet_req *req, bool match_css)
{
	static const int buf_size = NVME_IDENTIFY_DATA_SIZE;
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmet_ns *ns;
	unsigned long idx;
	u32 min_nsid = le32_to_cpu(req->cmd->identify.nsid);
	__le32 *list;
	u16 status = 0;
	int i = 0;

	/*
	 * NSID values 0xFFFFFFFE and NVME_NSID_ALL are invalid.
	 * See NVMe Base Specification, Active Namespace ID list (CNS 02h).
	 */
	if (min_nsid == 0xFFFFFFFE || min_nsid == NVME_NSID_ALL) {
		req->error_loc = offsetof(struct nvme_identify, nsid);
		status = NVME_SC_INVALID_NS | NVME_STATUS_DNR;
		goto out;
	}

	list = kzalloc(buf_size, GFP_KERNEL);
	if (!list) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
		if (ns->nsid <= min_nsid)
			continue;
		if (match_css && ns->csi != req->cmd->identify.csi)
			continue;
		list[i++] = cpu_to_le32(ns->nsid);
		if (i == buf_size / sizeof(__le32))
			break;
	}

	status = nvmet_copy_to_sgl(req, 0, list, buf_size);

	kfree(list);
out:
	nvmet_req_complete(req, status);
}

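/*
 * Emit one Namespace Identification Descriptor: a four-byte header
 * (type, length, two reserved bytes) followed by the identifier itself.
 * The descriptor list is terminated by the zeroed remainder of the
 * 4096-byte buffer.
 */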
static u16 nvmet_copy_ns_identifier(struct nvmet_req *req, u8 type, u8 len,
				    void *id, off_t *off)
{
	struct nvme_ns_id_desc desc = {
		.nidt = type,
		.nidl = len,
	};
	u16 status;

	status = nvmet_copy_to_sgl(req, *off, &desc, sizeof(desc));
	if (status)
		return status;
	*off += sizeof(desc);

	status = nvmet_copy_to_sgl(req, *off, id, len);
	if (status)
		return status;
	*off += len;

	return 0;
}

static void nvmet_execute_identify_desclist(struct nvmet_req *req)
{
	off_t off = 0;
	u16 status;

	status = nvmet_req_find_ns(req);
	if (status)
		goto out;

	if (memchr_inv(&req->ns->uuid, 0, sizeof(req->ns->uuid))) {
		status = nvmet_copy_ns_identifier(req, NVME_NIDT_UUID,
						  NVME_NIDT_UUID_LEN,
						  &req->ns->uuid, &off);
		if (status)
			goto out;
	}
	if (memchr_inv(req->ns->nguid, 0, sizeof(req->ns->nguid))) {
		status = nvmet_copy_ns_identifier(req, NVME_NIDT_NGUID,
						  NVME_NIDT_NGUID_LEN,
						  &req->ns->nguid, &off);
		if (status)
			goto out;
	}

	status = nvmet_copy_ns_identifier(req, NVME_NIDT_CSI,
					  NVME_NIDT_CSI_LEN,
					  &req->ns->csi, &off);
	if (status)
		goto out;

	if (sg_zero_buffer(req->sg, req->sg_cnt, NVME_IDENTIFY_DATA_SIZE - off,
			off) != NVME_IDENTIFY_DATA_SIZE - off)
		status = NVME_SC_INTERNAL | NVME_STATUS_DNR;

out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_identify_ctrl_nvm(struct nvmet_req *req)
{
	/* Not supported: return zeroes */
	nvmet_req_complete(req,
		nvmet_zero_sgl(req, 0, sizeof(struct nvme_id_ctrl_nvm)));
}

static void nvme_execute_identify_ns_nvm(struct nvmet_req *req)
{
	u16 status;

	status = nvmet_req_find_ns(req);
	if (status)
		goto out;

	status = nvmet_copy_to_sgl(req, 0, ZERO_PAGE(0),
				   NVME_IDENTIFY_DATA_SIZE);
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_id_cs_indep(struct nvmet_req *req)
{
	struct nvme_id_ns_cs_indep *id;
	u16 status;

	status = nvmet_req_find_ns(req);
	if (status)
		goto out;

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	id->nstat = NVME_NSTAT_NRDY;
	id->anagrpid = cpu_to_le32(req->ns->anagrpid);
	id->nmic = NVME_NS_NMIC_SHARED;
	if (req->ns->readonly)
		id->nsattr |= NVME_NS_ATTR_RO;
	if (req->ns->bdev && !bdev_nonrot(req->ns->bdev))
		id->nsfeat |= NVME_NS_ROTATIONAL;
	/*
	 * We need the flush command to flush the file's metadata, so keep
	 * reporting a volatile write cache for file-backed namespaces even
	 * when buffered_io is disabled.
	 */
	if (req->ns->bdev && !bdev_write_cache(req->ns->bdev))
		id->nsfeat |= NVME_NS_VWC_NOT_PRESENT;

	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
	kfree(id);
out:
	nvmet_req_complete(req, status);
}

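/*
 * Identify dispatcher. Every Identify data structure is 4096 bytes
 * (NVME_IDENTIFY_DATA_SIZE), so a single transfer length check up front
 * covers all CNS values handled below.
 */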
static void nvmet_execute_identify(struct nvmet_req *req)
{
	if (!nvmet_check_transfer_len(req, NVME_IDENTIFY_DATA_SIZE))
		return;

	switch (req->cmd->identify.cns) {
	case NVME_ID_CNS_NS:
		nvmet_execute_identify_ns(req);
		return;
	case NVME_ID_CNS_CTRL:
		nvmet_execute_identify_ctrl(req);
		return;
	case NVME_ID_CNS_NS_ACTIVE_LIST:
		nvmet_execute_identify_nslist(req, false);
		return;
	case NVME_ID_CNS_NS_DESC_LIST:
		nvmet_execute_identify_desclist(req);
		return;
	case NVME_ID_CNS_CS_NS:
		switch (req->cmd->identify.csi) {
		case NVME_CSI_NVM:
			nvme_execute_identify_ns_nvm(req);
			return;
		case NVME_CSI_ZNS:
			if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
				nvmet_execute_identify_ns_zns(req);
				return;
			}
			break;
		}
		break;
	case NVME_ID_CNS_CS_CTRL:
		switch (req->cmd->identify.csi) {
		case NVME_CSI_NVM:
			nvmet_execute_identify_ctrl_nvm(req);
			return;
		case NVME_CSI_ZNS:
			if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
				nvmet_execute_identify_ctrl_zns(req);
				return;
			}
			break;
		}
		break;
	case NVME_ID_CNS_NS_ACTIVE_LIST_CS:
		nvmet_execute_identify_nslist(req, true);
		return;
	case NVME_ID_CNS_NS_CS_INDEP:
		nvmet_execute_id_cs_indep(req);
		return;
	case NVME_ID_CNS_ENDGRP_LIST:
		nvmet_execute_identify_endgrp_list(req);
		return;
	}

	pr_debug("unhandled identify cns %d on qid %d\n",
		 req->cmd->identify.cns, req->sq->qid);
	req->error_loc = offsetof(struct nvme_identify, cns);
	nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_STATUS_DNR);
}

/*
 * A "minimum viable" abort implementation: the command is mandatory in the
 * spec, but we are not required to do any useful work. We couldn't really
 * do a useful abort, so don't even bother waiting for the command to be
 * executed and return immediately, indicating that the command to abort
 * wasn't found.
 */
static void nvmet_execute_abort(struct nvmet_req *req)
{
	if (!nvmet_check_transfer_len(req, 0))
		return;
	nvmet_set_result(req, 1);
	nvmet_req_complete(req, 0);
}

static u16 nvmet_write_protect_flush_sync(struct nvmet_req *req)
{
	u16 status;

	if (req->ns->file)
		status = nvmet_file_flush(req);
	else
		status = nvmet_bdev_flush(req);

	if (status)
		pr_err("write protect flush failed nsid: %u\n", req->ns->nsid);
	return status;
}

static u16 nvmet_set_feat_write_protect(struct nvmet_req *req)
{
	u32 write_protect = le32_to_cpu(req->cmd->common.cdw11);
	struct nvmet_subsys *subsys = nvmet_req_subsys(req);
	u16 status;

	status = nvmet_req_find_ns(req);
	if (status)
		return status;

	mutex_lock(&subsys->lock);
	switch (write_protect) {
	case NVME_NS_WRITE_PROTECT:
		req->ns->readonly = true;
		status = nvmet_write_protect_flush_sync(req);
		if (status)
			req->ns->readonly = false;
		break;
	case NVME_NS_NO_WRITE_PROTECT:
		req->ns->readonly = false;
		status = 0;
		break;
	default:
		break;
	}

	if (!status)
		nvmet_ns_changed(subsys, req->ns->nsid);
	mutex_unlock(&subsys->lock);
	return status;
}

u16 nvmet_set_feat_kato(struct nvmet_req *req)
{
	u32 val32 = le32_to_cpu(req->cmd->common.cdw11);

	nvmet_stop_keep_alive_timer(req->sq->ctrl);
	req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000);
	nvmet_start_keep_alive_timer(req->sq->ctrl);

	nvmet_set_result(req, req->sq->ctrl->kato);

	return 0;
}

u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask)
{
	u32 val32 = le32_to_cpu(req->cmd->common.cdw11);

	if (val32 & ~mask) {
		req->error_loc = offsetof(struct nvme_common_command, cdw11);
		return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
	}

	WRITE_ONCE(req->sq->ctrl->aen_enabled, val32);
	nvmet_set_result(req, val32);

	return 0;
}

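/*
 * Set Features. The feature identifier lives in the low byte of cdw10.
 * For NVME_FEAT_NUM_QUEUES the completion's DW0 reports the number of
 * queues actually allocated as 0's based values, NSQA in bits 15:0 and
 * NCQA in bits 31:16, which is what the shift below encodes.
 */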
void nvmet_execute_set_features(struct nvmet_req *req)
{
	struct nvmet_subsys *subsys = nvmet_req_subsys(req);
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
	u32 cdw11 = le32_to_cpu(req->cmd->common.cdw11);
	u16 status = 0;
	u16 nsqr;
	u16 ncqr;

	if (!nvmet_check_data_len_lte(req, 0))
		return;

	switch (cdw10 & 0xff) {
	case NVME_FEAT_NUM_QUEUES:
		ncqr = (cdw11 >> 16) & 0xffff;
		nsqr = cdw11 & 0xffff;
		if (ncqr == 0xffff || nsqr == 0xffff) {
			status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
			break;
		}
		nvmet_set_result(req,
			(subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
		break;
	case NVME_FEAT_KATO:
		status = nvmet_set_feat_kato(req);
		break;
	case NVME_FEAT_ASYNC_EVENT:
		status = nvmet_set_feat_async_event(req, NVMET_AEN_CFG_ALL);
		break;
	case NVME_FEAT_HOST_ID:
		status = NVME_SC_CMD_SEQ_ERROR | NVME_STATUS_DNR;
		break;
	case NVME_FEAT_WRITE_PROTECT:
		status = nvmet_set_feat_write_protect(req);
		break;
	case NVME_FEAT_RESV_MASK:
		status = nvmet_set_feat_resv_notif_mask(req, cdw11);
		break;
	default:
		req->error_loc = offsetof(struct nvme_common_command, cdw10);
		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
		break;
	}

	nvmet_req_complete(req, status);
}

static u16 nvmet_get_feat_write_protect(struct nvmet_req *req)
{
	struct nvmet_subsys *subsys = nvmet_req_subsys(req);
	u32 result;

	result = nvmet_req_find_ns(req);
	if (result)
		return result;

	mutex_lock(&subsys->lock);
	if (req->ns->readonly)
		result = NVME_NS_WRITE_PROTECT;
	else
		result = NVME_NS_NO_WRITE_PROTECT;
	nvmet_set_result(req, result);
	mutex_unlock(&subsys->lock);

	return 0;
}

void nvmet_get_feat_kato(struct nvmet_req *req)
{
	nvmet_set_result(req, req->sq->ctrl->kato * 1000);
}

void nvmet_get_feat_async_event(struct nvmet_req *req)
{
	nvmet_set_result(req, READ_ONCE(req->sq->ctrl->aen_enabled));
}

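/*
 * Get Features. Most features return their current value in the
 * completion's DW0 and transfer no data; only NVME_FEAT_HOST_ID carries
 * a payload, which is why nvmet_feat_data_len() reports a non-zero
 * length for that feature alone.
 */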
void nvmet_execute_get_features(struct nvmet_req *req)
{
	struct nvmet_subsys *subsys = nvmet_req_subsys(req);
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
	u16 status = 0;

	if (!nvmet_check_transfer_len(req, nvmet_feat_data_len(req, cdw10)))
		return;

	switch (cdw10 & 0xff) {
	/*
	 * These features are mandatory in the spec, but we don't
	 * have a useful way to implement them.  We'll eventually
	 * need to come up with some fake values for these.
	 */
#if 0
	case NVME_FEAT_ARBITRATION:
		break;
	case NVME_FEAT_POWER_MGMT:
		break;
	case NVME_FEAT_TEMP_THRESH:
		break;
	case NVME_FEAT_ERR_RECOVERY:
		break;
	case NVME_FEAT_IRQ_COALESCE:
		break;
	case NVME_FEAT_IRQ_CONFIG:
		break;
	case NVME_FEAT_WRITE_ATOMIC:
		break;
#endif
	case NVME_FEAT_ASYNC_EVENT:
		nvmet_get_feat_async_event(req);
		break;
	case NVME_FEAT_VOLATILE_WC:
		nvmet_set_result(req, 1);
		break;
	case NVME_FEAT_NUM_QUEUES:
		nvmet_set_result(req,
			(subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
		break;
	case NVME_FEAT_KATO:
		nvmet_get_feat_kato(req);
		break;
	case NVME_FEAT_HOST_ID:
		/* need 128-bit host identifier flag */
		if (!(req->cmd->common.cdw11 & cpu_to_le32(1 << 0))) {
			req->error_loc =
				offsetof(struct nvme_common_command, cdw11);
			status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
			break;
		}

		status = nvmet_copy_to_sgl(req, 0, &req->sq->ctrl->hostid,
				sizeof(req->sq->ctrl->hostid));
		break;
	case NVME_FEAT_WRITE_PROTECT:
		status = nvmet_get_feat_write_protect(req);
		break;
	case NVME_FEAT_RESV_MASK:
		status = nvmet_get_feat_resv_notif_mask(req);
		break;
	default:
		req->error_loc =
			offsetof(struct nvme_common_command, cdw10);
		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
		break;
	}

	nvmet_req_complete(req, status);
}

void nvmet_execute_async_event(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;

	if (!nvmet_check_transfer_len(req, 0))
		return;

	mutex_lock(&ctrl->lock);
	if (ctrl->nr_async_event_cmds >= NVMET_ASYNC_EVENTS) {
		mutex_unlock(&ctrl->lock);
		nvmet_req_complete(req, NVME_SC_ASYNC_LIMIT | NVME_STATUS_DNR);
		return;
	}
	ctrl->async_event_cmds[ctrl->nr_async_event_cmds++] = req;
	mutex_unlock(&ctrl->lock);

	queue_work(nvmet_wq, &ctrl->async_event_work);
}

void nvmet_execute_keep_alive(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	u16 status = 0;

	if (!nvmet_check_transfer_len(req, 0))
		return;

	if (!ctrl->kato) {
		status = NVME_SC_KA_TIMEOUT_INVALID;
		goto out;
	}

	pr_debug("ctrl %d update keep-alive timer for %d secs\n",
		 ctrl->cntlid, ctrl->kato);
	mod_delayed_work(nvmet_wq, &ctrl->ka_work, ctrl->kato * HZ);
out:
	nvmet_req_complete(req, status);
}

u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;
	u16 ret;

	if (nvme_is_fabrics(cmd))
		return nvmet_parse_fabrics_admin_cmd(req);
	if (nvmet_is_disc_subsys(nvmet_req_subsys(req)))
		return nvmet_parse_discovery_cmd(req);

	ret = nvmet_check_ctrl_status(req);
	if (unlikely(ret))
		return ret;

	if (nvmet_is_passthru_req(req))
		return nvmet_parse_passthru_admin_cmd(req);

	switch (cmd->common.opcode) {
	case nvme_admin_get_log_page:
		req->execute = nvmet_execute_get_log_page;
		return 0;
	case nvme_admin_identify:
		req->execute = nvmet_execute_identify;
		return 0;
	case nvme_admin_abort_cmd:
		req->execute = nvmet_execute_abort;
		return 0;
	case nvme_admin_set_features:
		req->execute = nvmet_execute_set_features;
		return 0;
	case nvme_admin_get_features:
		req->execute = nvmet_execute_get_features;
		return 0;
	case nvme_admin_async_event:
		req->execute = nvmet_execute_async_event;
		return 0;
	case nvme_admin_keep_alive:
		req->execute = nvmet_execute_keep_alive;
		return 0;
	default:
		return nvmet_report_invalid_opcode(req);
	}
}