// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe admin command implementation (excerpts from the NVMe target,
 * drivers/nvme/target/admin-cmd.c).
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
/* in nvmet_execute_delete_sq(): */
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	u16 sqid = le16_to_cpu(req->cmd->delete_queue.qid);
	/* ... sqid validation elided ... */
	status = ctrl->ops->delete_sq(ctrl, sqid);
/* in nvmet_execute_create_sq(): */
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvme_command *cmd = req->cmd;
	u16 sqid = le16_to_cpu(cmd->create_sq.sqid);
	u16 cqid = le16_to_cpu(cmd->create_sq.cqid);
	u16 sq_flags = le16_to_cpu(cmd->create_sq.sq_flags);
	u16 qsize = le16_to_cpu(cmd->create_sq.qsize);
	u64 prp1 = le64_to_cpu(cmd->create_sq.prp1);
	/* ... sqid/cqid validation elided ... */
	if (!qsize || qsize > NVME_CAP_MQES(ctrl->cap)) {
		/* ... invalid queue size error path elided ... */
	}
	status = ctrl->ops->create_sq(ctrl, sqid, sq_flags, qsize, prp1);
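/*
 * Note the 0's based convention: both the Create SQ qsize field and
 * CAP.MQES encode "entries minus one", so they compare directly above.
 * A raw qsize of 0 is rejected as too small, and anything strictly
 * larger than the advertised maximum fails the same check.
 */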
/* in nvmet_execute_delete_cq(): */
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	u16 cqid = le16_to_cpu(req->cmd->delete_queue.qid);
	/* ... cqid validation elided ... */
	status = ctrl->ops->delete_cq(ctrl, cqid);
/* in nvmet_execute_create_cq(): */
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvme_command *cmd = req->cmd;
	u16 cqid = le16_to_cpu(cmd->create_cq.cqid);
	u16 cq_flags = le16_to_cpu(cmd->create_cq.cq_flags);
	u16 qsize = le16_to_cpu(cmd->create_cq.qsize);
	u16 irq_vector = le16_to_cpu(cmd->create_cq.irq_vector);
	u64 prp1 = le64_to_cpu(cmd->create_cq.prp1);
	/* ... cqid validation elided ... */
	if (!qsize || qsize > NVME_CAP_MQES(ctrl->cap)) {
		/* ... invalid queue size error path elided ... */
	}
	status = ctrl->ops->create_cq(ctrl, cqid, cq_flags, qsize,
				      prp1, irq_vector);
/* in nvmet_get_log_page_len(): */
	u32 len = le16_to_cpu(cmd->get_log_page.numdu);

	len <<= 16;
	len += le16_to_cpu(cmd->get_log_page.numdl);
	/* NUMD is a 0's based value */
	len += 1;
	len *= sizeof(u32);
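/*
 * Worked example: NUMDU = 0x0001 and NUMDL = 0x0000 combine to
 * NUMD = 0x10000; the 0's based adjustment makes that 0x10001 dwords,
 * so the requested log transfer length is 0x10001 * 4 = 0x40004 bytes.
 */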
/* in nvmet_feat_data_len(): */
		return sizeof(req->sq->ctrl->hostid);

/* in nvmet_get_log_page_offset(): */
	return le64_to_cpu(cmd->get_log_page.lpo);

/* in nvmet_execute_get_log_page_noop(): */
	nvmet_req_complete(req, nvmet_zero_sgl(req, 0, req->transfer_len));
/* in nvmet_execute_get_log_page_error(): */
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	/* ... */
	spin_lock_irqsave(&ctrl->error_lock, flags);
	slot = ctrl->err_counter % NVMET_ERROR_LOG_SLOTS;
	for (i = 0; i < NVMET_ERROR_LOG_SLOTS; i++) {
		if (nvmet_copy_to_sgl(req, offset, &ctrl->slots[slot],
				      sizeof(struct nvme_error_slot)))
			break;
		if (slot == 0)
			slot = NVMET_ERROR_LOG_SLOTS - 1;
		else
			slot--;
		offset += sizeof(struct nvme_error_slot);
	}
	spin_unlock_irqrestore(&ctrl->error_lock, flags);
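/*
 * The error log is a ring: err_counter only grows, so taking it modulo
 * NVMET_ERROR_LOG_SLOTS locates the slot of the most recent entry, and
 * the loop above walks backwards (wrapping at slot 0) so entries come
 * back newest-first, as the Error Information log page expects.
 */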
/* in nvmet_execute_get_supported_log_pages(): */
	logs->lids[NVME_LOG_SUPPORTED] = cpu_to_le32(NVME_LIDS_LSUPP);
	logs->lids[NVME_LOG_ERROR] = cpu_to_le32(NVME_LIDS_LSUPP);
	logs->lids[NVME_LOG_SMART] = cpu_to_le32(NVME_LIDS_LSUPP);
	logs->lids[NVME_LOG_FW_SLOT] = cpu_to_le32(NVME_LIDS_LSUPP);
	logs->lids[NVME_LOG_CHANGED_NS] = cpu_to_le32(NVME_LIDS_LSUPP);
	logs->lids[NVME_LOG_CMD_EFFECTS] = cpu_to_le32(NVME_LIDS_LSUPP);
	logs->lids[NVME_LOG_ENDURANCE_GROUP] = cpu_to_le32(NVME_LIDS_LSUPP);
	logs->lids[NVME_LOG_ANA] = cpu_to_le32(NVME_LIDS_LSUPP);
	logs->lids[NVME_LOG_FEATURES] = cpu_to_le32(NVME_LIDS_LSUPP);
	logs->lids[NVME_LOG_RMI] = cpu_to_le32(NVME_LIDS_LSUPP);
	logs->lids[NVME_LOG_RESERVATION] = cpu_to_le32(NVME_LIDS_LSUPP);
/* in nvmet_get_smart_log_nsid(): */
	/* we don't report size or space for file backed volumes */
	if (!req->ns->bdev)
		return NVME_SC_SUCCESS;

	host_reads = part_stat_read(req->ns->bdev, ios[READ]);
	data_units_read =
		DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[READ]), 1000);
	host_writes = part_stat_read(req->ns->bdev, ios[WRITE]);
	data_units_written =
		DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[WRITE]), 1000);

	put_unaligned_le64(host_reads, &slog->host_reads[0]);
	put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
	put_unaligned_le64(host_writes, &slog->host_writes[0]);
	put_unaligned_le64(data_units_written, &slog->data_units_written[0]);
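/*
 * SMART "data units" are thousands of 512-byte units, which is what
 * DIV_ROUND_UP(sectors, 1000) above computes: e.g. 2,000,000 sectors
 * read (roughly 1 GiB) is reported as 2000 data units.
 */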
/* in nvmet_get_smart_log_all(): */
	ctrl = req->sq->ctrl;
	nvmet_for_each_enabled_ns(&ctrl->subsys->namespaces, idx, ns) {
		if (!ns->bdev)
			continue;
		host_reads += part_stat_read(ns->bdev, ios[READ]);
		data_units_read += DIV_ROUND_UP(
			part_stat_read(ns->bdev, sectors[READ]), 1000);
		host_writes += part_stat_read(ns->bdev, ios[WRITE]);
		data_units_written += DIV_ROUND_UP(
			part_stat_read(ns->bdev, sectors[WRITE]), 1000);
	}

	put_unaligned_le64(host_reads, &slog->host_reads[0]);
	put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
	put_unaligned_le64(host_writes, &slog->host_writes[0]);
	put_unaligned_le64(data_units_written, &slog->data_units_written[0]);
/* in nvmet_execute_get_log_page_rmi(): */
	req->cmd->common.nsid = cpu_to_le32(le16_to_cpu(
					    req->cmd->get_log_page.lsi));
	/* ... */
	if (!req->ns->bdev || bdev_nonrot(req->ns->bdev)) {
		/* ... not a rotational device: invalid field ... */
	}
	if (req->transfer_len != sizeof(*log)) {
		/* ... invalid transfer length error path elided ... */
	}
	/* ... */
	log->endgid = req->cmd->get_log_page.lsi;
	disk = req->ns->bdev->bd_disk;
	if (disk && disk->ia_ranges)
		log->numa = cpu_to_le16(disk->ia_ranges->nr_ia_ranges);
	else
		log->numa = cpu_to_le16(1);
/* in nvmet_execute_get_log_page_smart(): */
	if (req->transfer_len != sizeof(*log))
		goto out;
	/* ... */
	if (req->cmd->get_log_page.nsid == cpu_to_le32(NVME_NSID_ALL))
		status = nvmet_get_smart_log_all(req, log);
	else
		status = nvmet_get_smart_log_nsid(req, log);
	/* ... */
	spin_lock_irqsave(&req->sq->ctrl->error_lock, flags);
	put_unaligned_le64(req->sq->ctrl->err_counter,
			   &log->num_err_log_entries);
	spin_unlock_irqrestore(&req->sq->ctrl->error_lock, flags);
/* in nvmet_get_cmd_effects_admin(): */
	log->acs[nvme_admin_delete_sq] =
	log->acs[nvme_admin_create_sq] =
	log->acs[nvme_admin_delete_cq] =
	log->acs[nvme_admin_create_cq] =
		cpu_to_le32(NVME_CMD_EFFECTS_CSUPP);

	log->acs[nvme_admin_get_log_page] =
	log->acs[nvme_admin_identify] =
	log->acs[nvme_admin_abort_cmd] =
	log->acs[nvme_admin_set_features] =
	log->acs[nvme_admin_get_features] =
	log->acs[nvme_admin_async_event] =
	log->acs[nvme_admin_keep_alive] =
		cpu_to_le32(NVME_CMD_EFFECTS_CSUPP);
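/*
 * Every admin entry above only advertises NVME_CMD_EFFECTS_CSUPP
 * ("command supported"); I/O commands that can change LBA contents
 * additionally carry NVME_CMD_EFFECTS_LBCC, as the NVM and ZNS helpers
 * below show for writes and zone management.
 */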
/* in nvmet_get_cmd_effects_nvm(): */
	log->iocs[nvme_cmd_read] =
	log->iocs[nvme_cmd_flush] =
	log->iocs[nvme_cmd_dsm] =
	log->iocs[nvme_cmd_resv_acquire] =
	log->iocs[nvme_cmd_resv_register] =
	log->iocs[nvme_cmd_resv_release] =
	log->iocs[nvme_cmd_resv_report] =
		cpu_to_le32(NVME_CMD_EFFECTS_CSUPP);
	log->iocs[nvme_cmd_write] =
	log->iocs[nvme_cmd_write_zeroes] =
		cpu_to_le32(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC);
/* in nvmet_get_cmd_effects_zns(): */
	log->iocs[nvme_cmd_zone_append] =
	log->iocs[nvme_cmd_zone_mgmt_send] =
		cpu_to_le32(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC);
	log->iocs[nvme_cmd_zone_mgmt_recv] =
		cpu_to_le32(NVME_CMD_EFFECTS_CSUPP);
/* in nvmet_execute_get_log_cmd_effects_ns(): */
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	/* ... */
	switch (req->cmd->get_log_page.csi) {
	/* ... per command-set identifier cases elided ... */
	}
/* in nvmet_execute_get_log_changed_ns(): */
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	/* ... */
	if (req->transfer_len != NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32))
		goto out;

	mutex_lock(&ctrl->lock);
	if (ctrl->nr_changed_ns == U32_MAX)
		len = sizeof(__le32);
	else
		len = ctrl->nr_changed_ns * sizeof(__le32);

	status = nvmet_copy_to_sgl(req, 0, ctrl->changed_ns_list, len);
	if (status == 0)
		status = nvmet_zero_sgl(req, len, req->transfer_len - len);
	ctrl->nr_changed_ns = 0;
	/* ... */
	mutex_unlock(&ctrl->lock);
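/*
 * nr_changed_ns == U32_MAX marks an overflowed changed-namespace list:
 * only a single entry (NSID 0xffffffff, "rescan everything") is
 * returned in that case. Either way the rest of the host buffer is
 * zero-filled and the list is reset, since reading the log clears it.
 */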
/* in nvmet_format_ana_group(): */
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	/* ... */
	if (!(req->cmd->get_log_page.lsp & NVME_ANA_LOG_RGO)) {
		nvmet_for_each_enabled_ns(&ctrl->subsys->namespaces, idx, ns) {
			if (ns->anagrpid == grpid)
				desc->nsids[count++] = cpu_to_le32(ns->nsid);
		}
	}

	desc->grpid = cpu_to_le32(grpid);
	desc->nnsids = cpu_to_le32(count);
	desc->chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
	desc->state = req->port->ana_state[grpid];
	memset(desc->rsvd17, 0, sizeof(desc->rsvd17));
/* in nvmet_execute_get_log_page_endgrp(): */
	req->cmd->common.nsid = cpu_to_le32(le16_to_cpu(
					    req->cmd->get_log_page.lsi));
	/* ... */
	if (!req->ns->bdev)
		/* ... file-backed namespaces report zero counters ... */

	host_reads = part_stat_read(req->ns->bdev, ios[READ]);
	data_units_read =
		DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[READ]), 1000);
	host_writes = part_stat_read(req->ns->bdev, ios[WRITE]);
	data_units_written =
		DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[WRITE]), 1000);

	put_unaligned_le64(host_reads, &log->hrc[0]);
	put_unaligned_le64(data_units_read, &log->dur[0]);
	put_unaligned_le64(host_writes, &log->hwc[0]);
	put_unaligned_le64(data_units_written, &log->duw[0]);
/* in nvmet_execute_get_log_page_features(): */
	features->fis[NVME_FEAT_NUM_QUEUES] =
		cpu_to_le32(NVME_FIS_FSUPP | NVME_FIS_CSCPE);
	features->fis[NVME_FEAT_KATO] =
		cpu_to_le32(NVME_FIS_FSUPP | NVME_FIS_CSCPE);
	features->fis[NVME_FEAT_ASYNC_EVENT] =
		cpu_to_le32(NVME_FIS_FSUPP | NVME_FIS_CSCPE);
	features->fis[NVME_FEAT_HOST_ID] =
		cpu_to_le32(NVME_FIS_FSUPP | NVME_FIS_CSCPE);
	features->fis[NVME_FEAT_WRITE_PROTECT] =
		cpu_to_le32(NVME_FIS_FSUPP | NVME_FIS_NSCPE);
	features->fis[NVME_FEAT_RESV_MASK] =
		cpu_to_le32(NVME_FIS_FSUPP | NVME_FIS_NSCPE);
/* in nvmet_execute_get_log_page(): */
	if (!nvmet_check_transfer_len(req, nvmet_get_log_page_len(req->cmd)))
		return;

	switch (req->cmd->get_log_page.lid) {
	/* ... per-LID dispatch to the handlers above elided ... */
	}

	pr_debug("unhandled lid %d on qid %d\n",
		 req->cmd->get_log_page.lid, req->sq->qid);
	req->error_loc = offsetof(struct nvme_get_log_page_command, lid);
/* in nvmet_execute_identify_ctrl(): */
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmet_subsys *subsys = ctrl->subsys;
	/* ... */
	if (!subsys->subsys_discovered) {
		mutex_lock(&subsys->lock);
		subsys->subsys_discovered = true;
		mutex_unlock(&subsys->lock);
	}
	/* ... */
	id->vid = cpu_to_le16(subsys->vendor_id);
	id->ssvid = cpu_to_le16(subsys->subsys_vendor_id);

	memcpy(id->sn, ctrl->subsys->serial, NVMET_SN_MAX_SIZE);
	memcpy_and_pad(id->mn, sizeof(id->mn), subsys->model_number,
		       strlen(subsys->model_number), ' ');
	memcpy_and_pad(id->fr, sizeof(id->fr),
		       subsys->firmware_rev, strlen(subsys->firmware_rev), ' ');

	put_unaligned_le24(subsys->ieee_oui, id->ieee);

	id->rab = 6;

	if (nvmet_is_disc_subsys(ctrl->subsys))
		id->cntrltype = NVME_CTRL_DISC;
	else
		id->cntrltype = NVME_CTRL_IO;

	id->cmic = NVME_CTRL_CMIC_MULTI_PORT | NVME_CTRL_CMIC_MULTI_CTRL |
		NVME_CTRL_CMIC_ANA;

	if (ctrl->ops->get_mdts)
		id->mdts = ctrl->ops->get_mdts(ctrl);
	else
		id->mdts = 0;

	id->cntlid = cpu_to_le16(ctrl->cntlid);
	id->ver = cpu_to_le32(ctrl->subsys->ver);
	/* ... */
	id->oaes = cpu_to_le32(NVMET_AEN_CFG_OPTIONAL);
	/* ... */
	id->ctratt = cpu_to_le32(ctratt);

	id->oacs = 0;
	/* ... */
	id->acl = 3;

	id->aerl = NVMET_ASYNC_EVENTS - 1;

	/* first slot is read-only, only one slot supported */
	id->frmw = (1 << 0) | (1 << 1);
	id->lpa = (1 << 0) | (1 << 1) | (1 << 2);
	id->elpe = NVMET_ERROR_LOG_SLOTS - 1;
	id->npss = 0;

	/* We support keep-alive timeout in granularity of seconds */
	id->kas = cpu_to_le16(NVMET_KAS);

	id->sqes = (0x6 << 4) | 0x6;
	id->cqes = (0x4 << 4) | 0x4;

	/* no enforcement soft-limit for maxcmd - pick arbitrary high value */
	id->maxcmd = cpu_to_le16(NVMET_MAX_CMD(ctrl));

	id->nn = cpu_to_le32(NVMET_MAX_NAMESPACES);
	id->mnan = cpu_to_le32(NVMET_MAX_NAMESPACES);
	id->oncs = cpu_to_le16(NVME_CTRL_ONCS_DSM |
			NVME_CTRL_ONCS_WRITE_ZEROES |
			NVME_CTRL_ONCS_RESERVATIONS);
	/* ... */
	id->vwc = NVME_CTRL_VWC_PRESENT;
	/* ... */
	id->awun = 0;
	id->awupf = 0;

	id->sgls = cpu_to_le32(NVME_CTRL_SGLS_BYTE_ALIGNED);
	if (ctrl->ops->flags & NVMF_KEYED_SGLS)
		id->sgls |= cpu_to_le32(NVME_CTRL_SGLS_KSDBDS);
	if (req->port->inline_data_size)
		id->sgls |= cpu_to_le32(NVME_CTRL_SGLS_SAOS);

	strscpy(id->subnqn, ctrl->subsys->subsysnqn, sizeof(id->subnqn));

	/*
	 * Max command capsule size is sqe + in-capsule data size.
	 * Disable in-capsule data for Metadata capable controllers.
	 */
	cmd_capsule_size = sizeof(struct nvme_command);
	if (!ctrl->pi_support)
		cmd_capsule_size += req->port->inline_data_size;
	id->ioccsz = cpu_to_le32(cmd_capsule_size / 16);

	/* ... */
	id->iorcsz = cpu_to_le32(sizeof(struct nvme_completion) / 16);

	id->msdbd = ctrl->ops->msdbd;
	/* ... */
	id->endgidmax = cpu_to_le16(NVMET_MAX_NAMESPACES);

	id->anacap = (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3) | (1 << 4);
	id->anatt = 10; /* random value */
	id->anagrpmax = cpu_to_le32(NVMET_MAX_ANAGRPS);
	id->nanagrpid = cpu_to_le32(NVMET_MAX_ANAGRPS);
	/* ... */
	id->psd[0].max_power = cpu_to_le16(0x9c4);
	id->psd[0].entry_lat = cpu_to_le32(0x10);
	id->psd[0].exit_lat = cpu_to_le32(0x4);

	id->nwpc = 1 << 0; /* write protect and no write protect */
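/*
 * ioccsz and iorcsz are in 16-byte units. Example: with the 64-byte
 * SQE and, say, 8192 bytes of inline data allowed on the port, ioccsz
 * is (64 + 8192) / 16 = 516; the response capsule is always a bare
 * 16-byte CQE, so iorcsz is 1.
 */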
/* in nvmet_execute_identify_ns(): */
	if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) {
		req->error_loc = offsetof(struct nvme_identify, nsid);
		/* ... invalid namespace error path elided ... */
	}
	/* ... */
	if (nvmet_ns_revalidate(req->ns)) {
		mutex_lock(&req->ns->subsys->lock);
		nvmet_ns_changed(req->ns->subsys, req->ns->nsid);
		mutex_unlock(&req->ns->subsys->lock);
	}
	/* ... */
	id->ncap = id->nsze =
		cpu_to_le64(req->ns->size >> req->ns->blksize_shift);
	switch (req->port->ana_state[req->ns->anagrpid]) {
	case NVME_ANA_INACCESSIBLE:
	case NVME_ANA_PERSISTENT_LOSS:
		break;
	default:
		id->nuse = id->nsze;
		break;
	}

	if (req->ns->bdev)
		nvmet_bdev_set_limits(req->ns->bdev, id);
	/* ... */
	id->nlbaf = 0;
	id->flbas = 0;
	/* ... */
	id->nmic = NVME_NS_NMIC_SHARED;
	id->anagrpid = cpu_to_le32(req->ns->anagrpid);

	if (req->ns->pr.enable)
		id->rescap = NVME_PR_SUPPORT_WRITE_EXCLUSIVE |
			/* ... remaining NVME_PR_SUPPORT_* bits elided ... */
	/* ... */
	id->endgid = cpu_to_le16(req->ns->nsid);

	memcpy(&id->nguid, &req->ns->nguid, sizeof(id->nguid));

	id->lbaf[0].ds = req->ns->blksize_shift;

	if (req->sq->ctrl->pi_support && nvmet_ns_has_pi(req->ns)) {
		id->dpc = NVME_NS_DPC_PI_FIRST | NVME_NS_DPC_PI_LAST |
			/* ... supported PI types elided ... */
		id->mc = NVME_MC_EXTENDED_LBA;
		id->dps = req->ns->pi_type;
		id->flbas = NVME_NS_FLBAS_META_EXT;
		id->lbaf[0].ms = cpu_to_le16(req->ns->metadata_size);
	}

	if (req->ns->readonly)
		id->nsattr |= NVME_NS_ATTR_RO;
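/*
 * NSZE, NCAP and NUSE are all in logical blocks, hence the shift of
 * the namespace byte size by blksize_shift above. The target has no
 * thin-provisioning insight into the backing device, so capacity and
 * utilization simply mirror the size, with NUSE left at zero while the
 * ANA state makes the namespace inaccessible.
 */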
/* in nvmet_execute_identify_endgrp_list(): */
	u16 min_endgid = le16_to_cpu(req->cmd->identify.cnssid);
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	/* ... */
	nvmet_for_each_enabled_ns(&ctrl->subsys->namespaces, idx, ns) {
		if (ns->nsid <= min_endgid)
			continue;

		list[i++] = cpu_to_le16(ns->nsid);
		/* ... stop once the identify buffer is full ... */
	}
	/* entry 0 carries the number of identifiers that follow */
	list[0] = cpu_to_le16(i - 1);
/* in nvmet_execute_identify_nslist(): */
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	u32 min_nsid = le32_to_cpu(req->cmd->identify.nsid);
	/* ... */
	req->error_loc = offsetof(struct nvme_identify, nsid);
	/* ... */
	nvmet_for_each_enabled_ns(&ctrl->subsys->namespaces, idx, ns) {
		if (ns->nsid <= min_nsid)
			continue;
		if (match_css && req->ns->csi != req->cmd->identify.csi)
			continue;
		list[i++] = cpu_to_le32(ns->nsid);
	}
/* in nvmet_execute_identify_desclist(): */
	if (memchr_inv(&req->ns->uuid, 0, sizeof(req->ns->uuid))) {
		status = nvmet_copy_ns_identifier(req, NVME_NIDT_UUID,
						  NVME_NIDT_UUID_LEN,
						  &req->ns->uuid, &off);
		/* ... */
	}
	if (memchr_inv(req->ns->nguid, 0, sizeof(req->ns->nguid))) {
		status = nvmet_copy_ns_identifier(req, NVME_NIDT_NGUID,
						  NVME_NIDT_NGUID_LEN,
						  &req->ns->nguid, &off);
		/* ... */
	}

	status = nvmet_copy_ns_identifier(req, NVME_NIDT_CSI,
					  NVME_NIDT_CSI_LEN,
					  &req->ns->csi, &off);
	/* ... */
	if (sg_zero_buffer(req->sg, req->sg_cnt, NVME_IDENTIFY_DATA_SIZE - off,
			off) != NVME_IDENTIFY_DATA_SIZE - off)
		/* ... internal error status elided ... */
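/*
 * The descriptor list is a sequence of (NIDT, NIDL, NID) entries: UUID
 * and NGUID descriptors are emitted only when non-zero, the CSI
 * descriptor is always present, and the remainder of the 4KB identify
 * buffer must read back as zeroes, hence the final sg_zero_buffer().
 */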
/* in nvmet_execute_id_cs_indep(): */
	id->nstat = NVME_NSTAT_NRDY;
	id->anagrpid = cpu_to_le32(req->ns->anagrpid);
	id->nmic = NVME_NS_NMIC_SHARED;
	if (req->ns->readonly)
		id->nsattr |= NVME_NS_ATTR_RO;
	if (req->ns->bdev && !bdev_nonrot(req->ns->bdev))
		id->nsfeat |= NVME_NS_ROTATIONAL;
	/* ... */
	if (req->ns->bdev && !bdev_write_cache(req->ns->bdev))
		id->nsfeat |= NVME_NS_VWC_NOT_PRESENT;
/* in nvmet_execute_identify(): */
	switch (req->cmd->identify.cns) {
	/* ... CNS cases elided; command-set specific data dispatches on CSI: */
		switch (req->cmd->identify.csi) {
		/* ... */
		}
		/* ... */
		switch (req->cmd->identify.csi) {
		/* ... */
		}
	/* ... */
	}

	pr_debug("unhandled identify cns %d on qid %d\n",
		 req->cmd->identify.cns, req->sq->qid);
	req->error_loc = offsetof(struct nvme_identify, cns);
/* in nvmet_write_protect_flush_sync(): */
	if (req->ns->file)
		status = nvmet_file_flush(req);
	else
		status = nvmet_bdev_flush(req);

	if (status)
		pr_err("write protect flush failed nsid: %u\n", req->ns->nsid);
/* in nvmet_set_feat_write_protect(): */
	u32 write_protect = le32_to_cpu(req->cmd->common.cdw11);
	/* ... */
	mutex_lock(&subsys->lock);
	switch (write_protect) {
	case NVME_NS_WRITE_PROTECT:
		req->ns->readonly = true;
		status = nvmet_write_protect_flush_sync(req);
		if (status)
			req->ns->readonly = false;
		break;
	case NVME_NS_NO_WRITE_PROTECT:
		req->ns->readonly = false;
		status = 0;
		break;
	default:
		break;
	}

	if (!status)
		nvmet_ns_changed(subsys, req->ns->nsid);
	mutex_unlock(&subsys->lock);
/* in nvmet_set_feat_kato(): */
	u32 val32 = le32_to_cpu(req->cmd->common.cdw11);

	nvmet_stop_keep_alive_timer(req->sq->ctrl);
	req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000);
	nvmet_start_keep_alive_timer(req->sq->ctrl);

	nvmet_set_result(req, req->sq->ctrl->kato);
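/*
 * KATO arrives in cdw11 in milliseconds but is stored in seconds,
 * rounded up: a requested 2500 ms becomes a 3 s timer. Note the
 * asymmetry with nvmet_get_feat_kato() below, which converts back with
 * kato * 1000 when reporting the feature.
 */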
/* in nvmet_set_feat_async_event(): */
	u32 val32 = le32_to_cpu(req->cmd->common.cdw11);

	if (val32 & ~mask) {
		req->error_loc = offsetof(struct nvme_common_command, cdw11);
		return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
	}

	WRITE_ONCE(req->sq->ctrl->aen_enabled, val32);
/* in nvmet_set_feat_host_id(): */
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	/* ... */
	/*
	 * The NVMe base specification v2.1 recommends supporting 128-bit host
	 * IDs, and the fabrics specification requires it. That section notes
	 * that "The controller may support a 64-bit Host Identifier and/or an
	 * extended 128-bit Host Identifier", so simplify this support and do
	 * not support 64-bit host IDs, avoiding the need to check that all
	 * controllers associated with the same host use the same value.
	 */
	if (!(req->cmd->common.cdw11 & cpu_to_le32(1 << 0))) {
		req->error_loc = offsetof(struct nvme_common_command, cdw11);
		return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
	}

	return nvmet_copy_from_sgl(req, 0, &req->sq->ctrl->hostid,
				   sizeof(req->sq->ctrl->hostid));
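/*
 * cdw11 bit 0 is the EXHID flag, the host asking for the extended
 * 128-bit Host Identifier format. Since only 128-bit IDs are supported
 * here, Set Features without it fails with Invalid Field; the Get
 * Features path below applies the same check before copying hostid out.
 */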
/* in nvmet_set_feat_irq_coalesce(): */
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	u32 cdw11 = le32_to_cpu(req->cmd->common.cdw11);
	/* ... this feature is PCI-only; reject on fabrics controllers: */
	if (!nvmet_is_pci_ctrl(ctrl)) {
		req->error_loc = offsetof(struct nvme_common_command, cdw10);
		return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
	}

	return ctrl->ops->set_feature(ctrl, NVME_FEAT_IRQ_COALESCE, &irqc);
/* in nvmet_set_feat_irq_config(): */
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	u32 cdw11 = le32_to_cpu(req->cmd->common.cdw11);
	/* ... */
	if (!nvmet_is_pci_ctrl(ctrl)) {
		req->error_loc = offsetof(struct nvme_common_command, cdw10);
		return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
	}

	return ctrl->ops->set_feature(ctrl, NVME_FEAT_IRQ_CONFIG, &irqcfg);
/* in nvmet_set_feat_arbitration(): */
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	u32 cdw11 = le32_to_cpu(req->cmd->common.cdw11);
	/* ... */
	if (!ctrl->ops->set_feature) {
		req->error_loc = offsetof(struct nvme_common_command, cdw10);
		return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
	}

	return ctrl->ops->set_feature(ctrl, NVME_FEAT_ARBITRATION, &arb);
/* in nvmet_execute_set_features(): */
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
	u32 cdw11 = le32_to_cpu(req->cmd->common.cdw11);
	/* ... */
	nvmet_set_result(req,
			 (subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
	/* ... */
	req->error_loc = offsetof(struct nvme_common_command, cdw10);
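/*
 * The Number of Queues result is two 0's based counts packed together:
 * bits 15:0 report the submission queues allocated (NSQA) and bits
 * 31:16 the completion queues (NCQA), so max_qid - 1 in both halves
 * grants the host max_qid queues of each kind.
 */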
/* in nvmet_get_feat_write_protect(): */
	mutex_lock(&subsys->lock);
	if (req->ns->readonly == true)
		result = NVME_NS_WRITE_PROTECT;
	else
		result = NVME_NS_NO_WRITE_PROTECT;
	nvmet_set_result(req, result);
	mutex_unlock(&subsys->lock);
/* in nvmet_get_feat_irq_coalesce(): */
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	/* ... */
	if (!nvmet_is_pci_ctrl(ctrl)) {
		req->error_loc = offsetof(struct nvme_common_command, cdw10);
		return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
	}

	status = ctrl->ops->get_feature(ctrl, NVME_FEAT_IRQ_COALESCE, &irqc);
/* in nvmet_get_feat_irq_config(): */
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	u32 iv = le32_to_cpu(req->cmd->common.cdw11) & 0xffff;
	/* ... */
	if (!nvmet_is_pci_ctrl(ctrl)) {
		req->error_loc = offsetof(struct nvme_common_command, cdw10);
		return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
	}

	status = ctrl->ops->get_feature(ctrl, NVME_FEAT_IRQ_CONFIG, &irqcfg);
/* in nvmet_get_feat_arbitration(): */
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	/* ... */
	if (!ctrl->ops->get_feature) {
		req->error_loc = offsetof(struct nvme_common_command, cdw10);
		return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
	}

	status = ctrl->ops->get_feature(ctrl, NVME_FEAT_ARBITRATION, &arb);
/* in nvmet_get_feat_kato(): */
	nvmet_set_result(req, req->sq->ctrl->kato * 1000);

/* in nvmet_get_feat_async_event(): */
	nvmet_set_result(req, READ_ONCE(req->sq->ctrl->aen_enabled));
/* in nvmet_execute_get_features(): */
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
	/* ... */
	nvmet_set_result(req,
			 (subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
	/* ... */
	/* need 128-bit host identifier flag */
	if (!(req->cmd->common.cdw11 & cpu_to_le32(1 << 0))) {
		req->error_loc =
			offsetof(struct nvme_common_command, cdw11);
		/* ... invalid field error path elided ... */
	}

	status = nvmet_copy_to_sgl(req, 0, &req->sq->ctrl->hostid,
				   sizeof(req->sq->ctrl->hostid));
	/* ... */
	req->error_loc =
		offsetof(struct nvme_common_command, cdw10);
/* in nvmet_execute_async_event(): */
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	/* ... */
	mutex_lock(&ctrl->lock);
	if (ctrl->nr_async_event_cmds >= NVMET_ASYNC_EVENTS) {
		mutex_unlock(&ctrl->lock);
		nvmet_req_complete(req, NVME_SC_ASYNC_LIMIT | NVME_STATUS_DNR);
		return;
	}
	ctrl->async_event_cmds[ctrl->nr_async_event_cmds++] = req;
	mutex_unlock(&ctrl->lock);

	queue_work(nvmet_wq, &ctrl->async_event_work);
/* in nvmet_execute_keep_alive(): */
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	/* ... */
	if (!ctrl->kato) {
		/* ... keep-alive not enabled on this controller ... */
	}

	pr_debug("ctrl %d update keep-alive timer for %d secs\n",
		 ctrl->cntlid, ctrl->kato);
	mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
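/*
 * ctrl->kato is in seconds, so kato * HZ converts it to jiffies for
 * mod_delayed_work(): each Keep Alive command pushes the keep-alive
 * expiry that far into the future again, and a controller that stops
 * receiving them is eventually torn down by ka_work.
 */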
/* in nvmet_admin_cmd_data_len(): */
	struct nvme_command *cmd = req->cmd;
	/* ... */
	switch (cmd->common.opcode) {
	/* ... */
	case nvme_admin_get_features:
		return nvmet_feat_data_len(req, le32_to_cpu(cmd->common.cdw10));
	/* ... */
	}
/* in nvmet_parse_admin_cmd(): */
	struct nvme_command *cmd = req->cmd;
	/* ... fabrics/discovery/passthru parsing and status checks elided ... */
	/* For PCI controllers, admin commands shall not use SGLs: */
	if (nvmet_is_pci_ctrl(req->sq->ctrl) && !req->sq->qid &&
	    cmd->common.flags & NVME_CMD_SGL_ALL)
		return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
	/* ... */
	switch (cmd->common.opcode) {
	case nvme_admin_delete_sq:
		req->execute = nvmet_execute_delete_sq;
		return 0;
	case nvme_admin_create_sq:
		req->execute = nvmet_execute_create_sq;
		return 0;
	case nvme_admin_get_log_page:
		req->execute = nvmet_execute_get_log_page;
		return 0;
	case nvme_admin_delete_cq:
		req->execute = nvmet_execute_delete_cq;
		return 0;
	case nvme_admin_create_cq:
		req->execute = nvmet_execute_create_cq;
		return 0;
	case nvme_admin_identify:
		req->execute = nvmet_execute_identify;
		return 0;
	case nvme_admin_abort_cmd:
		req->execute = nvmet_execute_abort;
		return 0;
	case nvme_admin_set_features:
		req->execute = nvmet_execute_set_features;
		return 0;
	case nvme_admin_get_features:
		req->execute = nvmet_execute_get_features;
		return 0;
	case nvme_admin_async_event:
		req->execute = nvmet_execute_async_event;
		return 0;
	case nvme_admin_keep_alive:
		req->execute = nvmet_execute_keep_alive;
		return 0;
	}
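/*
 * nvmet_parse_admin_cmd() is purely a dispatcher: it validates the
 * command and installs the matching nvmet_execute_*() handler in
 * req->execute, which the transport invokes once the command's data
 * has been transferred. Unknown opcodes fall through to an invalid
 * opcode completion.
 */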