Lines Matching +full:we +full:- +full:off +full:- +full:ns

1 // SPDX-License-Identifier: GPL-2.0
4 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
17 struct nvmet_ctrl *ctrl = req->sq->ctrl; in nvmet_execute_delete_sq()
18 u16 sqid = le16_to_cpu(req->cmd->delete_queue.qid); in nvmet_execute_delete_sq()
35 status = ctrl->ops->delete_sq(ctrl, sqid); in nvmet_execute_delete_sq()
43 struct nvmet_ctrl *ctrl = req->sq->ctrl; in nvmet_execute_create_sq()
44 struct nvme_command *cmd = req->cmd; in nvmet_execute_create_sq()
45 u16 sqid = le16_to_cpu(cmd->create_sq.sqid); in nvmet_execute_create_sq()
46 u16 cqid = le16_to_cpu(cmd->create_sq.cqid); in nvmet_execute_create_sq()
47 u16 sq_flags = le16_to_cpu(cmd->create_sq.sq_flags); in nvmet_execute_create_sq()
48 u16 qsize = le16_to_cpu(cmd->create_sq.qsize); in nvmet_execute_create_sq()
49 u64 prp1 = le64_to_cpu(cmd->create_sq.prp1); in nvmet_execute_create_sq()
72 if (!qsize || qsize > NVME_CAP_MQES(ctrl->cap)) { in nvmet_execute_create_sq()
77 status = ctrl->ops->create_sq(ctrl, sqid, cqid, sq_flags, qsize, prp1); in nvmet_execute_create_sq()
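
The qsize check above rejects empty queues and queues larger than MQES, the 0's-based maximum advertised in the low 16 bits of the controller's CAP register. A minimal userspace sketch of that validation (illustrative only; CAP_MQES here is a stand-in for the kernel's NVME_CAP_MQES macro):

    #include <stdint.h>
    #include <stdio.h>

    /* stand-in for the kernel's NVME_CAP_MQES(): low 16 bits of CAP */
    #define CAP_MQES(cap) ((uint16_t)((cap) & 0xffff))

    static int queue_size_valid(uint64_t cap, uint16_t qsize)
    {
        /* mirror the check above: reject empty or oversized queues */
        return qsize && qsize <= CAP_MQES(cap);
    }

    int main(void)
    {
        uint64_t cap = 0x3ff; /* MQES = 1023, i.e. up to 1024 entries */

        printf("qsize 0:    %s\n", queue_size_valid(cap, 0) ? "ok" : "invalid");
        printf("qsize 1023: %s\n", queue_size_valid(cap, 1023) ? "ok" : "invalid");
        printf("qsize 1024: %s\n", queue_size_valid(cap, 1024) ? "ok" : "invalid");
        return 0;
    }
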
85 struct nvmet_ctrl *ctrl = req->sq->ctrl; in nvmet_execute_delete_cq()
86 u16 cqid = le16_to_cpu(req->cmd->delete_queue.qid); in nvmet_execute_delete_cq()
98 if (!ctrl->cqs[cqid] || nvmet_cq_in_use(ctrl->cqs[cqid])) { in nvmet_execute_delete_cq()
104 status = ctrl->ops->delete_cq(ctrl, cqid); in nvmet_execute_delete_cq()
112 struct nvmet_ctrl *ctrl = req->sq->ctrl; in nvmet_execute_create_cq()
113 struct nvme_command *cmd = req->cmd; in nvmet_execute_create_cq()
114 u16 cqid = le16_to_cpu(cmd->create_cq.cqid); in nvmet_execute_create_cq()
115 u16 cq_flags = le16_to_cpu(cmd->create_cq.cq_flags); in nvmet_execute_create_cq()
116 u16 qsize = le16_to_cpu(cmd->create_cq.qsize); in nvmet_execute_create_cq()
117 u16 irq_vector = le16_to_cpu(cmd->create_cq.irq_vector); in nvmet_execute_create_cq()
118 u64 prp1 = le64_to_cpu(cmd->create_cq.prp1); in nvmet_execute_create_cq()
130 if (!qsize || qsize > NVME_CAP_MQES(ctrl->cap)) { in nvmet_execute_create_cq()
135 status = ctrl->ops->create_cq(ctrl, cqid, cq_flags, qsize, in nvmet_execute_create_cq()
144 u32 len = le16_to_cpu(cmd->get_log_page.numdu); in nvmet_get_log_page_len()
147 len += le16_to_cpu(cmd->get_log_page.numdl); in nvmet_get_log_page_len()
159 return sizeof(req->sq->ctrl->hostid); in nvmet_feat_data_len()
167 return le64_to_cpu(cmd->get_log_page.lpo); in nvmet_get_log_page_offset()
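
nvmet_get_log_page_len() stitches the 16-bit NUMDU/NUMDL halves back into a 0's-based dword count and scales it to bytes, while nvmet_get_log_page_offset() takes the byte offset straight from LPO. A self-contained sketch of the length arithmetic (not the kernel code itself):

    #include <stdint.h>
    #include <stdio.h>

    /* NUMD is a 0's-based dword count split across two 16-bit fields */
    static uint32_t log_page_len(uint16_t numdu, uint16_t numdl)
    {
        uint32_t len = ((uint32_t)numdu << 16) | numdl;

        return (len + 1) * sizeof(uint32_t); /* dwords -> bytes */
    }

    int main(void)
    {
        /* numdu=0, numdl=1023 -> 1024 dwords -> 4096 bytes */
        printf("%u bytes\n", log_page_len(0, 1023));
        return 0;
    }
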
172 nvmet_req_complete(req, nvmet_zero_sgl(req, 0, req->transfer_len)); in nvmet_execute_get_log_page_noop()
177 struct nvmet_ctrl *ctrl = req->sq->ctrl; in nvmet_execute_get_log_page_error()
183 spin_lock_irqsave(&ctrl->error_lock, flags); in nvmet_execute_get_log_page_error()
184 slot = ctrl->err_counter % NVMET_ERROR_LOG_SLOTS; in nvmet_execute_get_log_page_error()
187 if (nvmet_copy_to_sgl(req, offset, &ctrl->slots[slot], in nvmet_execute_get_log_page_error()
192 slot = NVMET_ERROR_LOG_SLOTS - 1; in nvmet_execute_get_log_page_error()
194 slot--; in nvmet_execute_get_log_page_error()
197 spin_unlock_irqrestore(&ctrl->error_lock, flags); in nvmet_execute_get_log_page_error()
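
The error-log fragments above walk the slot ring newest-first: the starting slot is err_counter modulo the slot count, and each step decrements, wrapping to the last slot after slot 0. A standalone sketch of that traversal, with a stand-in slot count:

    #include <stdio.h>

    #define ERROR_LOG_SLOTS 8 /* stand-in for NVMET_ERROR_LOG_SLOTS */

    int main(void)
    {
        unsigned long err_counter = 11; /* 11 errors logged so far */
        unsigned int slot = err_counter % ERROR_LOG_SLOTS;
        int i;

        /* emit the most recent ERROR_LOG_SLOTS entries, newest first */
        for (i = 0; i < ERROR_LOG_SLOTS; i++) {
            printf("copy slot %u\n", slot);
            if (slot == 0)
                slot = ERROR_LOG_SLOTS - 1;
            else
                slot--;
        }
        return 0;
    }
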
212 logs->lids[NVME_LOG_SUPPORTED] = cpu_to_le32(NVME_LIDS_LSUPP); in nvmet_execute_get_supported_log_pages()
213 logs->lids[NVME_LOG_ERROR] = cpu_to_le32(NVME_LIDS_LSUPP); in nvmet_execute_get_supported_log_pages()
214 logs->lids[NVME_LOG_SMART] = cpu_to_le32(NVME_LIDS_LSUPP); in nvmet_execute_get_supported_log_pages()
215 logs->lids[NVME_LOG_FW_SLOT] = cpu_to_le32(NVME_LIDS_LSUPP); in nvmet_execute_get_supported_log_pages()
216 logs->lids[NVME_LOG_CHANGED_NS] = cpu_to_le32(NVME_LIDS_LSUPP); in nvmet_execute_get_supported_log_pages()
217 logs->lids[NVME_LOG_CMD_EFFECTS] = cpu_to_le32(NVME_LIDS_LSUPP); in nvmet_execute_get_supported_log_pages()
218 logs->lids[NVME_LOG_ENDURANCE_GROUP] = cpu_to_le32(NVME_LIDS_LSUPP); in nvmet_execute_get_supported_log_pages()
219 logs->lids[NVME_LOG_ANA] = cpu_to_le32(NVME_LIDS_LSUPP); in nvmet_execute_get_supported_log_pages()
220 logs->lids[NVME_LOG_FEATURES] = cpu_to_le32(NVME_LIDS_LSUPP); in nvmet_execute_get_supported_log_pages()
221 logs->lids[NVME_LOG_RMI] = cpu_to_le32(NVME_LIDS_LSUPP); in nvmet_execute_get_supported_log_pages()
222 logs->lids[NVME_LOG_RESERVATION] = cpu_to_le32(NVME_LIDS_LSUPP); in nvmet_execute_get_supported_log_pages()
240 /* we don't have the right data for file-backed ns */ in nvmet_get_smart_log_nsid()
241 if (!req->ns->bdev) in nvmet_get_smart_log_nsid()
244 host_reads = part_stat_read(req->ns->bdev, ios[READ]); in nvmet_get_smart_log_nsid()
246 DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[READ]), 1000); in nvmet_get_smart_log_nsid()
247 host_writes = part_stat_read(req->ns->bdev, ios[WRITE]); in nvmet_get_smart_log_nsid()
249 DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[WRITE]), 1000); in nvmet_get_smart_log_nsid()
251 put_unaligned_le64(host_reads, &slog->host_reads[0]); in nvmet_get_smart_log_nsid()
252 put_unaligned_le64(data_units_read, &slog->data_units_read[0]); in nvmet_get_smart_log_nsid()
253 put_unaligned_le64(host_writes, &slog->host_writes[0]); in nvmet_get_smart_log_nsid()
254 put_unaligned_le64(data_units_written, &slog->data_units_written[0]); in nvmet_get_smart_log_nsid()
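
The SMART fragments convert raw 512-byte sector counts into NVMe "data units" by rounding up to multiples of 1000 sectors, which is the spec's convention of one data unit per 1000 512-byte blocks. A minimal sketch of DIV_ROUND_UP and the conversion:

    #include <stdint.h>
    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        uint64_t sectors_read = 2500; /* 512-byte sectors from block stats */

        /* 2500 sectors -> 3 data units (units of 1000 sectors, rounded up) */
        printf("data units read: %llu\n",
               (unsigned long long)DIV_ROUND_UP(sectors_read, 1000));
        return 0;
    }
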
264 struct nvmet_ns *ns; in nvmet_get_smart_log_all() local
268 ctrl = req->sq->ctrl; in nvmet_get_smart_log_all()
269 nvmet_for_each_enabled_ns(&ctrl->subsys->namespaces, idx, ns) { in nvmet_get_smart_log_all()
270 /* we don't have the right data for file-backed ns */ in nvmet_get_smart_log_all()
271 if (!ns->bdev) in nvmet_get_smart_log_all()
273 host_reads += part_stat_read(ns->bdev, ios[READ]); in nvmet_get_smart_log_all()
275 part_stat_read(ns->bdev, sectors[READ]), 1000); in nvmet_get_smart_log_all()
276 host_writes += part_stat_read(ns->bdev, ios[WRITE]); in nvmet_get_smart_log_all()
278 part_stat_read(ns->bdev, sectors[WRITE]), 1000); in nvmet_get_smart_log_all()
281 put_unaligned_le64(host_reads, &slog->host_reads[0]); in nvmet_get_smart_log_all()
282 put_unaligned_le64(data_units_read, &slog->data_units_read[0]); in nvmet_get_smart_log_all()
283 put_unaligned_le64(host_writes, &slog->host_writes[0]); in nvmet_get_smart_log_all()
284 put_unaligned_le64(data_units_written, &slog->data_units_written[0]); in nvmet_get_smart_log_all()
295 req->cmd->common.nsid = cpu_to_le32(le16_to_cpu( in nvmet_execute_get_log_page_rmi()
296 req->cmd->get_log_page.lsi)); in nvmet_execute_get_log_page_rmi()
301 if (!req->ns->bdev || bdev_nonrot(req->ns->bdev)) { in nvmet_execute_get_log_page_rmi()
306 if (req->transfer_len != sizeof(*log)) { in nvmet_execute_get_log_page_rmi()
315 log->endgid = req->cmd->get_log_page.lsi; in nvmet_execute_get_log_page_rmi()
316 disk = req->ns->bdev->bd_disk; in nvmet_execute_get_log_page_rmi()
317 if (disk && disk->ia_ranges) in nvmet_execute_get_log_page_rmi()
318 log->numa = cpu_to_le16(disk->ia_ranges->nr_ia_ranges); in nvmet_execute_get_log_page_rmi()
320 log->numa = cpu_to_le16(1); in nvmet_execute_get_log_page_rmi()
334 if (req->transfer_len != sizeof(*log)) in nvmet_execute_get_log_page_smart()
341 if (req->cmd->get_log_page.nsid == cpu_to_le32(NVME_NSID_ALL)) in nvmet_execute_get_log_page_smart()
348 spin_lock_irqsave(&req->sq->ctrl->error_lock, flags); in nvmet_execute_get_log_page_smart()
349 put_unaligned_le64(req->sq->ctrl->err_counter, in nvmet_execute_get_log_page_smart()
350 &log->num_err_log_entries); in nvmet_execute_get_log_page_smart()
351 spin_unlock_irqrestore(&req->sq->ctrl->error_lock, flags); in nvmet_execute_get_log_page_smart()
365 log->acs[nvme_admin_delete_sq] = in nvmet_get_cmd_effects_admin()
366 log->acs[nvme_admin_create_sq] = in nvmet_get_cmd_effects_admin()
367 log->acs[nvme_admin_delete_cq] = in nvmet_get_cmd_effects_admin()
368 log->acs[nvme_admin_create_cq] = in nvmet_get_cmd_effects_admin()
372 log->acs[nvme_admin_get_log_page] = in nvmet_get_cmd_effects_admin()
373 log->acs[nvme_admin_identify] = in nvmet_get_cmd_effects_admin()
374 log->acs[nvme_admin_abort_cmd] = in nvmet_get_cmd_effects_admin()
375 log->acs[nvme_admin_set_features] = in nvmet_get_cmd_effects_admin()
376 log->acs[nvme_admin_get_features] = in nvmet_get_cmd_effects_admin()
377 log->acs[nvme_admin_async_event] = in nvmet_get_cmd_effects_admin()
378 log->acs[nvme_admin_keep_alive] = in nvmet_get_cmd_effects_admin()
384 log->iocs[nvme_cmd_read] = in nvmet_get_cmd_effects_nvm()
385 log->iocs[nvme_cmd_flush] = in nvmet_get_cmd_effects_nvm()
386 log->iocs[nvme_cmd_dsm] = in nvmet_get_cmd_effects_nvm()
387 log->iocs[nvme_cmd_resv_acquire] = in nvmet_get_cmd_effects_nvm()
388 log->iocs[nvme_cmd_resv_register] = in nvmet_get_cmd_effects_nvm()
389 log->iocs[nvme_cmd_resv_release] = in nvmet_get_cmd_effects_nvm()
390 log->iocs[nvme_cmd_resv_report] = in nvmet_get_cmd_effects_nvm()
392 log->iocs[nvme_cmd_write] = in nvmet_get_cmd_effects_nvm()
393 log->iocs[nvme_cmd_write_zeroes] = in nvmet_get_cmd_effects_nvm()
399 log->iocs[nvme_cmd_zone_append] = in nvmet_get_cmd_effects_zns()
400 log->iocs[nvme_cmd_zone_mgmt_send] = in nvmet_get_cmd_effects_zns()
402 log->iocs[nvme_cmd_zone_mgmt_recv] = in nvmet_get_cmd_effects_zns()
408 struct nvmet_ctrl *ctrl = req->sq->ctrl; in nvmet_execute_get_log_cmd_effects_ns()
418 switch (req->cmd->get_log_page.csi) { in nvmet_execute_get_log_cmd_effects_ns()
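
Each acs/iocs entry above is a 32-bit commands-supported-and-effects descriptor indexed by opcode; the elided right-hand sides set at least the "command supported" bit. A hedged sketch of filling such a table (CSUPP is bit 0 per the NVMe spec, and the opcode values are the standard NVM ones; the array itself is illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define CMD_EFFECTS_CSUPP (1u << 0) /* command supported */

    enum { CMD_FLUSH = 0x00, CMD_WRITE = 0x01, CMD_READ = 0x02 };

    int main(void)
    {
        uint32_t iocs[256] = { 0 }; /* one descriptor per I/O opcode */

        iocs[CMD_READ] = CMD_EFFECTS_CSUPP;
        iocs[CMD_FLUSH] = CMD_EFFECTS_CSUPP;
        /* writes change media contents, so a real log would set LBCC too */
        iocs[CMD_WRITE] = CMD_EFFECTS_CSUPP;

        printf("read descriptor: 0x%08x\n", (unsigned int)iocs[CMD_READ]);
        return 0;
    }
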
446 struct nvmet_ctrl *ctrl = req->sq->ctrl; in nvmet_execute_get_log_changed_ns()
450 if (req->transfer_len != NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32)) in nvmet_execute_get_log_changed_ns()
453 mutex_lock(&ctrl->lock); in nvmet_execute_get_log_changed_ns()
454 if (ctrl->nr_changed_ns == U32_MAX) in nvmet_execute_get_log_changed_ns()
457 len = ctrl->nr_changed_ns * sizeof(__le32); in nvmet_execute_get_log_changed_ns()
458 status = nvmet_copy_to_sgl(req, 0, ctrl->changed_ns_list, len); in nvmet_execute_get_log_changed_ns()
460 status = nvmet_zero_sgl(req, len, req->transfer_len - len); in nvmet_execute_get_log_changed_ns()
461 ctrl->nr_changed_ns = 0; in nvmet_execute_get_log_changed_ns()
463 mutex_unlock(&ctrl->lock); in nvmet_execute_get_log_changed_ns()
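
The changed-namespace log transfers a fixed-size buffer: the recorded NSIDs are copied first, the tail is zero-filled, and the pending count is reset while the controller lock is held. A userspace sketch of the copy-then-zero pattern, with hypothetical sizes:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define MAX_CHANGED_NAMESPACES 1024

    int main(void)
    {
        uint32_t changed[MAX_CHANGED_NAMESPACES] = { 3, 7, 42 };
        uint32_t out[MAX_CHANGED_NAMESPACES];
        size_t nr_changed = 3;
        size_t len = nr_changed * sizeof(uint32_t);

        /* copy the valid prefix, then zero the tail of the log buffer */
        memcpy(out, changed, len);
        memset((char *)out + len, 0, sizeof(out) - len);
        nr_changed = 0; /* reading the log clears the pending list */

        printf("out[2] = %u, out[3] = %u\n", out[2], out[3]);
        return 0;
    }
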
471 struct nvmet_ctrl *ctrl = req->sq->ctrl; in nvmet_format_ana_group()
472 struct nvmet_ns *ns; in nvmet_format_ana_group() local
476 if (!(req->cmd->get_log_page.lsp & NVME_ANA_LOG_RGO)) { in nvmet_format_ana_group()
477 nvmet_for_each_enabled_ns(&ctrl->subsys->namespaces, idx, ns) { in nvmet_format_ana_group()
478 if (ns->anagrpid == grpid) in nvmet_format_ana_group()
479 desc->nsids[count++] = cpu_to_le32(ns->nsid); in nvmet_format_ana_group()
483 desc->grpid = cpu_to_le32(grpid); in nvmet_format_ana_group()
484 desc->nnsids = cpu_to_le32(count); in nvmet_format_ana_group()
485 desc->chgcnt = cpu_to_le64(nvmet_ana_chgcnt); in nvmet_format_ana_group()
486 desc->state = req->port->ana_state[grpid]; in nvmet_format_ana_group()
487 memset(desc->rsvd17, 0, sizeof(desc->rsvd17)); in nvmet_format_ana_group()
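
nvmet_format_ana_group() fills one ANA group descriptor: unless the host set the RGO ("return groups only") bit in LSP it gathers the NSIDs belonging to the group, then records the group id, namespace count, change count, and the port's ANA state. A simplified sketch with a hypothetical, trimmed-down descriptor struct (the spec layout has reserved padding):

    #include <stdint.h>
    #include <stdio.h>

    /* hypothetical, trimmed-down descriptor; not the on-wire layout */
    struct ana_group_desc {
        uint32_t grpid;
        uint32_t nnsids;
        uint64_t chgcnt;
        uint8_t  state;
        uint32_t nsids[4];
    };

    int main(void)
    {
        /* pretend namespaces 1 and 3 belong to ANA group 2 */
        uint32_t ns_nsid[] = { 1, 2, 3 };
        uint32_t ns_grp[]  = { 2, 1, 2 };
        struct ana_group_desc desc = { 0 };
        uint32_t grpid = 2, count = 0;

        for (unsigned int i = 0; i < 3; i++)
            if (ns_grp[i] == grpid)
                desc.nsids[count++] = ns_nsid[i];

        desc.grpid = grpid;
        desc.nnsids = count;
        desc.chgcnt = 5;   /* global ANA change counter */
        desc.state = 0x01; /* optimized */

        printf("group %u has %u namespaces\n", desc.grpid, desc.nnsids);
        return 0;
    }
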
501 req->cmd->common.nsid = cpu_to_le32(le16_to_cpu( in nvmet_execute_get_log_page_endgrp()
502 req->cmd->get_log_page.lsi)); in nvmet_execute_get_log_page_endgrp()
513 if (!req->ns->bdev) in nvmet_execute_get_log_page_endgrp()
516 host_reads = part_stat_read(req->ns->bdev, ios[READ]); in nvmet_execute_get_log_page_endgrp()
518 DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[READ]), 1000); in nvmet_execute_get_log_page_endgrp()
519 host_writes = part_stat_read(req->ns->bdev, ios[WRITE]); in nvmet_execute_get_log_page_endgrp()
521 DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[WRITE]), 1000); in nvmet_execute_get_log_page_endgrp()
523 put_unaligned_le64(host_reads, &log->hrc[0]); in nvmet_execute_get_log_page_endgrp()
524 put_unaligned_le64(data_units_read, &log->dur[0]); in nvmet_execute_get_log_page_endgrp()
525 put_unaligned_le64(host_writes, &log->hwc[0]); in nvmet_execute_get_log_page_endgrp()
526 put_unaligned_le64(data_units_written, &log->duw[0]); in nvmet_execute_get_log_page_endgrp()
573 /* copy the header last, once we know the number of groups */ in nvmet_execute_get_log_page_ana()
590 features->fis[NVME_FEAT_NUM_QUEUES] = in nvmet_execute_get_log_page_features()
592 features->fis[NVME_FEAT_KATO] = in nvmet_execute_get_log_page_features()
594 features->fis[NVME_FEAT_ASYNC_EVENT] = in nvmet_execute_get_log_page_features()
596 features->fis[NVME_FEAT_HOST_ID] = in nvmet_execute_get_log_page_features()
598 features->fis[NVME_FEAT_WRITE_PROTECT] = in nvmet_execute_get_log_page_features()
600 features->fis[NVME_FEAT_RESV_MASK] = in nvmet_execute_get_log_page_features()
611 if (!nvmet_check_transfer_len(req, nvmet_get_log_page_len(req->cmd))) in nvmet_execute_get_log_page()
614 switch (req->cmd->get_log_page.lid) { in nvmet_execute_get_log_page()
623 * We only support a single firmware slot, which is always in nvmet_execute_get_log_page()
624 * active, so we can zero out the whole firmware slot log and in nvmet_execute_get_log_page()
644 req->cmd->get_log_page.lid, req->sq->qid); in nvmet_execute_get_log_page()
645 req->error_loc = offsetof(struct nvme_get_log_page_command, lid); in nvmet_execute_get_log_page()
651 struct nvmet_ctrl *ctrl = req->sq->ctrl; in nvmet_execute_identify_ctrl()
652 struct nvmet_subsys *subsys = ctrl->subsys; in nvmet_execute_identify_ctrl()
657 if (!subsys->subsys_discovered) { in nvmet_execute_identify_ctrl()
658 mutex_lock(&subsys->lock); in nvmet_execute_identify_ctrl()
659 subsys->subsys_discovered = true; in nvmet_execute_identify_ctrl()
660 mutex_unlock(&subsys->lock); in nvmet_execute_identify_ctrl()
669 id->vid = cpu_to_le16(subsys->vendor_id); in nvmet_execute_identify_ctrl()
670 id->ssvid = cpu_to_le16(subsys->subsys_vendor_id); in nvmet_execute_identify_ctrl()
672 memcpy(id->sn, ctrl->subsys->serial, NVMET_SN_MAX_SIZE); in nvmet_execute_identify_ctrl()
673 memcpy_and_pad(id->mn, sizeof(id->mn), subsys->model_number, in nvmet_execute_identify_ctrl()
674 strlen(subsys->model_number), ' '); in nvmet_execute_identify_ctrl()
675 memcpy_and_pad(id->fr, sizeof(id->fr), in nvmet_execute_identify_ctrl()
676 subsys->firmware_rev, strlen(subsys->firmware_rev), ' '); in nvmet_execute_identify_ctrl()
678 put_unaligned_le24(subsys->ieee_oui, id->ieee); in nvmet_execute_identify_ctrl()
680 id->rab = 6; in nvmet_execute_identify_ctrl()
682 if (nvmet_is_disc_subsys(ctrl->subsys)) in nvmet_execute_identify_ctrl()
683 id->cntrltype = NVME_CTRL_DISC; in nvmet_execute_identify_ctrl()
685 id->cntrltype = NVME_CTRL_IO; in nvmet_execute_identify_ctrl()
687 /* we support multiple ports, multiple hosts and ANA: */ in nvmet_execute_identify_ctrl()
688 id->cmic = NVME_CTRL_CMIC_MULTI_PORT | NVME_CTRL_CMIC_MULTI_CTRL | in nvmet_execute_identify_ctrl()
692 if (ctrl->ops->get_mdts) in nvmet_execute_identify_ctrl()
693 id->mdts = ctrl->ops->get_mdts(ctrl); in nvmet_execute_identify_ctrl()
695 id->mdts = 0; in nvmet_execute_identify_ctrl()
697 id->cntlid = cpu_to_le16(ctrl->cntlid); in nvmet_execute_identify_ctrl()
698 id->ver = cpu_to_le32(ctrl->subsys->ver); in nvmet_execute_identify_ctrl()
701 id->oaes = cpu_to_le32(NVMET_AEN_CFG_OPTIONAL); in nvmet_execute_identify_ctrl()
705 id->ctratt = cpu_to_le32(ctratt); in nvmet_execute_identify_ctrl()
707 id->oacs = 0; in nvmet_execute_identify_ctrl()
710 * We don't really have a practical limit on the number of abort in nvmet_execute_identify_ctrl()
711 * commands. But we don't do anything useful for abort either, so in nvmet_execute_identify_ctrl()
714 id->acl = 3; in nvmet_execute_identify_ctrl()
716 id->aerl = NVMET_ASYNC_EVENTS - 1; in nvmet_execute_identify_ctrl()
718 /* first slot is read-only, only one slot supported */ in nvmet_execute_identify_ctrl()
719 id->frmw = (1 << 0) | (1 << 1); in nvmet_execute_identify_ctrl()
720 id->lpa = (1 << 0) | (1 << 1) | (1 << 2); in nvmet_execute_identify_ctrl()
721 id->elpe = NVMET_ERROR_LOG_SLOTS - 1; in nvmet_execute_identify_ctrl()
722 id->npss = 0; in nvmet_execute_identify_ctrl()
724 /* We support keep-alive timeout with a granularity of seconds */ in nvmet_execute_identify_ctrl()
725 id->kas = cpu_to_le16(NVMET_KAS); in nvmet_execute_identify_ctrl()
727 id->sqes = (0x6 << 4) | 0x6; in nvmet_execute_identify_ctrl()
728 id->cqes = (0x4 << 4) | 0x4; in nvmet_execute_identify_ctrl()
730 /* no enforced soft limit for maxcmd - pick an arbitrary high value */ in nvmet_execute_identify_ctrl()
731 id->maxcmd = cpu_to_le16(NVMET_MAX_CMD(ctrl)); in nvmet_execute_identify_ctrl()
733 id->nn = cpu_to_le32(NVMET_MAX_NAMESPACES); in nvmet_execute_identify_ctrl()
734 id->mnan = cpu_to_le32(NVMET_MAX_NAMESPACES); in nvmet_execute_identify_ctrl()
735 id->oncs = cpu_to_le16(NVME_CTRL_ONCS_DSM | in nvmet_execute_identify_ctrl()
740 id->vwc = NVME_CTRL_VWC_PRESENT; in nvmet_execute_identify_ctrl()
743 * We can't support atomic writes bigger than an LBA without support in nvmet_execute_identify_ctrl()
746 id->awun = 0; in nvmet_execute_identify_ctrl()
747 id->awupf = 0; in nvmet_execute_identify_ctrl()
749 /* we always support SGLs */ in nvmet_execute_identify_ctrl()
750 id->sgls = cpu_to_le32(NVME_CTRL_SGLS_BYTE_ALIGNED); in nvmet_execute_identify_ctrl()
751 if (ctrl->ops->flags & NVMF_KEYED_SGLS) in nvmet_execute_identify_ctrl()
752 id->sgls |= cpu_to_le32(NVME_CTRL_SGLS_KSDBDS); in nvmet_execute_identify_ctrl()
753 if (req->port->inline_data_size) in nvmet_execute_identify_ctrl()
754 id->sgls |= cpu_to_le32(NVME_CTRL_SGLS_SAOS); in nvmet_execute_identify_ctrl()
756 strscpy(id->subnqn, ctrl->subsys->subsysnqn, sizeof(id->subnqn)); in nvmet_execute_identify_ctrl()
759 * Max command capsule size is sqe + in-capsule data size. in nvmet_execute_identify_ctrl()
760 * Disable in-capsule data for Metadata capable controllers. in nvmet_execute_identify_ctrl()
763 if (!ctrl->pi_support) in nvmet_execute_identify_ctrl()
764 cmd_capsule_size += req->port->inline_data_size; in nvmet_execute_identify_ctrl()
765 id->ioccsz = cpu_to_le32(cmd_capsule_size / 16); in nvmet_execute_identify_ctrl()
768 id->iorcsz = cpu_to_le32(sizeof(struct nvme_completion) / 16); in nvmet_execute_identify_ctrl()
770 id->msdbd = ctrl->ops->msdbd; in nvmet_execute_identify_ctrl()
773 * Endurance group identifier is 16 bits, so we can't let namespaces in nvmet_execute_identify_ctrl()
774 * overflow that since we reuse the nsid in nvmet_execute_identify_ctrl()
777 id->endgidmax = cpu_to_le16(NVMET_MAX_NAMESPACES); in nvmet_execute_identify_ctrl()
779 id->anacap = (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3) | (1 << 4); in nvmet_execute_identify_ctrl()
780 id->anatt = 10; /* random value */ in nvmet_execute_identify_ctrl()
781 id->anagrpmax = cpu_to_le32(NVMET_MAX_ANAGRPS); in nvmet_execute_identify_ctrl()
782 id->nanagrpid = cpu_to_le32(NVMET_MAX_ANAGRPS); in nvmet_execute_identify_ctrl()
785 * Meh, we don't really support any power state. Fake up the same in nvmet_execute_identify_ctrl()
788 id->psd[0].max_power = cpu_to_le16(0x9c4); in nvmet_execute_identify_ctrl()
789 id->psd[0].entry_lat = cpu_to_le32(0x10); in nvmet_execute_identify_ctrl()
790 id->psd[0].exit_lat = cpu_to_le32(0x4); in nvmet_execute_identify_ctrl()
792 id->nwpc = 1 << 0; /* write protect and no write protect */ in nvmet_execute_identify_ctrl()
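
The sqes and cqes values above pack entry sizes as base-2 logarithms, maximum in the high nibble and required minimum in the low nibble, so 0x66 advertises 64-byte SQEs and 0x44 advertises 16-byte CQEs. A quick sketch of decoding that format:

    #include <stdint.h>
    #include <stdio.h>

    static void decode_qes(const char *name, uint8_t qes)
    {
        /* both nibbles are log2 of the entry size in bytes */
        printf("%s: min %u bytes, max %u bytes\n",
               name, 1u << (qes & 0xf), 1u << (qes >> 4));
    }

    int main(void)
    {
        decode_qes("sqes", (0x6 << 4) | 0x6); /* 64-byte submission entries */
        decode_qes("cqes", (0x4 << 4) | 0x4); /* 16-byte completion entries */
        return 0;
    }
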
806 if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) { in nvmet_execute_identify_ns()
807 req->error_loc = offsetof(struct nvme_identify, nsid); in nvmet_execute_identify_ns()
818 /* return an all-zeroed buffer if we can't find an active namespace */ in nvmet_execute_identify_ns()
825 if (nvmet_ns_revalidate(req->ns)) { in nvmet_execute_identify_ns()
826 mutex_lock(&req->ns->subsys->lock); in nvmet_execute_identify_ns()
827 nvmet_ns_changed(req->ns->subsys, req->ns->nsid); in nvmet_execute_identify_ns()
828 mutex_unlock(&req->ns->subsys->lock); in nvmet_execute_identify_ns()
832 * nuse = ncap = nsze isn't always true, but we have no way to find in nvmet_execute_identify_ns()
835 id->ncap = id->nsze = in nvmet_execute_identify_ns()
836 cpu_to_le64(req->ns->size >> req->ns->blksize_shift); in nvmet_execute_identify_ns()
837 switch (req->port->ana_state[req->ns->anagrpid]) { in nvmet_execute_identify_ns()
842 id->nuse = id->nsze; in nvmet_execute_identify_ns()
846 if (req->ns->bdev) in nvmet_execute_identify_ns()
847 nvmet_bdev_set_limits(req->ns->bdev, id); in nvmet_execute_identify_ns()
850 * We just provide a single LBA format that matches what the in nvmet_execute_identify_ns()
853 id->nlbaf = 0; in nvmet_execute_identify_ns()
854 id->flbas = 0; in nvmet_execute_identify_ns()
860 id->nmic = NVME_NS_NMIC_SHARED; in nvmet_execute_identify_ns()
861 id->anagrpid = cpu_to_le32(req->ns->anagrpid); in nvmet_execute_identify_ns()
863 if (req->ns->pr.enable) in nvmet_execute_identify_ns()
864 id->rescap = NVME_PR_SUPPORT_WRITE_EXCLUSIVE | in nvmet_execute_identify_ns()
873 * Since we don't know any better, every namespace is its own endurance in nvmet_execute_identify_ns()
876 id->endgid = cpu_to_le16(req->ns->nsid); in nvmet_execute_identify_ns()
878 memcpy(&id->nguid, &req->ns->nguid, sizeof(id->nguid)); in nvmet_execute_identify_ns()
880 id->lbaf[0].ds = req->ns->blksize_shift; in nvmet_execute_identify_ns()
882 if (req->sq->ctrl->pi_support && nvmet_ns_has_pi(req->ns)) { in nvmet_execute_identify_ns()
883 id->dpc = NVME_NS_DPC_PI_FIRST | NVME_NS_DPC_PI_LAST | in nvmet_execute_identify_ns()
886 id->mc = NVME_MC_EXTENDED_LBA; in nvmet_execute_identify_ns()
887 id->dps = req->ns->pi_type; in nvmet_execute_identify_ns()
888 id->flbas = NVME_NS_FLBAS_META_EXT; in nvmet_execute_identify_ns()
889 id->lbaf[0].ms = cpu_to_le16(req->ns->metadata_size); in nvmet_execute_identify_ns()
892 if (req->ns->readonly) in nvmet_execute_identify_ns()
893 id->nsattr |= NVME_NS_ATTR_RO; in nvmet_execute_identify_ns()
905 u16 min_endgid = le16_to_cpu(req->cmd->identify.cnssid); in nvmet_execute_identify_endgrp_list()
907 struct nvmet_ctrl *ctrl = req->sq->ctrl; in nvmet_execute_identify_endgrp_list()
908 struct nvmet_ns *ns; in nvmet_execute_identify_endgrp_list() local
920 nvmet_for_each_enabled_ns(&ctrl->subsys->namespaces, idx, ns) { in nvmet_execute_identify_endgrp_list()
921 if (ns->nsid <= min_endgid) in nvmet_execute_identify_endgrp_list()
924 list[i++] = cpu_to_le16(ns->nsid); in nvmet_execute_identify_endgrp_list()
929 list[0] = cpu_to_le16(i - 1); in nvmet_execute_identify_endgrp_list()
939 struct nvmet_ctrl *ctrl = req->sq->ctrl; in nvmet_execute_identify_nslist()
940 struct nvmet_ns *ns; in nvmet_execute_identify_nslist() local
942 u32 min_nsid = le32_to_cpu(req->cmd->identify.nsid); in nvmet_execute_identify_nslist()
952 req->error_loc = offsetof(struct nvme_identify, nsid); in nvmet_execute_identify_nslist()
963 nvmet_for_each_enabled_ns(&ctrl->subsys->namespaces, idx, ns) { in nvmet_execute_identify_nslist()
964 if (ns->nsid <= min_nsid) in nvmet_execute_identify_nslist()
966 if (match_css && ns->csi != req->cmd->identify.csi) in nvmet_execute_identify_nslist()
968 list[i++] = cpu_to_le32(ns->nsid); in nvmet_execute_identify_nslist()
981 void *id, off_t *off) in nvmet_copy_ns_identifier() argument
989 status = nvmet_copy_to_sgl(req, *off, &desc, sizeof(desc)); in nvmet_copy_ns_identifier()
992 *off += sizeof(desc); in nvmet_copy_ns_identifier()
994 status = nvmet_copy_to_sgl(req, *off, id, len); in nvmet_copy_ns_identifier()
997 *off += len; in nvmet_copy_ns_identifier()
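
nvmet_copy_ns_identifier() appends one entry to the Namespace Identification Descriptor list: a small header carrying the type (NIDT) and length (NIDL), then the identifier payload, with the running offset advanced past both. A standalone sketch of that append (the struct here is a simplified stand-in for the spec layout; NIDT 3 is the UUID descriptor type):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* descriptor header: type, length, two reserved bytes */
    struct ns_id_desc {
        uint8_t nidt;
        uint8_t nidl;
        uint8_t rsvd[2];
    };

    static size_t append_desc(uint8_t *buf, size_t off, uint8_t type,
                              const void *id, uint8_t len)
    {
        struct ns_id_desc desc = { .nidt = type, .nidl = len };

        memcpy(buf + off, &desc, sizeof(desc));
        off += sizeof(desc);
        memcpy(buf + off, id, len);
        return off + len;
    }

    int main(void)
    {
        uint8_t buf[4096] = { 0 };
        uint8_t uuid[16] = { 0xde, 0xad, 0xbe, 0xef };
        size_t off = 0;

        off = append_desc(buf, off, 3 /* NIDT: UUID */, uuid, sizeof(uuid));
        printf("list length after one descriptor: %zu bytes\n", off);
        return 0;
    }
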
1004 off_t off = 0; in nvmet_execute_identify_desclist() local
1011 if (memchr_inv(&req->ns->uuid, 0, sizeof(req->ns->uuid))) { in nvmet_execute_identify_desclist()
1014 &req->ns->uuid, &off); in nvmet_execute_identify_desclist()
1018 if (memchr_inv(req->ns->nguid, 0, sizeof(req->ns->nguid))) { in nvmet_execute_identify_desclist()
1021 &req->ns->nguid, &off); in nvmet_execute_identify_desclist()
1028 &req->ns->csi, &off); in nvmet_execute_identify_desclist()
1032 if (sg_zero_buffer(req->sg, req->sg_cnt, NVME_IDENTIFY_DATA_SIZE - off, in nvmet_execute_identify_desclist()
1033 off) != NVME_IDENTIFY_DATA_SIZE - off) in nvmet_execute_identify_desclist()
1082 id->nstat = NVME_NSTAT_NRDY; in nvmet_execute_id_cs_indep()
1083 id->anagrpid = cpu_to_le32(req->ns->anagrpid); in nvmet_execute_id_cs_indep()
1084 id->nmic = NVME_NS_NMIC_SHARED; in nvmet_execute_id_cs_indep()
1085 if (req->ns->readonly) in nvmet_execute_id_cs_indep()
1086 id->nsattr |= NVME_NS_ATTR_RO; in nvmet_execute_id_cs_indep()
1087 if (req->ns->bdev && !bdev_nonrot(req->ns->bdev)) in nvmet_execute_id_cs_indep()
1088 id->nsfeat |= NVME_NS_ROTATIONAL; in nvmet_execute_id_cs_indep()
1090 * We need a flush command to flush the file's metadata, in nvmet_execute_id_cs_indep()
1094 if (req->ns->bdev && !bdev_write_cache(req->ns->bdev)) in nvmet_execute_id_cs_indep()
1095 id->nsfeat |= NVME_NS_VWC_NOT_PRESENT; in nvmet_execute_id_cs_indep()
1108 switch (req->cmd->identify.cns) { in nvmet_execute_identify()
1122 switch (req->cmd->identify.csi) { in nvmet_execute_identify()
1135 switch (req->cmd->identify.csi) { in nvmet_execute_identify()
1159 req->cmd->identify.cns, req->sq->qid); in nvmet_execute_identify()
1160 req->error_loc = offsetof(struct nvme_identify, cns); in nvmet_execute_identify()
1166 * spec, but we are not required to do any useful work. We couldn't really
1183 if (req->ns->file) in nvmet_write_protect_flush_sync()
1189 pr_err("write protect flush failed nsid: %u\n", req->ns->nsid); in nvmet_write_protect_flush_sync()
1195 u32 write_protect = le32_to_cpu(req->cmd->common.cdw11); in nvmet_set_feat_write_protect()
1203 mutex_lock(&subsys->lock); in nvmet_set_feat_write_protect()
1206 req->ns->readonly = true; in nvmet_set_feat_write_protect()
1209 req->ns->readonly = false; in nvmet_set_feat_write_protect()
1212 req->ns->readonly = false; in nvmet_set_feat_write_protect()
1220 nvmet_ns_changed(subsys, req->ns->nsid); in nvmet_set_feat_write_protect()
1221 mutex_unlock(&subsys->lock); in nvmet_set_feat_write_protect()
1227 u32 val32 = le32_to_cpu(req->cmd->common.cdw11); in nvmet_set_feat_kato()
1229 nvmet_stop_keep_alive_timer(req->sq->ctrl); in nvmet_set_feat_kato()
1230 req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000); in nvmet_set_feat_kato()
1231 nvmet_start_keep_alive_timer(req->sq->ctrl); in nvmet_set_feat_kato()
1233 nvmet_set_result(req, req->sq->ctrl->kato); in nvmet_set_feat_kato()
1240 u32 val32 = le32_to_cpu(req->cmd->common.cdw11); in nvmet_set_feat_async_event()
1243 req->error_loc = offsetof(struct nvme_common_command, cdw11); in nvmet_set_feat_async_event()
1247 WRITE_ONCE(req->sq->ctrl->aen_enabled, val32); in nvmet_set_feat_async_event()
1255 struct nvmet_ctrl *ctrl = req->sq->ctrl; in nvmet_set_feat_host_id()
1261 * The NVMe base specification v2.1 recommends supporting 128-bit host in nvmet_set_feat_host_id()
1263 * that "The controller may support a 64-bit Host Identifier and/or an in nvmet_set_feat_host_id()
1264 * extended 128-bit Host Identifier". So simplify this support and do in nvmet_set_feat_host_id()
1265 * not support 64-bit host IDs to avoid needing to check that all in nvmet_set_feat_host_id()
1269 if (!(req->cmd->common.cdw11 & cpu_to_le32(1 << 0))) { in nvmet_set_feat_host_id()
1270 req->error_loc = offsetof(struct nvme_common_command, cdw11); in nvmet_set_feat_host_id()
1274 return nvmet_copy_from_sgl(req, 0, &req->sq->ctrl->hostid, in nvmet_set_feat_host_id()
1275 sizeof(req->sq->ctrl->hostid)); in nvmet_set_feat_host_id()
1280 struct nvmet_ctrl *ctrl = req->sq->ctrl; in nvmet_set_feat_irq_coalesce()
1281 u32 cdw11 = le32_to_cpu(req->cmd->common.cdw11); in nvmet_set_feat_irq_coalesce()
1292 req->error_loc = offsetof(struct nvme_common_command, cdw10); in nvmet_set_feat_irq_coalesce()
1296 return ctrl->ops->set_feature(ctrl, NVME_FEAT_IRQ_COALESCE, &irqc); in nvmet_set_feat_irq_coalesce()
1301 struct nvmet_ctrl *ctrl = req->sq->ctrl; in nvmet_set_feat_irq_config()
1302 u32 cdw11 = le32_to_cpu(req->cmd->common.cdw11); in nvmet_set_feat_irq_config()
1313 req->error_loc = offsetof(struct nvme_common_command, cdw10); in nvmet_set_feat_irq_config()
1317 return ctrl->ops->set_feature(ctrl, NVME_FEAT_IRQ_CONFIG, &irqcfg); in nvmet_set_feat_irq_config()
1322 struct nvmet_ctrl *ctrl = req->sq->ctrl; in nvmet_set_feat_arbitration()
1323 u32 cdw11 = le32_to_cpu(req->cmd->common.cdw11); in nvmet_set_feat_arbitration()
1331 if (!ctrl->ops->set_feature) { in nvmet_set_feat_arbitration()
1332 req->error_loc = offsetof(struct nvme_common_command, cdw10); in nvmet_set_feat_arbitration()
1336 return ctrl->ops->set_feature(ctrl, NVME_FEAT_ARBITRATION, &arb); in nvmet_set_feat_arbitration()
1342 u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10); in nvmet_execute_set_features()
1343 u32 cdw11 = le32_to_cpu(req->cmd->common.cdw11); in nvmet_execute_set_features()
1363 (subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16)); in nvmet_execute_set_features()
1387 req->error_loc = offsetof(struct nvme_common_command, cdw10); in nvmet_execute_set_features()
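
For the Number of Queues feature, the completion result reports the allocated counts 0's-based, NSQA in the low 16 bits and NCQA in the high 16 bits, which is exactly what the (max_qid - 1) | ((max_qid - 1) << 16) expression above encodes. A small sketch of the encode and decode, with a hypothetical queue limit:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint16_t max_qid = 128; /* hypothetical I/O queue limit */
        uint32_t result = (uint32_t)(max_qid - 1) |
                          ((uint32_t)(max_qid - 1) << 16);

        /* both fields are 0's-based counts of I/O queues */
        printf("NSQA: %u queues\n", (result & 0xffff) + 1);
        printf("NCQA: %u queues\n", (result >> 16) + 1);
        return 0;
    }
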
1404 mutex_lock(&subsys->lock); in nvmet_get_feat_write_protect()
1405 if (req->ns->readonly) in nvmet_get_feat_write_protect()
1410 mutex_unlock(&subsys->lock); in nvmet_get_feat_write_protect()
1417 struct nvmet_ctrl *ctrl = req->sq->ctrl; in nvmet_get_feat_irq_coalesce()
1426 req->error_loc = offsetof(struct nvme_common_command, cdw10); in nvmet_get_feat_irq_coalesce()
1430 status = ctrl->ops->get_feature(ctrl, NVME_FEAT_IRQ_COALESCE, &irqc); in nvmet_get_feat_irq_coalesce()
1441 struct nvmet_ctrl *ctrl = req->sq->ctrl; in nvmet_get_feat_irq_config()
1442 u32 iv = le32_to_cpu(req->cmd->common.cdw11) & 0xffff; in nvmet_get_feat_irq_config()
1451 req->error_loc = offsetof(struct nvme_common_command, cdw10); in nvmet_get_feat_irq_config()
1455 status = ctrl->ops->get_feature(ctrl, NVME_FEAT_IRQ_CONFIG, &irqcfg); in nvmet_get_feat_irq_config()
1466 struct nvmet_ctrl *ctrl = req->sq->ctrl; in nvmet_get_feat_arbitration()
1470 if (!ctrl->ops->get_feature) { in nvmet_get_feat_arbitration()
1471 req->error_loc = offsetof(struct nvme_common_command, cdw10); in nvmet_get_feat_arbitration()
1475 status = ctrl->ops->get_feature(ctrl, NVME_FEAT_ARBITRATION, &arb); in nvmet_get_feat_arbitration()
1490 nvmet_set_result(req, req->sq->ctrl->kato * 1000); in nvmet_get_feat_kato()
1495 nvmet_set_result(req, READ_ONCE(req->sq->ctrl->aen_enabled)); in nvmet_get_feat_async_event()
1501 u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10); in nvmet_execute_get_features()
1509 * These features are mandatory in the spec, but we don't in nvmet_execute_get_features()
1510 * have a useful way to implement them. We'll eventually in nvmet_execute_get_features()
1540 (subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16)); in nvmet_execute_get_features()
1546 /* need 128-bit host identifier flag */ in nvmet_execute_get_features()
1547 if (!(req->cmd->common.cdw11 & cpu_to_le32(1 << 0))) { in nvmet_execute_get_features()
1548 req->error_loc = in nvmet_execute_get_features()
1554 status = nvmet_copy_to_sgl(req, 0, &req->sq->ctrl->hostid, in nvmet_execute_get_features()
1555 sizeof(req->sq->ctrl->hostid)); in nvmet_execute_get_features()
1564 req->error_loc = in nvmet_execute_get_features()
1575 struct nvmet_ctrl *ctrl = req->sq->ctrl; in nvmet_execute_async_event()
1580 mutex_lock(&ctrl->lock); in nvmet_execute_async_event()
1581 if (ctrl->nr_async_event_cmds >= NVMET_ASYNC_EVENTS) { in nvmet_execute_async_event()
1582 mutex_unlock(&ctrl->lock); in nvmet_execute_async_event()
1586 ctrl->async_event_cmds[ctrl->nr_async_event_cmds++] = req; in nvmet_execute_async_event()
1587 mutex_unlock(&ctrl->lock); in nvmet_execute_async_event()
1589 queue_work(nvmet_wq, &ctrl->async_event_work); in nvmet_execute_async_event()
1594 struct nvmet_ctrl *ctrl = req->sq->ctrl; in nvmet_execute_keep_alive()
1600 if (!ctrl->kato) { in nvmet_execute_keep_alive()
1605 pr_debug("ctrl %d update keep-alive timer for %d secs\n", in nvmet_execute_keep_alive()
1606 ctrl->cntlid, ctrl->kato); in nvmet_execute_keep_alive()
1607 mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ); in nvmet_execute_keep_alive()
1614 struct nvme_command *cmd = req->cmd; in nvmet_admin_cmd_data_len()
1621 switch (cmd->common.opcode) { in nvmet_admin_cmd_data_len()
1627 return nvmet_feat_data_len(req, le32_to_cpu(cmd->common.cdw10)); in nvmet_admin_cmd_data_len()
1635 struct nvme_command *cmd = req->cmd; in nvmet_parse_admin_cmd()
1648 if (nvmet_is_pci_ctrl(req->sq->ctrl) && !req->sq->qid && in nvmet_parse_admin_cmd()
1649 cmd->common.flags & NVME_CMD_SGL_ALL) in nvmet_parse_admin_cmd()
1655 switch (cmd->common.opcode) { in nvmet_parse_admin_cmd()
1657 req->execute = nvmet_execute_delete_sq; in nvmet_parse_admin_cmd()
1660 req->execute = nvmet_execute_create_sq; in nvmet_parse_admin_cmd()
1663 req->execute = nvmet_execute_get_log_page; in nvmet_parse_admin_cmd()
1666 req->execute = nvmet_execute_delete_cq; in nvmet_parse_admin_cmd()
1669 req->execute = nvmet_execute_create_cq; in nvmet_parse_admin_cmd()
1672 req->execute = nvmet_execute_identify; in nvmet_parse_admin_cmd()
1675 req->execute = nvmet_execute_abort; in nvmet_parse_admin_cmd()
1678 req->execute = nvmet_execute_set_features; in nvmet_parse_admin_cmd()
1681 req->execute = nvmet_execute_get_features; in nvmet_parse_admin_cmd()
1684 req->execute = nvmet_execute_async_event; in nvmet_parse_admin_cmd()
1687 req->execute = nvmet_execute_keep_alive; in nvmet_parse_admin_cmd()