/*
 * Discovery service for the NVMe over Fabrics target.
 * Copyright (C) 2016 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/slab.h>
#include <generated/utsrelease.h>
#include "nvmet.h"

struct nvmet_subsys *nvmet_disc_subsys;

u64 nvmet_genctr;

/*
 * Add @port to @parent's referral list and bump the discovery log
 * generation counter so hosts can see that the log page changed.
 */
void nvmet_referral_enable(struct nvmet_port *parent, struct nvmet_port *port)
{
	down_write(&nvmet_config_sem);
	if (list_empty(&port->entry)) {
		list_add_tail(&port->entry, &parent->referrals);
		port->enabled = true;
		nvmet_genctr++;
	}
	up_write(&nvmet_config_sem);
}

/* Remove @port from its referral list and bump the generation counter. */
void nvmet_referral_disable(struct nvmet_port *port)
{
	down_write(&nvmet_config_sem);
	if (!list_empty(&port->entry)) {
		port->enabled = false;
		list_del_init(&port->entry);
		nvmet_genctr++;
	}
	up_write(&nvmet_config_sem);
}

/* Fill discovery log page entry @numrec from @port's discovery address. */
static void nvmet_format_discovery_entry(struct nvmf_disc_rsp_page_hdr *hdr,
		struct nvmet_port *port, char *subsys_nqn, u8 type, u32 numrec)
{
	struct nvmf_disc_rsp_page_entry *e = &hdr->entries[numrec];

	e->trtype = port->disc_addr.trtype;
	e->adrfam = port->disc_addr.adrfam;
	e->treq = port->disc_addr.treq;
	e->portid = port->disc_addr.portid;
	/* we support only dynamic controllers */
	e->cntlid = cpu_to_le16(NVME_CNTLID_DYNAMIC);
	e->asqsz = cpu_to_le16(NVMF_AQ_DEPTH);
	e->subtype = type;
	memcpy(e->trsvcid, port->disc_addr.trsvcid, NVMF_TRSVCID_SIZE);
	memcpy(e->traddr, port->disc_addr.traddr, NVMF_TRADDR_SIZE);
	memcpy(e->tsas.common, port->disc_addr.tsas.common, NVMF_TSAS_SIZE);
	memcpy(e->subnqn, subsys_nqn, NVMF_NQN_SIZE);
}

/*
 * Build the Discovery Log Page: one entry per subsystem this host is
 * allowed to access on the port the request arrived on, plus one entry
 * per configured referral.
 */
static void nvmet_execute_get_disc_log_page(struct nvmet_req *req)
{
	const int entry_size = sizeof(struct nvmf_disc_rsp_page_entry);
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmf_disc_rsp_page_hdr *hdr;
	size_t data_len = nvmet_get_log_page_len(req->cmd);
	size_t alloc_len = max(data_len, sizeof(*hdr));
	int residual_len = data_len - sizeof(*hdr);
	struct nvmet_subsys_link *p;
	struct nvmet_port *r;
	u32 numrec = 0;
	u16 status = 0;

	/*
	 * Make sure we're passing at least a buffer of response header size.
	 * If host provided data len is less than the header size, only the
	 * number of bytes requested by host will be sent to host.
	 */
	hdr = kzalloc(alloc_len, GFP_KERNEL);
	if (!hdr) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	down_read(&nvmet_config_sem);
	list_for_each_entry(p, &req->port->subsystems, entry) {
		if (!nvmet_host_allowed(req, p->subsys, ctrl->hostnqn))
			continue;
		if (residual_len >= entry_size) {
			nvmet_format_discovery_entry(hdr, req->port,
					p->subsys->subsysnqn,
					NVME_NQN_NVME, numrec);
			residual_len -= entry_size;
		}
		numrec++;
	}

	list_for_each_entry(r, &req->port->referrals, entry) {
		if (residual_len >= entry_size) {
			nvmet_format_discovery_entry(hdr, r,
					NVME_DISC_SUBSYS_NAME,
					NVME_NQN_DISC, numrec);
			residual_len -= entry_size;
		}
		numrec++;
	}

	hdr->genctr = cpu_to_le64(nvmet_genctr);
	hdr->numrec = cpu_to_le64(numrec);
	hdr->recfmt = cpu_to_le16(0);

	up_read(&nvmet_config_sem);

	status = nvmet_copy_to_sgl(req, 0, hdr, data_len);
	kfree(hdr);
out:
	nvmet_req_complete(req, status);
}

/* Identify Controller data for the discovery controller itself. */
static void nvmet_execute_identify_disc_ctrl(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvme_id_ctrl *id;
	u16 status = 0;

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	memset(id->fr, ' ', sizeof(id->fr));
	strncpy((char *)id->fr, UTS_RELEASE, sizeof(id->fr));

	/* no limit on data transfer sizes for now */
	id->mdts = 0;
	id->cntlid = cpu_to_le16(ctrl->cntlid);
	id->ver = cpu_to_le32(ctrl->subsys->ver);
	id->lpa = (1 << 2);

	/* no enforcement soft-limit for maxcmd - pick arbitrary high value */
	id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);

	id->sgls = cpu_to_le32(1 << 0);	/* we always support SGLs */
	if (ctrl->ops->has_keyed_sgls)
		id->sgls |= cpu_to_le32(1 << 2);
	if (ctrl->ops->sqe_inline_size)
		id->sgls |= cpu_to_le32(1 << 20);

	strcpy(id->subnqn, ctrl->subsys->subsysnqn);

	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

	kfree(id);
out:
	nvmet_req_complete(req, status);
}

/*
 * Discovery controllers only accept the Get Log Page (Discovery log)
 * and Identify Controller admin commands.
 */
int nvmet_parse_discovery_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;

	req->ns = NULL;

	if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) {
		pr_err("nvmet: got cmd %d while not ready\n",
				cmd->common.opcode);
		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
	}

	switch (cmd->common.opcode) {
	case nvme_admin_get_log_page:
		req->data_len = nvmet_get_log_page_len(cmd);

		switch (cmd->get_log_page.lid) {
		case NVME_LOG_DISC:
			req->execute = nvmet_execute_get_disc_log_page;
			return 0;
		default:
			pr_err("nvmet: unsupported get_log_page lid %d\n",
					cmd->get_log_page.lid);
			return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
		}
	case nvme_admin_identify:
		req->data_len = 4096;
		switch (le32_to_cpu(cmd->identify.cns)) {
		case NVME_ID_CNS_CTRL:
			req->execute =
				nvmet_execute_identify_disc_ctrl;
			return 0;
		default:
			pr_err("nvmet: unsupported identify cns %d\n",
					le32_to_cpu(cmd->identify.cns));
			return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
		}
	default:
		pr_err("nvmet: unsupported cmd %d\n",
				cmd->common.opcode);
		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
	}

	pr_err("nvmet: unhandled cmd %d\n", cmd->common.opcode);
	return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
}

int __init nvmet_init_discovery(void)
{
	nvmet_disc_subsys =
		nvmet_subsys_alloc(NVME_DISC_SUBSYS_NAME, NVME_NQN_DISC);
	if (!nvmet_disc_subsys)
		return -ENOMEM;
	return 0;
}

void nvmet_exit_discovery(void)
{
	nvmet_subsys_put(nvmet_disc_subsys);
}