// SPDX-License-Identifier: GPL-2.0
/*
 * Discovery service for the NVMe over Fabrics target.
 * Copyright (C) 2016 Intel Corporation. All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/slab.h>
#include <generated/utsrelease.h>
#include "nvmet.h"

struct nvmet_subsys *nvmet_disc_subsys;

static u64 nvmet_genctr;

static void __nvmet_disc_changed(struct nvmet_port *port,
				 struct nvmet_ctrl *ctrl)
{
	if (ctrl->port != port)
		return;

	if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_DISC_CHANGE))
		return;

	nvmet_add_async_event(ctrl, NVME_AER_NOTICE,
			      NVME_AER_NOTICE_DISC_CHANGED, NVME_LOG_DISC);
}

void nvmet_port_disc_changed(struct nvmet_port *port,
			     struct nvmet_subsys *subsys)
{
	struct nvmet_ctrl *ctrl;

	lockdep_assert_held(&nvmet_config_sem);
	nvmet_genctr++;

	mutex_lock(&nvmet_disc_subsys->lock);
	list_for_each_entry(ctrl, &nvmet_disc_subsys->ctrls, subsys_entry) {
		if (subsys && !nvmet_host_allowed(subsys, ctrl->hostnqn))
			continue;

		__nvmet_disc_changed(port, ctrl);
	}
	mutex_unlock(&nvmet_disc_subsys->lock);

	/* If the transport can signal the change, notify it as well */
	if (port->tr_ops && port->tr_ops->discovery_chg)
		port->tr_ops->discovery_chg(port);
}

static void __nvmet_subsys_disc_changed(struct nvmet_port *port,
					struct nvmet_subsys *subsys,
					struct nvmet_host *host)
{
	struct nvmet_ctrl *ctrl;

	mutex_lock(&nvmet_disc_subsys->lock);
	list_for_each_entry(ctrl, &nvmet_disc_subsys->ctrls, subsys_entry) {
		if (host && strcmp(nvmet_host_name(host), ctrl->hostnqn))
			continue;

		__nvmet_disc_changed(port, ctrl);
	}
	mutex_unlock(&nvmet_disc_subsys->lock);
}

void nvmet_subsys_disc_changed(struct nvmet_subsys *subsys,
			       struct nvmet_host *host)
{
	struct nvmet_port *port;
	struct nvmet_subsys_link *s;

	lockdep_assert_held(&nvmet_config_sem);
	nvmet_genctr++;

	list_for_each_entry(port, nvmet_ports, global_entry)
		list_for_each_entry(s, &port->subsystems, entry) {
			if (s->subsys != subsys)
				continue;
			__nvmet_subsys_disc_changed(port, subsys, host);
		}
}

void nvmet_referral_enable(struct nvmet_port *parent, struct nvmet_port *port)
{
	down_write(&nvmet_config_sem);
	if (list_empty(&port->entry)) {
		list_add_tail(&port->entry, &parent->referrals);
		port->enabled = true;
		nvmet_port_disc_changed(parent, NULL);
	}
	up_write(&nvmet_config_sem);
}

void nvmet_referral_disable(struct nvmet_port *parent, struct nvmet_port *port)
{
	down_write(&nvmet_config_sem);
	if (!list_empty(&port->entry)) {
		port->enabled = false;
		list_del_init(&port->entry);
		nvmet_port_disc_changed(parent, NULL);
	}
	up_write(&nvmet_config_sem);
}

static void nvmet_format_discovery_entry(struct nvmf_disc_rsp_page_hdr *hdr,
		struct nvmet_port *port, char *subsys_nqn, char *traddr,
		u8 type, u32 numrec)
{
	struct nvmf_disc_rsp_page_entry *e = &hdr->entries[numrec];

	e->trtype = port->disc_addr.trtype;
	e->adrfam = port->disc_addr.adrfam;
	e->treq = port->disc_addr.treq;
	e->portid = port->disc_addr.portid;
	/* we support only dynamic controllers */
	e->cntlid = cpu_to_le16(NVME_CNTLID_DYNAMIC);
	e->asqsz = cpu_to_le16(NVME_AQ_DEPTH);
	e->subtype = type;
	memcpy(e->trsvcid, port->disc_addr.trsvcid, NVMF_TRSVCID_SIZE);
	memcpy(e->traddr, traddr, NVMF_TRADDR_SIZE);
	memcpy(e->tsas.common, port->disc_addr.tsas.common, NVMF_TSAS_SIZE);
	strscpy(e->subnqn, subsys_nqn, NVMF_NQN_SIZE);
}
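
/*
 * For reference, the fields filled in above are what nvme-cli renders for
 * each record of "nvme discover" on the host. A sketch of one such entry
 * (the NQN and addresses are made-up example values):
 *
 *	=====Discovery Log Entry 0======
 *	trtype:  rdma
 *	adrfam:  ipv4
 *	subtype: nvme subsystem
 *	treq:    not specified
 *	portid:  1
 *	trsvcid: 4420
 *	subnqn:  nqn.2016-06.io.example:testsubsys
 *	traddr:  192.168.0.10
 */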

/*
 * nvmet_set_disc_traddr - set a correct discovery log entry traddr
 *
 * IP based transports (e.g. RDMA) can listen on "any" ipv4/ipv6 address
 * (INADDR_ANY or IN6ADDR_ANY_INIT). The discovery log page traddr reply
 * must not contain that "any" IP address. If the transport implements
 * .disc_traddr, use it; the callback sets the discovery traddr from the
 * req->port address in case the port in question listens on an "any"
 * IP address.
 */
static void nvmet_set_disc_traddr(struct nvmet_req *req, struct nvmet_port *port,
		char *traddr)
{
	if (req->ops->disc_traddr)
		req->ops->disc_traddr(req, port, traddr);
	else
		memcpy(traddr, port->disc_addr.traddr, NVMF_TRADDR_SIZE);
}

static size_t discovery_log_entries(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmet_subsys_link *p;
	struct nvmet_port *r;
	size_t entries = 1;	/* the discovery subsystem itself */

	list_for_each_entry(p, &req->port->subsystems, entry) {
		if (!nvmet_host_allowed(p->subsys, ctrl->hostnqn))
			continue;
		entries++;
	}
	list_for_each_entry(r, &req->port->referrals, entry)
		entries++;
	return entries;
}
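
/*
 * Size math, for illustration: the log page header and each log page
 * entry are both 1024 bytes (sizeof(struct nvmf_disc_rsp_page_hdr) and
 * sizeof(struct nvmf_disc_rsp_page_entry)). So a port exporting two
 * allowed subsystems plus one referral yields
 * discovery_log_entries() == 4 (counting the discovery subsystem
 * itself), and the handler below allocates 1024 + 4 * 1024 = 5120 bytes.
 */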

static void nvmet_execute_disc_get_log_page(struct nvmet_req *req)
{
	const int entry_size = sizeof(struct nvmf_disc_rsp_page_entry);
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmf_disc_rsp_page_hdr *hdr;
	u64 offset = nvmet_get_log_page_offset(req->cmd);
	size_t data_len = nvmet_get_log_page_len(req->cmd);
	size_t alloc_len;
	struct nvmet_subsys_link *p;
	struct nvmet_port *r;
	u32 numrec = 0;
	u16 status = 0;
	void *buffer;
	char traddr[NVMF_TRADDR_SIZE];

	if (!nvmet_check_transfer_len(req, data_len))
		return;

	if (req->cmd->get_log_page.lid != NVME_LOG_DISC) {
		req->error_loc =
			offsetof(struct nvme_get_log_page_command, lid);
		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
		goto out;
	}

	/* Spec requires dword aligned offsets */
	if (offset & 0x3) {
		req->error_loc =
			offsetof(struct nvme_get_log_page_command, lpo);
		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
		goto out;
	}

	/*
	 * Make sure we're passing at least a buffer of response header size.
	 * If the host provided data len is less than the header size, only
	 * the number of bytes requested by the host will be sent back.
	 */
	down_read(&nvmet_config_sem);
	alloc_len = sizeof(*hdr) + entry_size * discovery_log_entries(req);
	buffer = kzalloc(alloc_len, GFP_KERNEL);
	if (!buffer) {
		up_read(&nvmet_config_sem);
		status = NVME_SC_INTERNAL;
		goto out;
	}
	hdr = buffer;

	nvmet_set_disc_traddr(req, req->port, traddr);

	nvmet_format_discovery_entry(hdr, req->port,
				     nvmet_disc_subsys->subsysnqn,
				     traddr, NVME_NQN_CURR, numrec);
	numrec++;

	list_for_each_entry(p, &req->port->subsystems, entry) {
		if (!nvmet_host_allowed(p->subsys, ctrl->hostnqn))
			continue;

		nvmet_format_discovery_entry(hdr, req->port,
					     p->subsys->subsysnqn, traddr,
					     NVME_NQN_NVME, numrec);
		numrec++;
	}

	list_for_each_entry(r, &req->port->referrals, entry) {
		nvmet_format_discovery_entry(hdr, r,
					     NVME_DISC_SUBSYS_NAME,
					     r->disc_addr.traddr,
					     NVME_NQN_DISC, numrec);
		numrec++;
	}

	hdr->genctr = cpu_to_le64(nvmet_genctr);
	hdr->numrec = cpu_to_le64(numrec);
	hdr->recfmt = cpu_to_le16(0);

	nvmet_clear_aen_bit(req, NVME_AEN_BIT_DISC_CHANGE);

	up_read(&nvmet_config_sem);

	status = nvmet_copy_to_sgl(req, 0, buffer + offset, data_len);
	kfree(buffer);
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_disc_identify(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvme_id_ctrl *id;
	u16 status = 0;

	if (!nvmet_check_transfer_len(req, NVME_IDENTIFY_DATA_SIZE))
		return;

	if (req->cmd->identify.cns != NVME_ID_CNS_CTRL) {
		req->error_loc = offsetof(struct nvme_identify, cns);
		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
		goto out;
	}

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	memcpy(id->sn, ctrl->subsys->serial, NVMET_SN_MAX_SIZE);
	memcpy_and_pad(id->mn, sizeof(id->mn), ctrl->subsys->model_number,
		       strlen(ctrl->subsys->model_number), ' ');
	memcpy_and_pad(id->fr, sizeof(id->fr),
		       UTS_RELEASE, strlen(UTS_RELEASE), ' ');

	id->cntrltype = NVME_CTRL_DISC;

	/* no limit on data transfer sizes for now */
	id->mdts = 0;
	id->cntlid = cpu_to_le16(ctrl->cntlid);
	id->ver = cpu_to_le32(ctrl->subsys->ver);
	id->lpa = (1 << 2);	/* extended data for Get Log Page supported */

	/* no enforcement soft-limit for maxcmd - pick arbitrary high value */
	id->maxcmd = cpu_to_le16(NVMET_MAX_CMD(ctrl));

	id->sgls = cpu_to_le32(1 << 0);	/* we always support SGLs */
	if (ctrl->ops->flags & NVMF_KEYED_SGLS)
		id->sgls |= cpu_to_le32(1 << 2);	/* keyed SGL data block */
	if (req->port->inline_data_size)
		id->sgls |= cpu_to_le32(1 << 20);	/* in-capsule data */

	id->oaes = cpu_to_le32(NVMET_DISC_AEN_CFG_OPTIONAL);

	strscpy(id->subnqn, ctrl->subsys->subsysnqn, sizeof(id->subnqn));

	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

	kfree(id);
out:
	nvmet_req_complete(req, status);
}
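
/*
 * On the host side the identify data built above is what "nvme id-ctrl
 * /dev/nvmeX" decodes for a discovery controller. A rough sketch of the
 * relevant fields (exact values depend on kernel and transport; the sgls
 * value shown assumes a keyed-SGL transport with in-capsule data):
 *
 *	cntrltype : 2		(discovery controller)
 *	mdts      : 0		(no transfer size limit)
 *	lpa       : 0x4		(extended Get Log Page data)
 *	sgls      : 0x100005	(SGLs, keyed SGLs, in-capsule data)
 */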

static void nvmet_execute_disc_set_features(struct nvmet_req *req)
{
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
	u16 stat;

	if (!nvmet_check_transfer_len(req, 0))
		return;

	switch (cdw10 & 0xff) {
	case NVME_FEAT_KATO:
		stat = nvmet_set_feat_kato(req);
		break;
	case NVME_FEAT_ASYNC_EVENT:
		stat = nvmet_set_feat_async_event(req,
						  NVMET_DISC_AEN_CFG_OPTIONAL);
		break;
	default:
		req->error_loc = offsetof(struct nvme_common_command, cdw10);
		stat = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
		break;
	}

	nvmet_req_complete(req, stat);
}

static void nvmet_execute_disc_get_features(struct nvmet_req *req)
{
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
	u16 stat = 0;

	if (!nvmet_check_transfer_len(req, 0))
		return;

	switch (cdw10 & 0xff) {
	case NVME_FEAT_KATO:
		nvmet_get_feat_kato(req);
		break;
	case NVME_FEAT_ASYNC_EVENT:
		nvmet_get_feat_async_event(req);
		break;
	default:
		req->error_loc = offsetof(struct nvme_common_command, cdw10);
		stat = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
		break;
	}

	nvmet_req_complete(req, stat);
}

u16 nvmet_parse_discovery_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;

	if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) {
		pr_err("got cmd %d while not ready\n",
		       cmd->common.opcode);
		req->error_loc =
			offsetof(struct nvme_common_command, opcode);
		return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
	}

	switch (cmd->common.opcode) {
	case nvme_admin_set_features:
		req->execute = nvmet_execute_disc_set_features;
		return 0;
	case nvme_admin_get_features:
		req->execute = nvmet_execute_disc_get_features;
		return 0;
	case nvme_admin_async_event:
		req->execute = nvmet_execute_async_event;
		return 0;
	case nvme_admin_keep_alive:
		req->execute = nvmet_execute_keep_alive;
		return 0;
	case nvme_admin_get_log_page:
		req->execute = nvmet_execute_disc_get_log_page;
		return 0;
	case nvme_admin_identify:
		req->execute = nvmet_execute_disc_identify;
		return 0;
	default:
		pr_debug("unhandled cmd %d\n", cmd->common.opcode);
		req->error_loc = offsetof(struct nvme_common_command, opcode);
		return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
	}
}

int __init nvmet_init_discovery(void)
{
	nvmet_disc_subsys =
		nvmet_subsys_alloc(NVME_DISC_SUBSYS_NAME, NVME_NQN_CURR);
	return PTR_ERR_OR_ZERO(nvmet_disc_subsys);
}

void nvmet_exit_discovery(void)
{
	nvmet_subsys_put(nvmet_disc_subsys);
}
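
/*
 * For context, a typical discovery session exercises the handlers above
 * in roughly this order (a sketch; the exact sequence is up to the host):
 *
 *	Connect (handled by the fabrics code, not this file)
 *	Set Features (KATO)	-> nvmet_execute_disc_set_features()
 *	Identify (CNS 01h)	-> nvmet_execute_disc_identify()
 *	Get Log Page (LID 70h)	-> nvmet_execute_disc_get_log_page()
 *	Keep Alive		-> nvmet_execute_keep_alive()
 *
 * which is what e.g. "nvme discover -t tcp -a 192.168.0.10 -s 8009"
 * triggers from the host (the address here is an example value).
 */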