// SPDX-License-Identifier: GPL-2.0
/*
 * Configfs interface for the NVMe target.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kstrtox.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/ctype.h>
#include <linux/pci.h>
#include <linux/pci-p2pdma.h>
#ifdef CONFIG_NVME_TARGET_AUTH
#include <linux/nvme-auth.h>
#endif
#include <linux/nvme-keyring.h>
#include <crypto/hash.h>
#include <crypto/kpp.h>
#include <linux/nospec.h>

#include "nvmet.h"

static const struct config_item_type nvmet_host_type;
static const struct config_item_type nvmet_subsys_type;

static LIST_HEAD(nvmet_ports_list);
struct list_head *nvmet_ports = &nvmet_ports_list;

struct nvmet_type_name_map {
	u8		type;
	const char	*name;
};

static struct nvmet_type_name_map nvmet_transport[] = {
	{ NVMF_TRTYPE_RDMA,	"rdma" },
	{ NVMF_TRTYPE_FC,	"fc" },
	{ NVMF_TRTYPE_TCP,	"tcp" },
	{ NVMF_TRTYPE_LOOP,	"loop" },
};

static const struct nvmet_type_name_map nvmet_addr_family[] = {
	{ NVMF_ADDR_FAMILY_PCI,		"pcie" },
	{ NVMF_ADDR_FAMILY_IP4,		"ipv4" },
	{ NVMF_ADDR_FAMILY_IP6,		"ipv6" },
	{ NVMF_ADDR_FAMILY_IB,		"ib" },
	{ NVMF_ADDR_FAMILY_FC,		"fc" },
	{ NVMF_ADDR_FAMILY_LOOP,	"loop" },
};

static bool nvmet_is_port_enabled(struct nvmet_port *p, const char *caller)
{
	if (p->enabled)
		pr_err("Disable port '%u' before changing attribute in %s\n",
		       le16_to_cpu(p->disc_addr.portid), caller);
	return p->enabled;
}

/*
 * nvmet_port Generic ConfigFS definitions.
 * Used in any place in the ConfigFS tree that refers to an address.
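 *
 * These attributes appear both in a port directory and in the entries of a
 * port's referrals/ group.  For example (paths and values are illustrative,
 * assuming configfs is mounted at /sys/kernel/config and a port named "1"
 * has been created):
 *
 *   echo tcp      > /sys/kernel/config/nvmet/ports/1/addr_trtype
 *   echo ipv4     > /sys/kernel/config/nvmet/ports/1/addr_adrfam
 *   echo 10.0.0.1 > /sys/kernel/config/nvmet/ports/1/addr_traddr
 *   echo 4420     > /sys/kernel/config/nvmet/ports/1/addr_trsvcid
 *
 * The *_store() handlers below return -EACCES while the port is enabled,
 * see nvmet_is_port_enabled().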
63 */ 64 static ssize_t nvmet_addr_adrfam_show(struct config_item *item, char *page) 65 { 66 u8 adrfam = to_nvmet_port(item)->disc_addr.adrfam; 67 int i; 68 69 for (i = 1; i < ARRAY_SIZE(nvmet_addr_family); i++) { 70 if (nvmet_addr_family[i].type == adrfam) 71 return snprintf(page, PAGE_SIZE, "%s\n", 72 nvmet_addr_family[i].name); 73 } 74 75 return snprintf(page, PAGE_SIZE, "\n"); 76 } 77 78 static ssize_t nvmet_addr_adrfam_store(struct config_item *item, 79 const char *page, size_t count) 80 { 81 struct nvmet_port *port = to_nvmet_port(item); 82 int i; 83 84 if (nvmet_is_port_enabled(port, __func__)) 85 return -EACCES; 86 87 for (i = 1; i < ARRAY_SIZE(nvmet_addr_family); i++) { 88 if (sysfs_streq(page, nvmet_addr_family[i].name)) 89 goto found; 90 } 91 92 pr_err("Invalid value '%s' for adrfam\n", page); 93 return -EINVAL; 94 95 found: 96 port->disc_addr.adrfam = nvmet_addr_family[i].type; 97 return count; 98 } 99 100 CONFIGFS_ATTR(nvmet_, addr_adrfam); 101 102 static ssize_t nvmet_addr_portid_show(struct config_item *item, 103 char *page) 104 { 105 __le16 portid = to_nvmet_port(item)->disc_addr.portid; 106 107 return snprintf(page, PAGE_SIZE, "%d\n", le16_to_cpu(portid)); 108 } 109 110 static ssize_t nvmet_addr_portid_store(struct config_item *item, 111 const char *page, size_t count) 112 { 113 struct nvmet_port *port = to_nvmet_port(item); 114 u16 portid = 0; 115 116 if (kstrtou16(page, 0, &portid)) { 117 pr_err("Invalid value '%s' for portid\n", page); 118 return -EINVAL; 119 } 120 121 if (nvmet_is_port_enabled(port, __func__)) 122 return -EACCES; 123 124 port->disc_addr.portid = cpu_to_le16(portid); 125 return count; 126 } 127 128 CONFIGFS_ATTR(nvmet_, addr_portid); 129 130 static ssize_t nvmet_addr_traddr_show(struct config_item *item, 131 char *page) 132 { 133 struct nvmet_port *port = to_nvmet_port(item); 134 135 return snprintf(page, PAGE_SIZE, "%s\n", port->disc_addr.traddr); 136 } 137 138 static ssize_t nvmet_addr_traddr_store(struct config_item *item, 139 const char *page, size_t count) 140 { 141 struct nvmet_port *port = to_nvmet_port(item); 142 143 if (count > NVMF_TRADDR_SIZE) { 144 pr_err("Invalid value '%s' for traddr\n", page); 145 return -EINVAL; 146 } 147 148 if (nvmet_is_port_enabled(port, __func__)) 149 return -EACCES; 150 151 if (sscanf(page, "%s\n", port->disc_addr.traddr) != 1) 152 return -EINVAL; 153 return count; 154 } 155 156 CONFIGFS_ATTR(nvmet_, addr_traddr); 157 158 static const struct nvmet_type_name_map nvmet_addr_treq[] = { 159 { NVMF_TREQ_NOT_SPECIFIED, "not specified" }, 160 { NVMF_TREQ_REQUIRED, "required" }, 161 { NVMF_TREQ_NOT_REQUIRED, "not required" }, 162 }; 163 164 static inline u8 nvmet_port_disc_addr_treq_mask(struct nvmet_port *port) 165 { 166 return (port->disc_addr.treq & ~NVME_TREQ_SECURE_CHANNEL_MASK); 167 } 168 169 static ssize_t nvmet_addr_treq_show(struct config_item *item, char *page) 170 { 171 u8 treq = nvmet_port_disc_addr_treq_secure_channel(to_nvmet_port(item)); 172 int i; 173 174 for (i = 0; i < ARRAY_SIZE(nvmet_addr_treq); i++) { 175 if (treq == nvmet_addr_treq[i].type) 176 return snprintf(page, PAGE_SIZE, "%s\n", 177 nvmet_addr_treq[i].name); 178 } 179 180 return snprintf(page, PAGE_SIZE, "\n"); 181 } 182 183 static ssize_t nvmet_addr_treq_store(struct config_item *item, 184 const char *page, size_t count) 185 { 186 struct nvmet_port *port = to_nvmet_port(item); 187 u8 treq = nvmet_port_disc_addr_treq_mask(port); 188 int i; 189 190 if (nvmet_is_port_enabled(port, __func__)) 191 return -EACCES; 192 193 for (i = 0; i < 
ARRAY_SIZE(nvmet_addr_treq); i++) { 194 if (sysfs_streq(page, nvmet_addr_treq[i].name)) 195 goto found; 196 } 197 198 pr_err("Invalid value '%s' for treq\n", page); 199 return -EINVAL; 200 201 found: 202 if (port->disc_addr.trtype == NVMF_TRTYPE_TCP && 203 port->disc_addr.tsas.tcp.sectype == NVMF_TCP_SECTYPE_TLS13) { 204 switch (nvmet_addr_treq[i].type) { 205 case NVMF_TREQ_NOT_SPECIFIED: 206 pr_debug("treq '%s' not allowed for TLS1.3\n", 207 nvmet_addr_treq[i].name); 208 return -EINVAL; 209 case NVMF_TREQ_NOT_REQUIRED: 210 pr_warn("Allow non-TLS connections while TLS1.3 is enabled\n"); 211 break; 212 default: 213 break; 214 } 215 } 216 treq |= nvmet_addr_treq[i].type; 217 port->disc_addr.treq = treq; 218 return count; 219 } 220 221 CONFIGFS_ATTR(nvmet_, addr_treq); 222 223 static ssize_t nvmet_addr_trsvcid_show(struct config_item *item, 224 char *page) 225 { 226 struct nvmet_port *port = to_nvmet_port(item); 227 228 return snprintf(page, PAGE_SIZE, "%s\n", port->disc_addr.trsvcid); 229 } 230 231 static ssize_t nvmet_addr_trsvcid_store(struct config_item *item, 232 const char *page, size_t count) 233 { 234 struct nvmet_port *port = to_nvmet_port(item); 235 236 if (count > NVMF_TRSVCID_SIZE) { 237 pr_err("Invalid value '%s' for trsvcid\n", page); 238 return -EINVAL; 239 } 240 if (nvmet_is_port_enabled(port, __func__)) 241 return -EACCES; 242 243 if (sscanf(page, "%s\n", port->disc_addr.trsvcid) != 1) 244 return -EINVAL; 245 return count; 246 } 247 248 CONFIGFS_ATTR(nvmet_, addr_trsvcid); 249 250 static ssize_t nvmet_param_inline_data_size_show(struct config_item *item, 251 char *page) 252 { 253 struct nvmet_port *port = to_nvmet_port(item); 254 255 return snprintf(page, PAGE_SIZE, "%d\n", port->inline_data_size); 256 } 257 258 static ssize_t nvmet_param_inline_data_size_store(struct config_item *item, 259 const char *page, size_t count) 260 { 261 struct nvmet_port *port = to_nvmet_port(item); 262 int ret; 263 264 if (nvmet_is_port_enabled(port, __func__)) 265 return -EACCES; 266 ret = kstrtoint(page, 0, &port->inline_data_size); 267 if (ret) { 268 pr_err("Invalid value '%s' for inline_data_size\n", page); 269 return -EINVAL; 270 } 271 return count; 272 } 273 274 CONFIGFS_ATTR(nvmet_, param_inline_data_size); 275 276 static ssize_t nvmet_param_max_queue_size_show(struct config_item *item, 277 char *page) 278 { 279 struct nvmet_port *port = to_nvmet_port(item); 280 281 return snprintf(page, PAGE_SIZE, "%d\n", port->max_queue_size); 282 } 283 284 static ssize_t nvmet_param_max_queue_size_store(struct config_item *item, 285 const char *page, size_t count) 286 { 287 struct nvmet_port *port = to_nvmet_port(item); 288 int ret; 289 290 if (nvmet_is_port_enabled(port, __func__)) 291 return -EACCES; 292 ret = kstrtoint(page, 0, &port->max_queue_size); 293 if (ret) { 294 pr_err("Invalid value '%s' for max_queue_size\n", page); 295 return -EINVAL; 296 } 297 return count; 298 } 299 300 CONFIGFS_ATTR(nvmet_, param_max_queue_size); 301 302 #ifdef CONFIG_BLK_DEV_INTEGRITY 303 static ssize_t nvmet_param_pi_enable_show(struct config_item *item, 304 char *page) 305 { 306 struct nvmet_port *port = to_nvmet_port(item); 307 308 return snprintf(page, PAGE_SIZE, "%d\n", port->pi_enable); 309 } 310 311 static ssize_t nvmet_param_pi_enable_store(struct config_item *item, 312 const char *page, size_t count) 313 { 314 struct nvmet_port *port = to_nvmet_port(item); 315 bool val; 316 317 if (kstrtobool(page, &val)) 318 return -EINVAL; 319 320 if (nvmet_is_port_enabled(port, __func__)) 321 return -EACCES; 322 323 
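	/*
	 * nvmet_is_port_enabled() above guarantees the port is disabled
	 * here, so pi_enable can be updated without affecting a live port;
	 * the new value is picked up when the port is enabled again.
	 */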
port->pi_enable = val; 324 return count; 325 } 326 327 CONFIGFS_ATTR(nvmet_, param_pi_enable); 328 #endif 329 330 static ssize_t nvmet_addr_trtype_show(struct config_item *item, 331 char *page) 332 { 333 struct nvmet_port *port = to_nvmet_port(item); 334 int i; 335 336 for (i = 0; i < ARRAY_SIZE(nvmet_transport); i++) { 337 if (port->disc_addr.trtype == nvmet_transport[i].type) 338 return snprintf(page, PAGE_SIZE, 339 "%s\n", nvmet_transport[i].name); 340 } 341 342 return sprintf(page, "\n"); 343 } 344 345 static void nvmet_port_init_tsas_rdma(struct nvmet_port *port) 346 { 347 port->disc_addr.tsas.rdma.qptype = NVMF_RDMA_QPTYPE_CONNECTED; 348 port->disc_addr.tsas.rdma.prtype = NVMF_RDMA_PRTYPE_NOT_SPECIFIED; 349 port->disc_addr.tsas.rdma.cms = NVMF_RDMA_CMS_RDMA_CM; 350 } 351 352 static void nvmet_port_init_tsas_tcp(struct nvmet_port *port, int sectype) 353 { 354 port->disc_addr.tsas.tcp.sectype = sectype; 355 } 356 357 static ssize_t nvmet_addr_trtype_store(struct config_item *item, 358 const char *page, size_t count) 359 { 360 struct nvmet_port *port = to_nvmet_port(item); 361 int i; 362 363 if (nvmet_is_port_enabled(port, __func__)) 364 return -EACCES; 365 366 for (i = 0; i < ARRAY_SIZE(nvmet_transport); i++) { 367 if (sysfs_streq(page, nvmet_transport[i].name)) 368 goto found; 369 } 370 371 pr_err("Invalid value '%s' for trtype\n", page); 372 return -EINVAL; 373 374 found: 375 memset(&port->disc_addr.tsas, 0, NVMF_TSAS_SIZE); 376 port->disc_addr.trtype = nvmet_transport[i].type; 377 if (port->disc_addr.trtype == NVMF_TRTYPE_RDMA) 378 nvmet_port_init_tsas_rdma(port); 379 else if (port->disc_addr.trtype == NVMF_TRTYPE_TCP) 380 nvmet_port_init_tsas_tcp(port, NVMF_TCP_SECTYPE_NONE); 381 return count; 382 } 383 384 CONFIGFS_ATTR(nvmet_, addr_trtype); 385 386 static const struct nvmet_type_name_map nvmet_addr_tsas_tcp[] = { 387 { NVMF_TCP_SECTYPE_NONE, "none" }, 388 { NVMF_TCP_SECTYPE_TLS13, "tls1.3" }, 389 }; 390 391 static const struct nvmet_type_name_map nvmet_addr_tsas_rdma[] = { 392 { NVMF_RDMA_QPTYPE_CONNECTED, "connected" }, 393 { NVMF_RDMA_QPTYPE_DATAGRAM, "datagram" }, 394 }; 395 396 static ssize_t nvmet_addr_tsas_show(struct config_item *item, 397 char *page) 398 { 399 struct nvmet_port *port = to_nvmet_port(item); 400 int i; 401 402 if (port->disc_addr.trtype == NVMF_TRTYPE_TCP) { 403 for (i = 0; i < ARRAY_SIZE(nvmet_addr_tsas_tcp); i++) { 404 if (port->disc_addr.tsas.tcp.sectype == nvmet_addr_tsas_tcp[i].type) 405 return sprintf(page, "%s\n", nvmet_addr_tsas_tcp[i].name); 406 } 407 } else if (port->disc_addr.trtype == NVMF_TRTYPE_RDMA) { 408 for (i = 0; i < ARRAY_SIZE(nvmet_addr_tsas_rdma); i++) { 409 if (port->disc_addr.tsas.rdma.qptype == nvmet_addr_tsas_rdma[i].type) 410 return sprintf(page, "%s\n", nvmet_addr_tsas_rdma[i].name); 411 } 412 } 413 return sprintf(page, "\n"); 414 } 415 416 static u8 nvmet_addr_tsas_rdma_store(const char *page) 417 { 418 int i; 419 420 for (i = 0; i < ARRAY_SIZE(nvmet_addr_tsas_rdma); i++) { 421 if (sysfs_streq(page, nvmet_addr_tsas_rdma[i].name)) 422 return nvmet_addr_tsas_rdma[i].type; 423 } 424 return NVMF_RDMA_QPTYPE_INVALID; 425 } 426 427 static u8 nvmet_addr_tsas_tcp_store(const char *page) 428 { 429 int i; 430 431 for (i = 0; i < ARRAY_SIZE(nvmet_addr_tsas_tcp); i++) { 432 if (sysfs_streq(page, nvmet_addr_tsas_tcp[i].name)) 433 return nvmet_addr_tsas_tcp[i].type; 434 } 435 return NVMF_TCP_SECTYPE_INVALID; 436 } 437 438 static ssize_t nvmet_addr_tsas_store(struct config_item *item, 439 const char *page, size_t count) 440 { 441 struct 
nvmet_port *port = to_nvmet_port(item); 442 u8 treq = nvmet_port_disc_addr_treq_mask(port); 443 u8 sectype, qptype; 444 445 if (nvmet_is_port_enabled(port, __func__)) 446 return -EACCES; 447 448 if (port->disc_addr.trtype == NVMF_TRTYPE_RDMA) { 449 qptype = nvmet_addr_tsas_rdma_store(page); 450 if (qptype == port->disc_addr.tsas.rdma.qptype) 451 return count; 452 } else if (port->disc_addr.trtype == NVMF_TRTYPE_TCP) { 453 sectype = nvmet_addr_tsas_tcp_store(page); 454 if (sectype != NVMF_TCP_SECTYPE_INVALID) 455 goto found; 456 } 457 458 pr_err("Invalid value '%s' for tsas\n", page); 459 return -EINVAL; 460 461 found: 462 if (sectype == NVMF_TCP_SECTYPE_TLS13) { 463 if (!IS_ENABLED(CONFIG_NVME_TARGET_TCP_TLS)) { 464 pr_err("TLS is not supported\n"); 465 return -EINVAL; 466 } 467 if (!port->keyring) { 468 pr_err("TLS keyring not configured\n"); 469 return -EINVAL; 470 } 471 } 472 473 nvmet_port_init_tsas_tcp(port, sectype); 474 /* 475 * If TLS is enabled TREQ should be set to 'required' per default 476 */ 477 if (sectype == NVMF_TCP_SECTYPE_TLS13) { 478 u8 sc = nvmet_port_disc_addr_treq_secure_channel(port); 479 480 if (sc == NVMF_TREQ_NOT_SPECIFIED) 481 treq |= NVMF_TREQ_REQUIRED; 482 else 483 treq |= sc; 484 } else { 485 treq |= NVMF_TREQ_NOT_SPECIFIED; 486 } 487 port->disc_addr.treq = treq; 488 return count; 489 } 490 491 CONFIGFS_ATTR(nvmet_, addr_tsas); 492 493 /* 494 * Namespace structures & file operation functions below 495 */ 496 static ssize_t nvmet_ns_device_path_show(struct config_item *item, char *page) 497 { 498 return sprintf(page, "%s\n", to_nvmet_ns(item)->device_path); 499 } 500 501 static ssize_t nvmet_ns_device_path_store(struct config_item *item, 502 const char *page, size_t count) 503 { 504 struct nvmet_ns *ns = to_nvmet_ns(item); 505 struct nvmet_subsys *subsys = ns->subsys; 506 size_t len; 507 int ret; 508 509 mutex_lock(&subsys->lock); 510 ret = -EBUSY; 511 if (ns->enabled) 512 goto out_unlock; 513 514 ret = -EINVAL; 515 len = strcspn(page, "\n"); 516 if (!len) 517 goto out_unlock; 518 519 kfree(ns->device_path); 520 ret = -ENOMEM; 521 ns->device_path = kmemdup_nul(page, len, GFP_KERNEL); 522 if (!ns->device_path) 523 goto out_unlock; 524 525 mutex_unlock(&subsys->lock); 526 return count; 527 528 out_unlock: 529 mutex_unlock(&subsys->lock); 530 return ret; 531 } 532 533 CONFIGFS_ATTR(nvmet_ns_, device_path); 534 535 #ifdef CONFIG_PCI_P2PDMA 536 static ssize_t nvmet_ns_p2pmem_show(struct config_item *item, char *page) 537 { 538 struct nvmet_ns *ns = to_nvmet_ns(item); 539 540 return pci_p2pdma_enable_show(page, ns->p2p_dev, ns->use_p2pmem); 541 } 542 543 static ssize_t nvmet_ns_p2pmem_store(struct config_item *item, 544 const char *page, size_t count) 545 { 546 struct nvmet_ns *ns = to_nvmet_ns(item); 547 struct pci_dev *p2p_dev = NULL; 548 bool use_p2pmem; 549 int ret = count; 550 int error; 551 552 mutex_lock(&ns->subsys->lock); 553 if (ns->enabled) { 554 ret = -EBUSY; 555 goto out_unlock; 556 } 557 558 error = pci_p2pdma_enable_store(page, &p2p_dev, &use_p2pmem); 559 if (error) { 560 ret = error; 561 goto out_unlock; 562 } 563 564 ns->use_p2pmem = use_p2pmem; 565 pci_dev_put(ns->p2p_dev); 566 ns->p2p_dev = p2p_dev; 567 568 out_unlock: 569 mutex_unlock(&ns->subsys->lock); 570 571 return ret; 572 } 573 574 CONFIGFS_ATTR(nvmet_ns_, p2pmem); 575 #endif /* CONFIG_PCI_P2PDMA */ 576 577 static ssize_t nvmet_ns_device_uuid_show(struct config_item *item, char *page) 578 { 579 return sprintf(page, "%pUb\n", &to_nvmet_ns(item)->uuid); 580 } 581 582 static ssize_t 
nvmet_ns_device_uuid_store(struct config_item *item, 583 const char *page, size_t count) 584 { 585 struct nvmet_ns *ns = to_nvmet_ns(item); 586 struct nvmet_subsys *subsys = ns->subsys; 587 int ret = 0; 588 589 mutex_lock(&subsys->lock); 590 if (ns->enabled) { 591 ret = -EBUSY; 592 goto out_unlock; 593 } 594 595 if (uuid_parse(page, &ns->uuid)) 596 ret = -EINVAL; 597 598 out_unlock: 599 mutex_unlock(&subsys->lock); 600 return ret ? ret : count; 601 } 602 603 CONFIGFS_ATTR(nvmet_ns_, device_uuid); 604 605 static ssize_t nvmet_ns_device_nguid_show(struct config_item *item, char *page) 606 { 607 return sprintf(page, "%pUb\n", &to_nvmet_ns(item)->nguid); 608 } 609 610 static ssize_t nvmet_ns_device_nguid_store(struct config_item *item, 611 const char *page, size_t count) 612 { 613 struct nvmet_ns *ns = to_nvmet_ns(item); 614 struct nvmet_subsys *subsys = ns->subsys; 615 u8 nguid[16]; 616 const char *p = page; 617 int i; 618 int ret = 0; 619 620 mutex_lock(&subsys->lock); 621 if (ns->enabled) { 622 ret = -EBUSY; 623 goto out_unlock; 624 } 625 626 for (i = 0; i < 16; i++) { 627 if (p + 2 > page + count) { 628 ret = -EINVAL; 629 goto out_unlock; 630 } 631 if (!isxdigit(p[0]) || !isxdigit(p[1])) { 632 ret = -EINVAL; 633 goto out_unlock; 634 } 635 636 nguid[i] = (hex_to_bin(p[0]) << 4) | hex_to_bin(p[1]); 637 p += 2; 638 639 if (*p == '-' || *p == ':') 640 p++; 641 } 642 643 memcpy(&ns->nguid, nguid, sizeof(nguid)); 644 out_unlock: 645 mutex_unlock(&subsys->lock); 646 return ret ? ret : count; 647 } 648 649 CONFIGFS_ATTR(nvmet_ns_, device_nguid); 650 651 static ssize_t nvmet_ns_ana_grpid_show(struct config_item *item, char *page) 652 { 653 return sprintf(page, "%u\n", to_nvmet_ns(item)->anagrpid); 654 } 655 656 static ssize_t nvmet_ns_ana_grpid_store(struct config_item *item, 657 const char *page, size_t count) 658 { 659 struct nvmet_ns *ns = to_nvmet_ns(item); 660 u32 oldgrpid, newgrpid; 661 int ret; 662 663 ret = kstrtou32(page, 0, &newgrpid); 664 if (ret) 665 return ret; 666 667 if (newgrpid < 1 || newgrpid > NVMET_MAX_ANAGRPS) 668 return -EINVAL; 669 670 down_write(&nvmet_ana_sem); 671 oldgrpid = ns->anagrpid; 672 newgrpid = array_index_nospec(newgrpid, NVMET_MAX_ANAGRPS); 673 nvmet_ana_group_enabled[newgrpid]++; 674 ns->anagrpid = newgrpid; 675 nvmet_ana_group_enabled[oldgrpid]--; 676 nvmet_ana_chgcnt++; 677 up_write(&nvmet_ana_sem); 678 679 nvmet_send_ana_event(ns->subsys, NULL); 680 return count; 681 } 682 683 CONFIGFS_ATTR(nvmet_ns_, ana_grpid); 684 685 static ssize_t nvmet_ns_enable_show(struct config_item *item, char *page) 686 { 687 return sprintf(page, "%d\n", to_nvmet_ns(item)->enabled); 688 } 689 690 static ssize_t nvmet_ns_enable_store(struct config_item *item, 691 const char *page, size_t count) 692 { 693 struct nvmet_ns *ns = to_nvmet_ns(item); 694 bool enable; 695 int ret = 0; 696 697 if (kstrtobool(page, &enable)) 698 return -EINVAL; 699 700 /* 701 * take a global nvmet_config_sem because the disable routine has a 702 * window where it releases the subsys-lock, giving a chance to 703 * a parallel enable to concurrently execute causing the disable to 704 * have a misaccounting of the ns percpu_ref. 705 */ 706 down_write(&nvmet_config_sem); 707 if (enable) 708 ret = nvmet_ns_enable(ns); 709 else 710 nvmet_ns_disable(ns); 711 up_write(&nvmet_config_sem); 712 713 return ret ? 
ret : count; 714 } 715 716 CONFIGFS_ATTR(nvmet_ns_, enable); 717 718 static ssize_t nvmet_ns_buffered_io_show(struct config_item *item, char *page) 719 { 720 return sprintf(page, "%d\n", to_nvmet_ns(item)->buffered_io); 721 } 722 723 static ssize_t nvmet_ns_buffered_io_store(struct config_item *item, 724 const char *page, size_t count) 725 { 726 struct nvmet_ns *ns = to_nvmet_ns(item); 727 bool val; 728 729 if (kstrtobool(page, &val)) 730 return -EINVAL; 731 732 mutex_lock(&ns->subsys->lock); 733 if (ns->enabled) { 734 pr_err("disable ns before setting buffered_io value.\n"); 735 mutex_unlock(&ns->subsys->lock); 736 return -EINVAL; 737 } 738 739 ns->buffered_io = val; 740 mutex_unlock(&ns->subsys->lock); 741 return count; 742 } 743 744 CONFIGFS_ATTR(nvmet_ns_, buffered_io); 745 746 static ssize_t nvmet_ns_revalidate_size_store(struct config_item *item, 747 const char *page, size_t count) 748 { 749 struct nvmet_ns *ns = to_nvmet_ns(item); 750 bool val; 751 752 if (kstrtobool(page, &val)) 753 return -EINVAL; 754 755 if (!val) 756 return -EINVAL; 757 758 mutex_lock(&ns->subsys->lock); 759 if (!ns->enabled) { 760 pr_err("enable ns before revalidate.\n"); 761 mutex_unlock(&ns->subsys->lock); 762 return -EINVAL; 763 } 764 if (nvmet_ns_revalidate(ns)) 765 nvmet_ns_changed(ns->subsys, ns->nsid); 766 mutex_unlock(&ns->subsys->lock); 767 return count; 768 } 769 770 CONFIGFS_ATTR_WO(nvmet_ns_, revalidate_size); 771 772 static ssize_t nvmet_ns_resv_enable_show(struct config_item *item, char *page) 773 { 774 return sysfs_emit(page, "%d\n", to_nvmet_ns(item)->pr.enable); 775 } 776 777 static ssize_t nvmet_ns_resv_enable_store(struct config_item *item, 778 const char *page, size_t count) 779 { 780 struct nvmet_ns *ns = to_nvmet_ns(item); 781 bool val; 782 783 if (kstrtobool(page, &val)) 784 return -EINVAL; 785 786 mutex_lock(&ns->subsys->lock); 787 if (ns->enabled) { 788 pr_err("the ns:%d is already enabled.\n", ns->nsid); 789 mutex_unlock(&ns->subsys->lock); 790 return -EINVAL; 791 } 792 ns->pr.enable = val; 793 mutex_unlock(&ns->subsys->lock); 794 return count; 795 } 796 CONFIGFS_ATTR(nvmet_ns_, resv_enable); 797 798 static struct configfs_attribute *nvmet_ns_attrs[] = { 799 &nvmet_ns_attr_device_path, 800 &nvmet_ns_attr_device_nguid, 801 &nvmet_ns_attr_device_uuid, 802 &nvmet_ns_attr_ana_grpid, 803 &nvmet_ns_attr_enable, 804 &nvmet_ns_attr_buffered_io, 805 &nvmet_ns_attr_revalidate_size, 806 &nvmet_ns_attr_resv_enable, 807 #ifdef CONFIG_PCI_P2PDMA 808 &nvmet_ns_attr_p2pmem, 809 #endif 810 NULL, 811 }; 812 813 bool nvmet_subsys_nsid_exists(struct nvmet_subsys *subsys, u32 nsid) 814 { 815 struct config_item *ns_item; 816 char name[12]; 817 818 snprintf(name, sizeof(name), "%u", nsid); 819 mutex_lock(&subsys->namespaces_group.cg_subsys->su_mutex); 820 ns_item = config_group_find_item(&subsys->namespaces_group, name); 821 mutex_unlock(&subsys->namespaces_group.cg_subsys->su_mutex); 822 return ns_item != NULL; 823 } 824 825 static void nvmet_ns_release(struct config_item *item) 826 { 827 struct nvmet_ns *ns = to_nvmet_ns(item); 828 829 nvmet_ns_free(ns); 830 } 831 832 static struct configfs_item_operations nvmet_ns_item_ops = { 833 .release = nvmet_ns_release, 834 }; 835 836 static const struct config_item_type nvmet_ns_type = { 837 .ct_item_ops = &nvmet_ns_item_ops, 838 .ct_attrs = nvmet_ns_attrs, 839 .ct_owner = THIS_MODULE, 840 }; 841 842 static struct config_group *nvmet_ns_make(struct config_group *group, 843 const char *name) 844 { 845 struct nvmet_subsys *subsys = 
namespaces_to_subsys(&group->cg_item); 846 struct nvmet_ns *ns; 847 int ret; 848 u32 nsid; 849 850 ret = kstrtou32(name, 0, &nsid); 851 if (ret) 852 goto out; 853 854 ret = -EINVAL; 855 if (nsid == 0 || nsid == NVME_NSID_ALL) { 856 pr_err("invalid nsid %#x", nsid); 857 goto out; 858 } 859 860 ret = -ENOMEM; 861 ns = nvmet_ns_alloc(subsys, nsid); 862 if (!ns) 863 goto out; 864 config_group_init_type_name(&ns->group, name, &nvmet_ns_type); 865 866 pr_info("adding nsid %d to subsystem %s\n", nsid, subsys->subsysnqn); 867 868 return &ns->group; 869 out: 870 return ERR_PTR(ret); 871 } 872 873 static struct configfs_group_operations nvmet_namespaces_group_ops = { 874 .make_group = nvmet_ns_make, 875 }; 876 877 static const struct config_item_type nvmet_namespaces_type = { 878 .ct_group_ops = &nvmet_namespaces_group_ops, 879 .ct_owner = THIS_MODULE, 880 }; 881 882 #ifdef CONFIG_NVME_TARGET_PASSTHRU 883 884 static ssize_t nvmet_passthru_device_path_show(struct config_item *item, 885 char *page) 886 { 887 struct nvmet_subsys *subsys = to_subsys(item->ci_parent); 888 889 return snprintf(page, PAGE_SIZE, "%s\n", subsys->passthru_ctrl_path); 890 } 891 892 static ssize_t nvmet_passthru_device_path_store(struct config_item *item, 893 const char *page, size_t count) 894 { 895 struct nvmet_subsys *subsys = to_subsys(item->ci_parent); 896 size_t len; 897 int ret; 898 899 mutex_lock(&subsys->lock); 900 901 ret = -EBUSY; 902 if (subsys->passthru_ctrl) 903 goto out_unlock; 904 905 ret = -EINVAL; 906 len = strcspn(page, "\n"); 907 if (!len) 908 goto out_unlock; 909 910 kfree(subsys->passthru_ctrl_path); 911 ret = -ENOMEM; 912 subsys->passthru_ctrl_path = kstrndup(page, len, GFP_KERNEL); 913 if (!subsys->passthru_ctrl_path) 914 goto out_unlock; 915 916 mutex_unlock(&subsys->lock); 917 918 return count; 919 out_unlock: 920 mutex_unlock(&subsys->lock); 921 return ret; 922 } 923 CONFIGFS_ATTR(nvmet_passthru_, device_path); 924 925 static ssize_t nvmet_passthru_enable_show(struct config_item *item, 926 char *page) 927 { 928 struct nvmet_subsys *subsys = to_subsys(item->ci_parent); 929 930 return sprintf(page, "%d\n", subsys->passthru_ctrl ? 1 : 0); 931 } 932 933 static ssize_t nvmet_passthru_enable_store(struct config_item *item, 934 const char *page, size_t count) 935 { 936 struct nvmet_subsys *subsys = to_subsys(item->ci_parent); 937 bool enable; 938 int ret = 0; 939 940 if (kstrtobool(page, &enable)) 941 return -EINVAL; 942 943 if (enable) 944 ret = nvmet_passthru_ctrl_enable(subsys); 945 else 946 nvmet_passthru_ctrl_disable(subsys); 947 948 return ret ? 
ret : count; 949 } 950 CONFIGFS_ATTR(nvmet_passthru_, enable); 951 952 static ssize_t nvmet_passthru_admin_timeout_show(struct config_item *item, 953 char *page) 954 { 955 return sprintf(page, "%u\n", to_subsys(item->ci_parent)->admin_timeout); 956 } 957 958 static ssize_t nvmet_passthru_admin_timeout_store(struct config_item *item, 959 const char *page, size_t count) 960 { 961 struct nvmet_subsys *subsys = to_subsys(item->ci_parent); 962 unsigned int timeout; 963 964 if (kstrtouint(page, 0, &timeout)) 965 return -EINVAL; 966 subsys->admin_timeout = timeout; 967 return count; 968 } 969 CONFIGFS_ATTR(nvmet_passthru_, admin_timeout); 970 971 static ssize_t nvmet_passthru_io_timeout_show(struct config_item *item, 972 char *page) 973 { 974 return sprintf(page, "%u\n", to_subsys(item->ci_parent)->io_timeout); 975 } 976 977 static ssize_t nvmet_passthru_io_timeout_store(struct config_item *item, 978 const char *page, size_t count) 979 { 980 struct nvmet_subsys *subsys = to_subsys(item->ci_parent); 981 unsigned int timeout; 982 983 if (kstrtouint(page, 0, &timeout)) 984 return -EINVAL; 985 subsys->io_timeout = timeout; 986 return count; 987 } 988 CONFIGFS_ATTR(nvmet_passthru_, io_timeout); 989 990 static ssize_t nvmet_passthru_clear_ids_show(struct config_item *item, 991 char *page) 992 { 993 return sprintf(page, "%u\n", to_subsys(item->ci_parent)->clear_ids); 994 } 995 996 static ssize_t nvmet_passthru_clear_ids_store(struct config_item *item, 997 const char *page, size_t count) 998 { 999 struct nvmet_subsys *subsys = to_subsys(item->ci_parent); 1000 unsigned int clear_ids; 1001 1002 if (kstrtouint(page, 0, &clear_ids)) 1003 return -EINVAL; 1004 subsys->clear_ids = clear_ids; 1005 return count; 1006 } 1007 CONFIGFS_ATTR(nvmet_passthru_, clear_ids); 1008 1009 static struct configfs_attribute *nvmet_passthru_attrs[] = { 1010 &nvmet_passthru_attr_device_path, 1011 &nvmet_passthru_attr_enable, 1012 &nvmet_passthru_attr_admin_timeout, 1013 &nvmet_passthru_attr_io_timeout, 1014 &nvmet_passthru_attr_clear_ids, 1015 NULL, 1016 }; 1017 1018 static const struct config_item_type nvmet_passthru_type = { 1019 .ct_attrs = nvmet_passthru_attrs, 1020 .ct_owner = THIS_MODULE, 1021 }; 1022 1023 static void nvmet_add_passthru_group(struct nvmet_subsys *subsys) 1024 { 1025 config_group_init_type_name(&subsys->passthru_group, 1026 "passthru", &nvmet_passthru_type); 1027 configfs_add_default_group(&subsys->passthru_group, 1028 &subsys->group); 1029 } 1030 1031 #else /* CONFIG_NVME_TARGET_PASSTHRU */ 1032 1033 static void nvmet_add_passthru_group(struct nvmet_subsys *subsys) 1034 { 1035 } 1036 1037 #endif /* CONFIG_NVME_TARGET_PASSTHRU */ 1038 1039 static int nvmet_port_subsys_allow_link(struct config_item *parent, 1040 struct config_item *target) 1041 { 1042 struct nvmet_port *port = to_nvmet_port(parent->ci_parent); 1043 struct nvmet_subsys *subsys; 1044 struct nvmet_subsys_link *link, *p; 1045 int ret; 1046 1047 if (target->ci_type != &nvmet_subsys_type) { 1048 pr_err("can only link subsystems into the subsystems dir.!\n"); 1049 return -EINVAL; 1050 } 1051 subsys = to_subsys(target); 1052 link = kmalloc(sizeof(*link), GFP_KERNEL); 1053 if (!link) 1054 return -ENOMEM; 1055 link->subsys = subsys; 1056 1057 down_write(&nvmet_config_sem); 1058 ret = -EEXIST; 1059 list_for_each_entry(p, &port->subsystems, entry) { 1060 if (p->subsys == subsys) 1061 goto out_free_link; 1062 } 1063 1064 if (list_empty(&port->subsystems)) { 1065 ret = nvmet_enable_port(port); 1066 if (ret) 1067 goto out_free_link; 1068 } 1069 1070 
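	/*
	 * The port is brought up lazily: nvmet_enable_port() above runs only
	 * when the first subsystem is linked, and the matching
	 * nvmet_disable_port() call in nvmet_port_subsys_drop_link() runs
	 * once the last link is removed.
	 */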
list_add_tail(&link->entry, &port->subsystems); 1071 nvmet_port_disc_changed(port, subsys); 1072 1073 up_write(&nvmet_config_sem); 1074 return 0; 1075 1076 out_free_link: 1077 up_write(&nvmet_config_sem); 1078 kfree(link); 1079 return ret; 1080 } 1081 1082 static void nvmet_port_subsys_drop_link(struct config_item *parent, 1083 struct config_item *target) 1084 { 1085 struct nvmet_port *port = to_nvmet_port(parent->ci_parent); 1086 struct nvmet_subsys *subsys = to_subsys(target); 1087 struct nvmet_subsys_link *p; 1088 1089 down_write(&nvmet_config_sem); 1090 list_for_each_entry(p, &port->subsystems, entry) { 1091 if (p->subsys == subsys) 1092 goto found; 1093 } 1094 up_write(&nvmet_config_sem); 1095 return; 1096 1097 found: 1098 list_del(&p->entry); 1099 nvmet_port_del_ctrls(port, subsys); 1100 nvmet_port_disc_changed(port, subsys); 1101 1102 if (list_empty(&port->subsystems)) 1103 nvmet_disable_port(port); 1104 up_write(&nvmet_config_sem); 1105 kfree(p); 1106 } 1107 1108 static struct configfs_item_operations nvmet_port_subsys_item_ops = { 1109 .allow_link = nvmet_port_subsys_allow_link, 1110 .drop_link = nvmet_port_subsys_drop_link, 1111 }; 1112 1113 static const struct config_item_type nvmet_port_subsys_type = { 1114 .ct_item_ops = &nvmet_port_subsys_item_ops, 1115 .ct_owner = THIS_MODULE, 1116 }; 1117 1118 static int nvmet_allowed_hosts_allow_link(struct config_item *parent, 1119 struct config_item *target) 1120 { 1121 struct nvmet_subsys *subsys = to_subsys(parent->ci_parent); 1122 struct nvmet_host *host; 1123 struct nvmet_host_link *link, *p; 1124 int ret; 1125 1126 if (target->ci_type != &nvmet_host_type) { 1127 pr_err("can only link hosts into the allowed_hosts directory!\n"); 1128 return -EINVAL; 1129 } 1130 1131 host = to_host(target); 1132 link = kmalloc(sizeof(*link), GFP_KERNEL); 1133 if (!link) 1134 return -ENOMEM; 1135 link->host = host; 1136 1137 down_write(&nvmet_config_sem); 1138 ret = -EINVAL; 1139 if (subsys->allow_any_host) { 1140 pr_err("can't add hosts when allow_any_host is set!\n"); 1141 goto out_free_link; 1142 } 1143 1144 ret = -EEXIST; 1145 list_for_each_entry(p, &subsys->hosts, entry) { 1146 if (!strcmp(nvmet_host_name(p->host), nvmet_host_name(host))) 1147 goto out_free_link; 1148 } 1149 list_add_tail(&link->entry, &subsys->hosts); 1150 nvmet_subsys_disc_changed(subsys, host); 1151 1152 up_write(&nvmet_config_sem); 1153 return 0; 1154 out_free_link: 1155 up_write(&nvmet_config_sem); 1156 kfree(link); 1157 return ret; 1158 } 1159 1160 static void nvmet_allowed_hosts_drop_link(struct config_item *parent, 1161 struct config_item *target) 1162 { 1163 struct nvmet_subsys *subsys = to_subsys(parent->ci_parent); 1164 struct nvmet_host *host = to_host(target); 1165 struct nvmet_host_link *p; 1166 1167 down_write(&nvmet_config_sem); 1168 list_for_each_entry(p, &subsys->hosts, entry) { 1169 if (!strcmp(nvmet_host_name(p->host), nvmet_host_name(host))) 1170 goto found; 1171 } 1172 up_write(&nvmet_config_sem); 1173 return; 1174 1175 found: 1176 list_del(&p->entry); 1177 nvmet_subsys_disc_changed(subsys, host); 1178 1179 up_write(&nvmet_config_sem); 1180 kfree(p); 1181 } 1182 1183 static struct configfs_item_operations nvmet_allowed_hosts_item_ops = { 1184 .allow_link = nvmet_allowed_hosts_allow_link, 1185 .drop_link = nvmet_allowed_hosts_drop_link, 1186 }; 1187 1188 static const struct config_item_type nvmet_allowed_hosts_type = { 1189 .ct_item_ops = &nvmet_allowed_hosts_item_ops, 1190 .ct_owner = THIS_MODULE, 1191 }; 1192 1193 static ssize_t 
nvmet_subsys_attr_allow_any_host_show(struct config_item *item, 1194 char *page) 1195 { 1196 return snprintf(page, PAGE_SIZE, "%d\n", 1197 to_subsys(item)->allow_any_host); 1198 } 1199 1200 static ssize_t nvmet_subsys_attr_allow_any_host_store(struct config_item *item, 1201 const char *page, size_t count) 1202 { 1203 struct nvmet_subsys *subsys = to_subsys(item); 1204 bool allow_any_host; 1205 int ret = 0; 1206 1207 if (kstrtobool(page, &allow_any_host)) 1208 return -EINVAL; 1209 1210 down_write(&nvmet_config_sem); 1211 if (allow_any_host && !list_empty(&subsys->hosts)) { 1212 pr_err("Can't set allow_any_host when explicit hosts are set!\n"); 1213 ret = -EINVAL; 1214 goto out_unlock; 1215 } 1216 1217 if (subsys->allow_any_host != allow_any_host) { 1218 subsys->allow_any_host = allow_any_host; 1219 nvmet_subsys_disc_changed(subsys, NULL); 1220 } 1221 1222 out_unlock: 1223 up_write(&nvmet_config_sem); 1224 return ret ? ret : count; 1225 } 1226 1227 CONFIGFS_ATTR(nvmet_subsys_, attr_allow_any_host); 1228 1229 static ssize_t nvmet_subsys_attr_version_show(struct config_item *item, 1230 char *page) 1231 { 1232 struct nvmet_subsys *subsys = to_subsys(item); 1233 1234 if (NVME_TERTIARY(subsys->ver)) 1235 return snprintf(page, PAGE_SIZE, "%llu.%llu.%llu\n", 1236 NVME_MAJOR(subsys->ver), 1237 NVME_MINOR(subsys->ver), 1238 NVME_TERTIARY(subsys->ver)); 1239 1240 return snprintf(page, PAGE_SIZE, "%llu.%llu\n", 1241 NVME_MAJOR(subsys->ver), 1242 NVME_MINOR(subsys->ver)); 1243 } 1244 1245 static ssize_t 1246 nvmet_subsys_attr_version_store_locked(struct nvmet_subsys *subsys, 1247 const char *page, size_t count) 1248 { 1249 int major, minor, tertiary = 0; 1250 int ret; 1251 1252 if (subsys->subsys_discovered) { 1253 if (NVME_TERTIARY(subsys->ver)) 1254 pr_err("Can't set version number. %llu.%llu.%llu is already assigned\n", 1255 NVME_MAJOR(subsys->ver), 1256 NVME_MINOR(subsys->ver), 1257 NVME_TERTIARY(subsys->ver)); 1258 else 1259 pr_err("Can't set version number. 
%llu.%llu is already assigned\n", 1260 NVME_MAJOR(subsys->ver), 1261 NVME_MINOR(subsys->ver)); 1262 return -EINVAL; 1263 } 1264 1265 /* passthru subsystems use the underlying controller's version */ 1266 if (nvmet_is_passthru_subsys(subsys)) 1267 return -EINVAL; 1268 1269 ret = sscanf(page, "%d.%d.%d\n", &major, &minor, &tertiary); 1270 if (ret != 2 && ret != 3) 1271 return -EINVAL; 1272 1273 subsys->ver = NVME_VS(major, minor, tertiary); 1274 1275 return count; 1276 } 1277 1278 static ssize_t nvmet_subsys_attr_version_store(struct config_item *item, 1279 const char *page, size_t count) 1280 { 1281 struct nvmet_subsys *subsys = to_subsys(item); 1282 ssize_t ret; 1283 1284 down_write(&nvmet_config_sem); 1285 mutex_lock(&subsys->lock); 1286 ret = nvmet_subsys_attr_version_store_locked(subsys, page, count); 1287 mutex_unlock(&subsys->lock); 1288 up_write(&nvmet_config_sem); 1289 1290 return ret; 1291 } 1292 CONFIGFS_ATTR(nvmet_subsys_, attr_version); 1293 1294 /* See Section 1.5 of NVMe 1.4 */ 1295 static bool nvmet_is_ascii(const char c) 1296 { 1297 return c >= 0x20 && c <= 0x7e; 1298 } 1299 1300 static ssize_t nvmet_subsys_attr_serial_show(struct config_item *item, 1301 char *page) 1302 { 1303 struct nvmet_subsys *subsys = to_subsys(item); 1304 1305 return snprintf(page, PAGE_SIZE, "%.*s\n", 1306 NVMET_SN_MAX_SIZE, subsys->serial); 1307 } 1308 1309 static ssize_t 1310 nvmet_subsys_attr_serial_store_locked(struct nvmet_subsys *subsys, 1311 const char *page, size_t count) 1312 { 1313 int pos, len = strcspn(page, "\n"); 1314 1315 if (subsys->subsys_discovered) { 1316 pr_err("Can't set serial number. %s is already assigned\n", 1317 subsys->serial); 1318 return -EINVAL; 1319 } 1320 1321 if (!len || len > NVMET_SN_MAX_SIZE) { 1322 pr_err("Serial Number can not be empty or exceed %d Bytes\n", 1323 NVMET_SN_MAX_SIZE); 1324 return -EINVAL; 1325 } 1326 1327 for (pos = 0; pos < len; pos++) { 1328 if (!nvmet_is_ascii(page[pos])) { 1329 pr_err("Serial Number must contain only ASCII strings\n"); 1330 return -EINVAL; 1331 } 1332 } 1333 1334 memcpy_and_pad(subsys->serial, NVMET_SN_MAX_SIZE, page, len, ' '); 1335 1336 return count; 1337 } 1338 1339 static ssize_t nvmet_subsys_attr_serial_store(struct config_item *item, 1340 const char *page, size_t count) 1341 { 1342 struct nvmet_subsys *subsys = to_subsys(item); 1343 ssize_t ret; 1344 1345 down_write(&nvmet_config_sem); 1346 mutex_lock(&subsys->lock); 1347 ret = nvmet_subsys_attr_serial_store_locked(subsys, page, count); 1348 mutex_unlock(&subsys->lock); 1349 up_write(&nvmet_config_sem); 1350 1351 return ret; 1352 } 1353 CONFIGFS_ATTR(nvmet_subsys_, attr_serial); 1354 1355 static ssize_t nvmet_subsys_attr_cntlid_min_show(struct config_item *item, 1356 char *page) 1357 { 1358 return snprintf(page, PAGE_SIZE, "%u\n", to_subsys(item)->cntlid_min); 1359 } 1360 1361 static ssize_t nvmet_subsys_attr_cntlid_min_store(struct config_item *item, 1362 const char *page, size_t cnt) 1363 { 1364 u16 cntlid_min; 1365 1366 if (sscanf(page, "%hu\n", &cntlid_min) != 1) 1367 return -EINVAL; 1368 1369 if (cntlid_min == 0) 1370 return -EINVAL; 1371 1372 down_write(&nvmet_config_sem); 1373 if (cntlid_min > to_subsys(item)->cntlid_max) 1374 goto out_unlock; 1375 to_subsys(item)->cntlid_min = cntlid_min; 1376 up_write(&nvmet_config_sem); 1377 return cnt; 1378 1379 out_unlock: 1380 up_write(&nvmet_config_sem); 1381 return -EINVAL; 1382 } 1383 CONFIGFS_ATTR(nvmet_subsys_, attr_cntlid_min); 1384 1385 static ssize_t nvmet_subsys_attr_cntlid_max_show(struct config_item *item, 1386 
char *page) 1387 { 1388 return snprintf(page, PAGE_SIZE, "%u\n", to_subsys(item)->cntlid_max); 1389 } 1390 1391 static ssize_t nvmet_subsys_attr_cntlid_max_store(struct config_item *item, 1392 const char *page, size_t cnt) 1393 { 1394 u16 cntlid_max; 1395 1396 if (sscanf(page, "%hu\n", &cntlid_max) != 1) 1397 return -EINVAL; 1398 1399 if (cntlid_max == 0) 1400 return -EINVAL; 1401 1402 down_write(&nvmet_config_sem); 1403 if (cntlid_max < to_subsys(item)->cntlid_min) 1404 goto out_unlock; 1405 to_subsys(item)->cntlid_max = cntlid_max; 1406 up_write(&nvmet_config_sem); 1407 return cnt; 1408 1409 out_unlock: 1410 up_write(&nvmet_config_sem); 1411 return -EINVAL; 1412 } 1413 CONFIGFS_ATTR(nvmet_subsys_, attr_cntlid_max); 1414 1415 static ssize_t nvmet_subsys_attr_model_show(struct config_item *item, 1416 char *page) 1417 { 1418 struct nvmet_subsys *subsys = to_subsys(item); 1419 1420 return snprintf(page, PAGE_SIZE, "%s\n", subsys->model_number); 1421 } 1422 1423 static ssize_t nvmet_subsys_attr_model_store_locked(struct nvmet_subsys *subsys, 1424 const char *page, size_t count) 1425 { 1426 int pos = 0, len; 1427 char *val; 1428 1429 if (subsys->subsys_discovered) { 1430 pr_err("Can't set model number. %s is already assigned\n", 1431 subsys->model_number); 1432 return -EINVAL; 1433 } 1434 1435 len = strcspn(page, "\n"); 1436 if (!len) 1437 return -EINVAL; 1438 1439 if (len > NVMET_MN_MAX_SIZE) { 1440 pr_err("Model number size can not exceed %d Bytes\n", 1441 NVMET_MN_MAX_SIZE); 1442 return -EINVAL; 1443 } 1444 1445 for (pos = 0; pos < len; pos++) { 1446 if (!nvmet_is_ascii(page[pos])) 1447 return -EINVAL; 1448 } 1449 1450 val = kmemdup_nul(page, len, GFP_KERNEL); 1451 if (!val) 1452 return -ENOMEM; 1453 kfree(subsys->model_number); 1454 subsys->model_number = val; 1455 return count; 1456 } 1457 1458 static ssize_t nvmet_subsys_attr_model_store(struct config_item *item, 1459 const char *page, size_t count) 1460 { 1461 struct nvmet_subsys *subsys = to_subsys(item); 1462 ssize_t ret; 1463 1464 down_write(&nvmet_config_sem); 1465 mutex_lock(&subsys->lock); 1466 ret = nvmet_subsys_attr_model_store_locked(subsys, page, count); 1467 mutex_unlock(&subsys->lock); 1468 up_write(&nvmet_config_sem); 1469 1470 return ret; 1471 } 1472 CONFIGFS_ATTR(nvmet_subsys_, attr_model); 1473 1474 static ssize_t nvmet_subsys_attr_ieee_oui_show(struct config_item *item, 1475 char *page) 1476 { 1477 struct nvmet_subsys *subsys = to_subsys(item); 1478 1479 return sysfs_emit(page, "0x%06x\n", subsys->ieee_oui); 1480 } 1481 1482 static ssize_t nvmet_subsys_attr_ieee_oui_store_locked(struct nvmet_subsys *subsys, 1483 const char *page, size_t count) 1484 { 1485 uint32_t val = 0; 1486 int ret; 1487 1488 if (subsys->subsys_discovered) { 1489 pr_err("Can't set IEEE OUI. 
0x%06x is already assigned\n", 1490 subsys->ieee_oui); 1491 return -EINVAL; 1492 } 1493 1494 ret = kstrtou32(page, 0, &val); 1495 if (ret < 0) 1496 return ret; 1497 1498 if (val >= 0x1000000) 1499 return -EINVAL; 1500 1501 subsys->ieee_oui = val; 1502 1503 return count; 1504 } 1505 1506 static ssize_t nvmet_subsys_attr_ieee_oui_store(struct config_item *item, 1507 const char *page, size_t count) 1508 { 1509 struct nvmet_subsys *subsys = to_subsys(item); 1510 ssize_t ret; 1511 1512 down_write(&nvmet_config_sem); 1513 mutex_lock(&subsys->lock); 1514 ret = nvmet_subsys_attr_ieee_oui_store_locked(subsys, page, count); 1515 mutex_unlock(&subsys->lock); 1516 up_write(&nvmet_config_sem); 1517 1518 return ret; 1519 } 1520 CONFIGFS_ATTR(nvmet_subsys_, attr_ieee_oui); 1521 1522 static ssize_t nvmet_subsys_attr_firmware_show(struct config_item *item, 1523 char *page) 1524 { 1525 struct nvmet_subsys *subsys = to_subsys(item); 1526 1527 return sysfs_emit(page, "%s\n", subsys->firmware_rev); 1528 } 1529 1530 static ssize_t nvmet_subsys_attr_firmware_store_locked(struct nvmet_subsys *subsys, 1531 const char *page, size_t count) 1532 { 1533 int pos = 0, len; 1534 char *val; 1535 1536 if (subsys->subsys_discovered) { 1537 pr_err("Can't set firmware revision. %s is already assigned\n", 1538 subsys->firmware_rev); 1539 return -EINVAL; 1540 } 1541 1542 len = strcspn(page, "\n"); 1543 if (!len) 1544 return -EINVAL; 1545 1546 if (len > NVMET_FR_MAX_SIZE) { 1547 pr_err("Firmware revision size can not exceed %d Bytes\n", 1548 NVMET_FR_MAX_SIZE); 1549 return -EINVAL; 1550 } 1551 1552 for (pos = 0; pos < len; pos++) { 1553 if (!nvmet_is_ascii(page[pos])) 1554 return -EINVAL; 1555 } 1556 1557 val = kmemdup_nul(page, len, GFP_KERNEL); 1558 if (!val) 1559 return -ENOMEM; 1560 1561 kfree(subsys->firmware_rev); 1562 1563 subsys->firmware_rev = val; 1564 1565 return count; 1566 } 1567 1568 static ssize_t nvmet_subsys_attr_firmware_store(struct config_item *item, 1569 const char *page, size_t count) 1570 { 1571 struct nvmet_subsys *subsys = to_subsys(item); 1572 ssize_t ret; 1573 1574 down_write(&nvmet_config_sem); 1575 mutex_lock(&subsys->lock); 1576 ret = nvmet_subsys_attr_firmware_store_locked(subsys, page, count); 1577 mutex_unlock(&subsys->lock); 1578 up_write(&nvmet_config_sem); 1579 1580 return ret; 1581 } 1582 CONFIGFS_ATTR(nvmet_subsys_, attr_firmware); 1583 1584 #ifdef CONFIG_BLK_DEV_INTEGRITY 1585 static ssize_t nvmet_subsys_attr_pi_enable_show(struct config_item *item, 1586 char *page) 1587 { 1588 return snprintf(page, PAGE_SIZE, "%d\n", to_subsys(item)->pi_support); 1589 } 1590 1591 static ssize_t nvmet_subsys_attr_pi_enable_store(struct config_item *item, 1592 const char *page, size_t count) 1593 { 1594 struct nvmet_subsys *subsys = to_subsys(item); 1595 bool pi_enable; 1596 1597 if (kstrtobool(page, &pi_enable)) 1598 return -EINVAL; 1599 1600 subsys->pi_support = pi_enable; 1601 return count; 1602 } 1603 CONFIGFS_ATTR(nvmet_subsys_, attr_pi_enable); 1604 #endif 1605 1606 static ssize_t nvmet_subsys_attr_qid_max_show(struct config_item *item, 1607 char *page) 1608 { 1609 return snprintf(page, PAGE_SIZE, "%u\n", to_subsys(item)->max_qid); 1610 } 1611 1612 static ssize_t nvmet_subsys_attr_qid_max_store(struct config_item *item, 1613 const char *page, size_t cnt) 1614 { 1615 struct nvmet_subsys *subsys = to_subsys(item); 1616 struct nvmet_ctrl *ctrl; 1617 u16 qid_max; 1618 1619 if (sscanf(page, "%hu\n", &qid_max) != 1) 1620 return -EINVAL; 1621 1622 if (qid_max < 1 || qid_max > NVMET_NR_QUEUES) 1623 return 
-EINVAL; 1624 1625 down_write(&nvmet_config_sem); 1626 subsys->max_qid = qid_max; 1627 1628 /* Force reconnect */ 1629 list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) 1630 ctrl->ops->delete_ctrl(ctrl); 1631 up_write(&nvmet_config_sem); 1632 1633 return cnt; 1634 } 1635 CONFIGFS_ATTR(nvmet_subsys_, attr_qid_max); 1636 1637 static struct configfs_attribute *nvmet_subsys_attrs[] = { 1638 &nvmet_subsys_attr_attr_allow_any_host, 1639 &nvmet_subsys_attr_attr_version, 1640 &nvmet_subsys_attr_attr_serial, 1641 &nvmet_subsys_attr_attr_cntlid_min, 1642 &nvmet_subsys_attr_attr_cntlid_max, 1643 &nvmet_subsys_attr_attr_model, 1644 &nvmet_subsys_attr_attr_qid_max, 1645 &nvmet_subsys_attr_attr_ieee_oui, 1646 &nvmet_subsys_attr_attr_firmware, 1647 #ifdef CONFIG_BLK_DEV_INTEGRITY 1648 &nvmet_subsys_attr_attr_pi_enable, 1649 #endif 1650 NULL, 1651 }; 1652 1653 /* 1654 * Subsystem structures & folder operation functions below 1655 */ 1656 static void nvmet_subsys_release(struct config_item *item) 1657 { 1658 struct nvmet_subsys *subsys = to_subsys(item); 1659 1660 nvmet_subsys_del_ctrls(subsys); 1661 nvmet_subsys_put(subsys); 1662 } 1663 1664 static struct configfs_item_operations nvmet_subsys_item_ops = { 1665 .release = nvmet_subsys_release, 1666 }; 1667 1668 static const struct config_item_type nvmet_subsys_type = { 1669 .ct_item_ops = &nvmet_subsys_item_ops, 1670 .ct_attrs = nvmet_subsys_attrs, 1671 .ct_owner = THIS_MODULE, 1672 }; 1673 1674 static struct config_group *nvmet_subsys_make(struct config_group *group, 1675 const char *name) 1676 { 1677 struct nvmet_subsys *subsys; 1678 1679 if (sysfs_streq(name, NVME_DISC_SUBSYS_NAME)) { 1680 pr_err("can't create discovery subsystem through configfs\n"); 1681 return ERR_PTR(-EINVAL); 1682 } 1683 1684 if (sysfs_streq(name, nvmet_disc_subsys->subsysnqn)) { 1685 pr_err("can't create subsystem using unique discovery NQN\n"); 1686 return ERR_PTR(-EINVAL); 1687 } 1688 1689 subsys = nvmet_subsys_alloc(name, NVME_NQN_NVME); 1690 if (IS_ERR(subsys)) 1691 return ERR_CAST(subsys); 1692 1693 config_group_init_type_name(&subsys->group, name, &nvmet_subsys_type); 1694 1695 config_group_init_type_name(&subsys->namespaces_group, 1696 "namespaces", &nvmet_namespaces_type); 1697 configfs_add_default_group(&subsys->namespaces_group, &subsys->group); 1698 1699 config_group_init_type_name(&subsys->allowed_hosts_group, 1700 "allowed_hosts", &nvmet_allowed_hosts_type); 1701 configfs_add_default_group(&subsys->allowed_hosts_group, 1702 &subsys->group); 1703 1704 nvmet_add_passthru_group(subsys); 1705 1706 return &subsys->group; 1707 } 1708 1709 static struct configfs_group_operations nvmet_subsystems_group_ops = { 1710 .make_group = nvmet_subsys_make, 1711 }; 1712 1713 static const struct config_item_type nvmet_subsystems_type = { 1714 .ct_group_ops = &nvmet_subsystems_group_ops, 1715 .ct_owner = THIS_MODULE, 1716 }; 1717 1718 static ssize_t nvmet_referral_enable_show(struct config_item *item, 1719 char *page) 1720 { 1721 return snprintf(page, PAGE_SIZE, "%d\n", to_nvmet_port(item)->enabled); 1722 } 1723 1724 static ssize_t nvmet_referral_enable_store(struct config_item *item, 1725 const char *page, size_t count) 1726 { 1727 struct nvmet_port *parent = to_nvmet_port(item->ci_parent->ci_parent); 1728 struct nvmet_port *port = to_nvmet_port(item); 1729 bool enable; 1730 1731 if (kstrtobool(page, &enable)) 1732 goto inval; 1733 1734 if (enable) 1735 nvmet_referral_enable(parent, port); 1736 else 1737 nvmet_referral_disable(parent, port); 1738 1739 return count; 1740 inval: 
1741 pr_err("Invalid value '%s' for enable\n", page); 1742 return -EINVAL; 1743 } 1744 1745 CONFIGFS_ATTR(nvmet_referral_, enable); 1746 1747 /* 1748 * Discovery Service subsystem definitions 1749 */ 1750 static struct configfs_attribute *nvmet_referral_attrs[] = { 1751 &nvmet_attr_addr_adrfam, 1752 &nvmet_attr_addr_portid, 1753 &nvmet_attr_addr_treq, 1754 &nvmet_attr_addr_traddr, 1755 &nvmet_attr_addr_trsvcid, 1756 &nvmet_attr_addr_trtype, 1757 &nvmet_referral_attr_enable, 1758 NULL, 1759 }; 1760 1761 static void nvmet_referral_notify(struct config_group *group, 1762 struct config_item *item) 1763 { 1764 struct nvmet_port *parent = to_nvmet_port(item->ci_parent->ci_parent); 1765 struct nvmet_port *port = to_nvmet_port(item); 1766 1767 nvmet_referral_disable(parent, port); 1768 } 1769 1770 static void nvmet_referral_release(struct config_item *item) 1771 { 1772 struct nvmet_port *port = to_nvmet_port(item); 1773 1774 kfree(port); 1775 } 1776 1777 static struct configfs_item_operations nvmet_referral_item_ops = { 1778 .release = nvmet_referral_release, 1779 }; 1780 1781 static const struct config_item_type nvmet_referral_type = { 1782 .ct_owner = THIS_MODULE, 1783 .ct_attrs = nvmet_referral_attrs, 1784 .ct_item_ops = &nvmet_referral_item_ops, 1785 }; 1786 1787 static struct config_group *nvmet_referral_make( 1788 struct config_group *group, const char *name) 1789 { 1790 struct nvmet_port *port; 1791 1792 port = kzalloc(sizeof(*port), GFP_KERNEL); 1793 if (!port) 1794 return ERR_PTR(-ENOMEM); 1795 1796 INIT_LIST_HEAD(&port->entry); 1797 config_group_init_type_name(&port->group, name, &nvmet_referral_type); 1798 1799 return &port->group; 1800 } 1801 1802 static struct configfs_group_operations nvmet_referral_group_ops = { 1803 .make_group = nvmet_referral_make, 1804 .disconnect_notify = nvmet_referral_notify, 1805 }; 1806 1807 static const struct config_item_type nvmet_referrals_type = { 1808 .ct_owner = THIS_MODULE, 1809 .ct_group_ops = &nvmet_referral_group_ops, 1810 }; 1811 1812 static struct nvmet_type_name_map nvmet_ana_state[] = { 1813 { NVME_ANA_OPTIMIZED, "optimized" }, 1814 { NVME_ANA_NONOPTIMIZED, "non-optimized" }, 1815 { NVME_ANA_INACCESSIBLE, "inaccessible" }, 1816 { NVME_ANA_PERSISTENT_LOSS, "persistent-loss" }, 1817 { NVME_ANA_CHANGE, "change" }, 1818 }; 1819 1820 static ssize_t nvmet_ana_group_ana_state_show(struct config_item *item, 1821 char *page) 1822 { 1823 struct nvmet_ana_group *grp = to_ana_group(item); 1824 enum nvme_ana_state state = grp->port->ana_state[grp->grpid]; 1825 int i; 1826 1827 for (i = 0; i < ARRAY_SIZE(nvmet_ana_state); i++) { 1828 if (state == nvmet_ana_state[i].type) 1829 return sprintf(page, "%s\n", nvmet_ana_state[i].name); 1830 } 1831 1832 return sprintf(page, "\n"); 1833 } 1834 1835 static ssize_t nvmet_ana_group_ana_state_store(struct config_item *item, 1836 const char *page, size_t count) 1837 { 1838 struct nvmet_ana_group *grp = to_ana_group(item); 1839 enum nvme_ana_state *ana_state = grp->port->ana_state; 1840 int i; 1841 1842 for (i = 0; i < ARRAY_SIZE(nvmet_ana_state); i++) { 1843 if (sysfs_streq(page, nvmet_ana_state[i].name)) 1844 goto found; 1845 } 1846 1847 pr_err("Invalid value '%s' for ana_state\n", page); 1848 return -EINVAL; 1849 1850 found: 1851 down_write(&nvmet_ana_sem); 1852 ana_state[grp->grpid] = (enum nvme_ana_state) nvmet_ana_state[i].type; 1853 nvmet_ana_chgcnt++; 1854 up_write(&nvmet_ana_sem); 1855 nvmet_port_send_ana_event(grp->port); 1856 return count; 1857 } 1858 1859 CONFIGFS_ATTR(nvmet_ana_group_, ana_state); 1860 1861 
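/*
 * Additional ANA groups are created by mkdir under a port's ana_groups/
 * directory and configured through the ana_state attribute defined above.
 * A minimal sketch, with illustrative paths assuming a port named "1":
 *
 *   mkdir /sys/kernel/config/nvmet/ports/1/ana_groups/2
 *   echo inaccessible > /sys/kernel/config/nvmet/ports/1/ana_groups/2/ana_state
 *
 * nvmet_ana_groups_make_group() below accepts group IDs 2..NVMET_MAX_ANAGRPS;
 * the default group (NVMET_DEFAULT_ANA_GRPID) is created together with the
 * port in nvmet_ports_make().
 */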
static struct configfs_attribute *nvmet_ana_group_attrs[] = { 1862 &nvmet_ana_group_attr_ana_state, 1863 NULL, 1864 }; 1865 1866 static void nvmet_ana_group_release(struct config_item *item) 1867 { 1868 struct nvmet_ana_group *grp = to_ana_group(item); 1869 1870 if (grp == &grp->port->ana_default_group) 1871 return; 1872 1873 down_write(&nvmet_ana_sem); 1874 grp->port->ana_state[grp->grpid] = NVME_ANA_INACCESSIBLE; 1875 nvmet_ana_group_enabled[grp->grpid]--; 1876 up_write(&nvmet_ana_sem); 1877 1878 nvmet_port_send_ana_event(grp->port); 1879 kfree(grp); 1880 } 1881 1882 static struct configfs_item_operations nvmet_ana_group_item_ops = { 1883 .release = nvmet_ana_group_release, 1884 }; 1885 1886 static const struct config_item_type nvmet_ana_group_type = { 1887 .ct_item_ops = &nvmet_ana_group_item_ops, 1888 .ct_attrs = nvmet_ana_group_attrs, 1889 .ct_owner = THIS_MODULE, 1890 }; 1891 1892 static struct config_group *nvmet_ana_groups_make_group( 1893 struct config_group *group, const char *name) 1894 { 1895 struct nvmet_port *port = ana_groups_to_port(&group->cg_item); 1896 struct nvmet_ana_group *grp; 1897 u32 grpid; 1898 int ret; 1899 1900 ret = kstrtou32(name, 0, &grpid); 1901 if (ret) 1902 goto out; 1903 1904 ret = -EINVAL; 1905 if (grpid <= 1 || grpid > NVMET_MAX_ANAGRPS) 1906 goto out; 1907 1908 ret = -ENOMEM; 1909 grp = kzalloc(sizeof(*grp), GFP_KERNEL); 1910 if (!grp) 1911 goto out; 1912 grp->port = port; 1913 grp->grpid = grpid; 1914 1915 down_write(&nvmet_ana_sem); 1916 grpid = array_index_nospec(grpid, NVMET_MAX_ANAGRPS); 1917 nvmet_ana_group_enabled[grpid]++; 1918 up_write(&nvmet_ana_sem); 1919 1920 nvmet_port_send_ana_event(grp->port); 1921 1922 config_group_init_type_name(&grp->group, name, &nvmet_ana_group_type); 1923 return &grp->group; 1924 out: 1925 return ERR_PTR(ret); 1926 } 1927 1928 static struct configfs_group_operations nvmet_ana_groups_group_ops = { 1929 .make_group = nvmet_ana_groups_make_group, 1930 }; 1931 1932 static const struct config_item_type nvmet_ana_groups_type = { 1933 .ct_group_ops = &nvmet_ana_groups_group_ops, 1934 .ct_owner = THIS_MODULE, 1935 }; 1936 1937 /* 1938 * Ports definitions. 
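 *
 * A port created here is not active until user space links a subsystem into
 * its subsystems/ directory.  A minimal sketch, assuming a subsystem named
 * "testnqn" (illustrative) already exists and the port's addr_* attributes
 * were set while it was still disabled:
 *
 *   mkdir /sys/kernel/config/nvmet/ports/2
 *   ln -s /sys/kernel/config/nvmet/subsystems/testnqn \
 *         /sys/kernel/config/nvmet/ports/2/subsystems/testnqn
 *
 * Each port is also created with a default ANA group and an ana_groups/
 * directory for additional groups (see the ANA definitions above).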
1939 */ 1940 static void nvmet_port_release(struct config_item *item) 1941 { 1942 struct nvmet_port *port = to_nvmet_port(item); 1943 1944 /* Let inflight controllers teardown complete */ 1945 flush_workqueue(nvmet_wq); 1946 list_del(&port->global_entry); 1947 1948 key_put(port->keyring); 1949 kfree(port->ana_state); 1950 kfree(port); 1951 } 1952 1953 static struct configfs_attribute *nvmet_port_attrs[] = { 1954 &nvmet_attr_addr_adrfam, 1955 &nvmet_attr_addr_treq, 1956 &nvmet_attr_addr_traddr, 1957 &nvmet_attr_addr_trsvcid, 1958 &nvmet_attr_addr_trtype, 1959 &nvmet_attr_addr_tsas, 1960 &nvmet_attr_param_inline_data_size, 1961 &nvmet_attr_param_max_queue_size, 1962 #ifdef CONFIG_BLK_DEV_INTEGRITY 1963 &nvmet_attr_param_pi_enable, 1964 #endif 1965 NULL, 1966 }; 1967 1968 static struct configfs_item_operations nvmet_port_item_ops = { 1969 .release = nvmet_port_release, 1970 }; 1971 1972 static const struct config_item_type nvmet_port_type = { 1973 .ct_attrs = nvmet_port_attrs, 1974 .ct_item_ops = &nvmet_port_item_ops, 1975 .ct_owner = THIS_MODULE, 1976 }; 1977 1978 static struct config_group *nvmet_ports_make(struct config_group *group, 1979 const char *name) 1980 { 1981 struct nvmet_port *port; 1982 u16 portid; 1983 u32 i; 1984 1985 if (kstrtou16(name, 0, &portid)) 1986 return ERR_PTR(-EINVAL); 1987 1988 port = kzalloc(sizeof(*port), GFP_KERNEL); 1989 if (!port) 1990 return ERR_PTR(-ENOMEM); 1991 1992 port->ana_state = kcalloc(NVMET_MAX_ANAGRPS + 1, 1993 sizeof(*port->ana_state), GFP_KERNEL); 1994 if (!port->ana_state) { 1995 kfree(port); 1996 return ERR_PTR(-ENOMEM); 1997 } 1998 1999 if (IS_ENABLED(CONFIG_NVME_TARGET_TCP_TLS) && nvme_keyring_id()) { 2000 port->keyring = key_lookup(nvme_keyring_id()); 2001 if (IS_ERR(port->keyring)) { 2002 pr_warn("NVMe keyring not available, disabling TLS\n"); 2003 port->keyring = NULL; 2004 } 2005 } 2006 2007 for (i = 1; i <= NVMET_MAX_ANAGRPS; i++) { 2008 if (i == NVMET_DEFAULT_ANA_GRPID) 2009 port->ana_state[1] = NVME_ANA_OPTIMIZED; 2010 else 2011 port->ana_state[i] = NVME_ANA_INACCESSIBLE; 2012 } 2013 2014 list_add(&port->global_entry, &nvmet_ports_list); 2015 2016 INIT_LIST_HEAD(&port->entry); 2017 INIT_LIST_HEAD(&port->subsystems); 2018 INIT_LIST_HEAD(&port->referrals); 2019 port->inline_data_size = -1; /* < 0 == let the transport choose */ 2020 port->max_queue_size = -1; /* < 0 == let the transport choose */ 2021 2022 port->disc_addr.portid = cpu_to_le16(portid); 2023 port->disc_addr.adrfam = NVMF_ADDR_FAMILY_MAX; 2024 port->disc_addr.treq = NVMF_TREQ_DISABLE_SQFLOW; 2025 config_group_init_type_name(&port->group, name, &nvmet_port_type); 2026 2027 config_group_init_type_name(&port->subsys_group, 2028 "subsystems", &nvmet_port_subsys_type); 2029 configfs_add_default_group(&port->subsys_group, &port->group); 2030 2031 config_group_init_type_name(&port->referrals_group, 2032 "referrals", &nvmet_referrals_type); 2033 configfs_add_default_group(&port->referrals_group, &port->group); 2034 2035 config_group_init_type_name(&port->ana_groups_group, 2036 "ana_groups", &nvmet_ana_groups_type); 2037 configfs_add_default_group(&port->ana_groups_group, &port->group); 2038 2039 port->ana_default_group.port = port; 2040 port->ana_default_group.grpid = NVMET_DEFAULT_ANA_GRPID; 2041 config_group_init_type_name(&port->ana_default_group.group, 2042 __stringify(NVMET_DEFAULT_ANA_GRPID), 2043 &nvmet_ana_group_type); 2044 configfs_add_default_group(&port->ana_default_group.group, 2045 &port->ana_groups_group); 2046 2047 return &port->group; 2048 } 2049 2050 static struct 
static struct configfs_group_operations nvmet_ports_group_ops = {
	.make_group		= nvmet_ports_make,
};

static const struct config_item_type nvmet_ports_type = {
	.ct_group_ops		= &nvmet_ports_group_ops,
	.ct_owner		= THIS_MODULE,
};

static struct config_group nvmet_subsystems_group;
static struct config_group nvmet_ports_group;

#ifdef CONFIG_NVME_TARGET_AUTH
static ssize_t nvmet_host_dhchap_key_show(struct config_item *item,
		char *page)
{
	u8 *dhchap_secret;
	ssize_t ret;

	down_read(&nvmet_config_sem);
	dhchap_secret = to_host(item)->dhchap_secret;
	if (!dhchap_secret)
		ret = sprintf(page, "\n");
	else
		ret = sprintf(page, "%s\n", dhchap_secret);
	up_read(&nvmet_config_sem);
	return ret;
}

static ssize_t nvmet_host_dhchap_key_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_host *host = to_host(item);
	int ret;

	ret = nvmet_auth_set_key(host, page, false);
	/*
	 * Re-authentication is a soft state, so keep the
	 * current authentication valid until the host
	 * requests re-authentication.
	 */
	return ret < 0 ? ret : count;
}

CONFIGFS_ATTR(nvmet_host_, dhchap_key);

static ssize_t nvmet_host_dhchap_ctrl_key_show(struct config_item *item,
		char *page)
{
	u8 *dhchap_secret;
	ssize_t ret;

	down_read(&nvmet_config_sem);
	dhchap_secret = to_host(item)->dhchap_ctrl_secret;
	if (!dhchap_secret)
		ret = sprintf(page, "\n");
	else
		ret = sprintf(page, "%s\n", dhchap_secret);
	up_read(&nvmet_config_sem);
	return ret;
}

static ssize_t nvmet_host_dhchap_ctrl_key_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_host *host = to_host(item);
	int ret;

	ret = nvmet_auth_set_key(host, page, true);
	/*
	 * Re-authentication is a soft state, so keep the
	 * current authentication valid until the host
	 * requests re-authentication.
	 */
	return ret < 0 ? ret : count;
}

CONFIGFS_ATTR(nvmet_host_, dhchap_ctrl_key);

static ssize_t nvmet_host_dhchap_hash_show(struct config_item *item,
		char *page)
{
	struct nvmet_host *host = to_host(item);
	const char *hash_name = nvme_auth_hmac_name(host->dhchap_hash_id);

	return sprintf(page, "%s\n", hash_name ? hash_name : "none");
}

static ssize_t nvmet_host_dhchap_hash_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_host *host = to_host(item);
	u8 hmac_id;

	hmac_id = nvme_auth_hmac_id(page);
	if (hmac_id == NVME_AUTH_HASH_INVALID)
		return -EINVAL;
	if (!crypto_has_shash(nvme_auth_hmac_name(hmac_id), 0, 0))
		return -ENOTSUPP;
	host->dhchap_hash_id = hmac_id;
	return count;
}

CONFIGFS_ATTR(nvmet_host_, dhchap_hash);
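/*
 * Sketch of intended usage, illustrative only: a host's DH-CHAP
 * parameters are configured by writing to the attributes above
 * (and the dhgroup attribute below), e.g.:
 *
 *	echo "DHHC-1:00:<base64 secret>:" > \
 *		/sys/kernel/config/nvmet/hosts/<hostnqn>/dhchap_key
 *	echo "hmac(sha512)" > \
 *		/sys/kernel/config/nvmet/hosts/<hostnqn>/dhchap_hash
 *
 * The secret format is validated by nvmet_auth_set_key(); the hash
 * name must be one known to nvme_auth_hmac_id() and backed by an
 * available shash implementation.
 */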
static ssize_t nvmet_host_dhchap_dhgroup_show(struct config_item *item,
		char *page)
{
	struct nvmet_host *host = to_host(item);
	const char *dhgroup = nvme_auth_dhgroup_name(host->dhchap_dhgroup_id);

	return sprintf(page, "%s\n", dhgroup ? dhgroup : "none");
}

static ssize_t nvmet_host_dhchap_dhgroup_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_host *host = to_host(item);
	int dhgroup_id;

	dhgroup_id = nvme_auth_dhgroup_id(page);
	if (dhgroup_id == NVME_AUTH_DHGROUP_INVALID)
		return -EINVAL;
	if (dhgroup_id != NVME_AUTH_DHGROUP_NULL) {
		const char *kpp = nvme_auth_dhgroup_kpp(dhgroup_id);

		if (!crypto_has_kpp(kpp, 0, 0))
			return -EINVAL;
	}
	host->dhchap_dhgroup_id = dhgroup_id;
	return count;
}

CONFIGFS_ATTR(nvmet_host_, dhchap_dhgroup);

static struct configfs_attribute *nvmet_host_attrs[] = {
	&nvmet_host_attr_dhchap_key,
	&nvmet_host_attr_dhchap_ctrl_key,
	&nvmet_host_attr_dhchap_hash,
	&nvmet_host_attr_dhchap_dhgroup,
	NULL,
};
#endif /* CONFIG_NVME_TARGET_AUTH */

static void nvmet_host_release(struct config_item *item)
{
	struct nvmet_host *host = to_host(item);

#ifdef CONFIG_NVME_TARGET_AUTH
	kfree(host->dhchap_secret);
	kfree(host->dhchap_ctrl_secret);
#endif
	kfree(host);
}

static struct configfs_item_operations nvmet_host_item_ops = {
	.release		= nvmet_host_release,
};

static const struct config_item_type nvmet_host_type = {
	.ct_item_ops		= &nvmet_host_item_ops,
#ifdef CONFIG_NVME_TARGET_AUTH
	.ct_attrs		= nvmet_host_attrs,
#endif
	.ct_owner		= THIS_MODULE,
};

static struct config_group *nvmet_hosts_make_group(struct config_group *group,
		const char *name)
{
	struct nvmet_host *host;

	host = kzalloc(sizeof(*host), GFP_KERNEL);
	if (!host)
		return ERR_PTR(-ENOMEM);

#ifdef CONFIG_NVME_TARGET_AUTH
	/* Default to SHA256 */
	host->dhchap_hash_id = NVME_AUTH_HASH_SHA256;
#endif

	config_group_init_type_name(&host->group, name, &nvmet_host_type);

	return &host->group;
}

static struct configfs_group_operations nvmet_hosts_group_ops = {
	.make_group		= nvmet_hosts_make_group,
};

static const struct config_item_type nvmet_hosts_type = {
	.ct_group_ops		= &nvmet_hosts_group_ops,
	.ct_owner		= THIS_MODULE,
};

static struct config_group nvmet_hosts_group;

static ssize_t nvmet_root_discovery_nqn_show(struct config_item *item,
		char *page)
{
	return snprintf(page, PAGE_SIZE, "%s\n", nvmet_disc_subsys->subsysnqn);
}

static ssize_t nvmet_root_discovery_nqn_store(struct config_item *item,
		const char *page, size_t count)
{
	struct list_head *entry;
	size_t len;

	len = strcspn(page, "\n");
	if (!len || len > NVMF_NQN_FIELD_LEN - 1)
		return -EINVAL;

	down_write(&nvmet_config_sem);
	list_for_each(entry, &nvmet_subsystems_group.cg_children) {
		struct config_item *item =
			container_of(entry, struct config_item, ci_entry);

		if (!strncmp(config_item_name(item), page, len)) {
			pr_err("duplicate NQN %s\n", config_item_name(item));
			up_write(&nvmet_config_sem);
			return -EINVAL;
		}
	}
	memset(nvmet_disc_subsys->subsysnqn, 0, NVMF_NQN_FIELD_LEN);
	memcpy(nvmet_disc_subsys->subsysnqn, page, len);
	up_write(&nvmet_config_sem);

	return len;
}

CONFIGFS_ATTR(nvmet_root_, discovery_nqn);
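/*
 * Illustrative only (the NQN below is a made-up example): writing a
 * unique discovery NQN replaces the well-known
 * "nqn.2014-08.org.nvmexpress.discovery" reported by the discovery
 * controller, provided no configured subsystem already uses that name:
 *
 *	echo "nqn.2024-01.com.example:discovery" > \
 *		/sys/kernel/config/nvmet/discovery_nqn
 */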
static struct configfs_attribute *nvmet_root_attrs[] = {
	&nvmet_root_attr_discovery_nqn,
	NULL,
};

static const struct config_item_type nvmet_root_type = {
	.ct_attrs		= nvmet_root_attrs,
	.ct_owner		= THIS_MODULE,
};

static struct configfs_subsystem nvmet_configfs_subsystem = {
	.su_group = {
		.cg_item = {
			.ci_namebuf	= "nvmet",
			.ci_type	= &nvmet_root_type,
		},
	},
};

int __init nvmet_init_configfs(void)
{
	int ret;

	config_group_init(&nvmet_configfs_subsystem.su_group);
	mutex_init(&nvmet_configfs_subsystem.su_mutex);

	config_group_init_type_name(&nvmet_subsystems_group,
			"subsystems", &nvmet_subsystems_type);
	configfs_add_default_group(&nvmet_subsystems_group,
			&nvmet_configfs_subsystem.su_group);

	config_group_init_type_name(&nvmet_ports_group,
			"ports", &nvmet_ports_type);
	configfs_add_default_group(&nvmet_ports_group,
			&nvmet_configfs_subsystem.su_group);

	config_group_init_type_name(&nvmet_hosts_group,
			"hosts", &nvmet_hosts_type);
	configfs_add_default_group(&nvmet_hosts_group,
			&nvmet_configfs_subsystem.su_group);

	ret = configfs_register_subsystem(&nvmet_configfs_subsystem);
	if (ret) {
		pr_err("configfs_register_subsystem: %d\n", ret);
		return ret;
	}

	return 0;
}

void __exit nvmet_exit_configfs(void)
{
	configfs_unregister_subsystem(&nvmet_configfs_subsystem);
}
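/*
 * For reference, an illustrative listing of the registered tree after
 * loading the target and mounting configfs (exact contents depend on
 * kernel configuration):
 *
 *	mount -t configfs none /sys/kernel/config
 *	ls /sys/kernel/config/nvmet
 *	  discovery_nqn  hosts  ports  subsystems
 */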