// SPDX-License-Identifier: GPL-2.0
/*
 * Configfs interface for the NVMe target.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kstrtox.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/ctype.h>
#include <linux/pci.h>
#include <linux/pci-p2pdma.h>
#ifdef CONFIG_NVME_TARGET_AUTH
#include <linux/nvme-auth.h>
#endif
#include <linux/nvme-keyring.h>
#include <crypto/hash.h>
#include <crypto/kpp.h>
#include <linux/nospec.h>

#include "nvmet.h"

static const struct config_item_type nvmet_host_type;
static const struct config_item_type nvmet_subsys_type;

static LIST_HEAD(nvmet_ports_list);
struct list_head *nvmet_ports = &nvmet_ports_list;

struct nvmet_type_name_map {
	u8 type;
	const char *name;
};

static struct nvmet_type_name_map nvmet_transport[] = {
	{ NVMF_TRTYPE_RDMA,	"rdma" },
	{ NVMF_TRTYPE_FC,	"fc" },
	{ NVMF_TRTYPE_TCP,	"tcp" },
	{ NVMF_TRTYPE_LOOP,	"loop" },
};

static const struct nvmet_type_name_map nvmet_addr_family[] = {
	{ NVMF_ADDR_FAMILY_PCI,	"pcie" },
	{ NVMF_ADDR_FAMILY_IP4,	"ipv4" },
	{ NVMF_ADDR_FAMILY_IP6,	"ipv6" },
	{ NVMF_ADDR_FAMILY_IB,	"ib" },
	{ NVMF_ADDR_FAMILY_FC,	"fc" },
	{ NVMF_ADDR_FAMILY_LOOP, "loop" },
};

static bool nvmet_is_port_enabled(struct nvmet_port *p, const char *caller)
{
	if (p->enabled)
		pr_err("Disable port '%u' before changing attribute in %s\n",
		       le16_to_cpu(p->disc_addr.portid), caller);
	return p->enabled;
}

/*
 * nvmet_port Generic ConfigFS definitions.
 * Used in any place in the ConfigFS tree that refers to an address.
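 *
 * Illustrative only (not part of the original file): with configfs mounted at
 * the usual /sys/kernel/config, these address attributes are written from
 * user space before the port is enabled, e.g.
 *
 *	mkdir /sys/kernel/config/nvmet/ports/1
 *	echo tcp          > /sys/kernel/config/nvmet/ports/1/addr_trtype
 *	echo ipv4         > /sys/kernel/config/nvmet/ports/1/addr_adrfam
 *	echo 192.168.1.10 > /sys/kernel/config/nvmet/ports/1/addr_traddr
 *	echo 4420         > /sys/kernel/config/nvmet/ports/1/addr_trsvcid
 *
 * (Port number and address values above are example data only.)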
63 */ 64 static ssize_t nvmet_addr_adrfam_show(struct config_item *item, char *page) 65 { 66 u8 adrfam = to_nvmet_port(item)->disc_addr.adrfam; 67 int i; 68 69 for (i = 1; i < ARRAY_SIZE(nvmet_addr_family); i++) { 70 if (nvmet_addr_family[i].type == adrfam) 71 return snprintf(page, PAGE_SIZE, "%s\n", 72 nvmet_addr_family[i].name); 73 } 74 75 return snprintf(page, PAGE_SIZE, "\n"); 76 } 77 78 static ssize_t nvmet_addr_adrfam_store(struct config_item *item, 79 const char *page, size_t count) 80 { 81 struct nvmet_port *port = to_nvmet_port(item); 82 int i; 83 84 if (nvmet_is_port_enabled(port, __func__)) 85 return -EACCES; 86 87 for (i = 1; i < ARRAY_SIZE(nvmet_addr_family); i++) { 88 if (sysfs_streq(page, nvmet_addr_family[i].name)) 89 goto found; 90 } 91 92 pr_err("Invalid value '%s' for adrfam\n", page); 93 return -EINVAL; 94 95 found: 96 port->disc_addr.adrfam = nvmet_addr_family[i].type; 97 return count; 98 } 99 100 CONFIGFS_ATTR(nvmet_, addr_adrfam); 101 102 static ssize_t nvmet_addr_portid_show(struct config_item *item, 103 char *page) 104 { 105 __le16 portid = to_nvmet_port(item)->disc_addr.portid; 106 107 return snprintf(page, PAGE_SIZE, "%d\n", le16_to_cpu(portid)); 108 } 109 110 static ssize_t nvmet_addr_portid_store(struct config_item *item, 111 const char *page, size_t count) 112 { 113 struct nvmet_port *port = to_nvmet_port(item); 114 u16 portid = 0; 115 116 if (kstrtou16(page, 0, &portid)) { 117 pr_err("Invalid value '%s' for portid\n", page); 118 return -EINVAL; 119 } 120 121 if (nvmet_is_port_enabled(port, __func__)) 122 return -EACCES; 123 124 port->disc_addr.portid = cpu_to_le16(portid); 125 return count; 126 } 127 128 CONFIGFS_ATTR(nvmet_, addr_portid); 129 130 static ssize_t nvmet_addr_traddr_show(struct config_item *item, 131 char *page) 132 { 133 struct nvmet_port *port = to_nvmet_port(item); 134 135 return snprintf(page, PAGE_SIZE, "%s\n", port->disc_addr.traddr); 136 } 137 138 static ssize_t nvmet_addr_traddr_store(struct config_item *item, 139 const char *page, size_t count) 140 { 141 struct nvmet_port *port = to_nvmet_port(item); 142 143 if (count > NVMF_TRADDR_SIZE) { 144 pr_err("Invalid value '%s' for traddr\n", page); 145 return -EINVAL; 146 } 147 148 if (nvmet_is_port_enabled(port, __func__)) 149 return -EACCES; 150 151 if (sscanf(page, "%s\n", port->disc_addr.traddr) != 1) 152 return -EINVAL; 153 return count; 154 } 155 156 CONFIGFS_ATTR(nvmet_, addr_traddr); 157 158 static const struct nvmet_type_name_map nvmet_addr_treq[] = { 159 { NVMF_TREQ_NOT_SPECIFIED, "not specified" }, 160 { NVMF_TREQ_REQUIRED, "required" }, 161 { NVMF_TREQ_NOT_REQUIRED, "not required" }, 162 }; 163 164 static inline u8 nvmet_port_disc_addr_treq_mask(struct nvmet_port *port) 165 { 166 return (port->disc_addr.treq & ~NVME_TREQ_SECURE_CHANNEL_MASK); 167 } 168 169 static ssize_t nvmet_addr_treq_show(struct config_item *item, char *page) 170 { 171 u8 treq = nvmet_port_disc_addr_treq_secure_channel(to_nvmet_port(item)); 172 int i; 173 174 for (i = 0; i < ARRAY_SIZE(nvmet_addr_treq); i++) { 175 if (treq == nvmet_addr_treq[i].type) 176 return snprintf(page, PAGE_SIZE, "%s\n", 177 nvmet_addr_treq[i].name); 178 } 179 180 return snprintf(page, PAGE_SIZE, "\n"); 181 } 182 183 static ssize_t nvmet_addr_treq_store(struct config_item *item, 184 const char *page, size_t count) 185 { 186 struct nvmet_port *port = to_nvmet_port(item); 187 u8 treq = nvmet_port_disc_addr_treq_mask(port); 188 int i; 189 190 if (nvmet_is_port_enabled(port, __func__)) 191 return -EACCES; 192 193 for (i = 0; i < 
ARRAY_SIZE(nvmet_addr_treq); i++) { 194 if (sysfs_streq(page, nvmet_addr_treq[i].name)) 195 goto found; 196 } 197 198 pr_err("Invalid value '%s' for treq\n", page); 199 return -EINVAL; 200 201 found: 202 if (port->disc_addr.trtype == NVMF_TRTYPE_TCP && 203 port->disc_addr.tsas.tcp.sectype == NVMF_TCP_SECTYPE_TLS13) { 204 switch (nvmet_addr_treq[i].type) { 205 case NVMF_TREQ_NOT_SPECIFIED: 206 pr_debug("treq '%s' not allowed for TLS1.3\n", 207 nvmet_addr_treq[i].name); 208 return -EINVAL; 209 case NVMF_TREQ_NOT_REQUIRED: 210 pr_warn("Allow non-TLS connections while TLS1.3 is enabled\n"); 211 break; 212 default: 213 break; 214 } 215 } 216 treq |= nvmet_addr_treq[i].type; 217 port->disc_addr.treq = treq; 218 return count; 219 } 220 221 CONFIGFS_ATTR(nvmet_, addr_treq); 222 223 static ssize_t nvmet_addr_trsvcid_show(struct config_item *item, 224 char *page) 225 { 226 struct nvmet_port *port = to_nvmet_port(item); 227 228 return snprintf(page, PAGE_SIZE, "%s\n", port->disc_addr.trsvcid); 229 } 230 231 static ssize_t nvmet_addr_trsvcid_store(struct config_item *item, 232 const char *page, size_t count) 233 { 234 struct nvmet_port *port = to_nvmet_port(item); 235 236 if (count > NVMF_TRSVCID_SIZE) { 237 pr_err("Invalid value '%s' for trsvcid\n", page); 238 return -EINVAL; 239 } 240 if (nvmet_is_port_enabled(port, __func__)) 241 return -EACCES; 242 243 if (sscanf(page, "%s\n", port->disc_addr.trsvcid) != 1) 244 return -EINVAL; 245 return count; 246 } 247 248 CONFIGFS_ATTR(nvmet_, addr_trsvcid); 249 250 static ssize_t nvmet_param_inline_data_size_show(struct config_item *item, 251 char *page) 252 { 253 struct nvmet_port *port = to_nvmet_port(item); 254 255 return snprintf(page, PAGE_SIZE, "%d\n", port->inline_data_size); 256 } 257 258 static ssize_t nvmet_param_inline_data_size_store(struct config_item *item, 259 const char *page, size_t count) 260 { 261 struct nvmet_port *port = to_nvmet_port(item); 262 int ret; 263 264 if (nvmet_is_port_enabled(port, __func__)) 265 return -EACCES; 266 ret = kstrtoint(page, 0, &port->inline_data_size); 267 if (ret) { 268 pr_err("Invalid value '%s' for inline_data_size\n", page); 269 return -EINVAL; 270 } 271 return count; 272 } 273 274 CONFIGFS_ATTR(nvmet_, param_inline_data_size); 275 276 static ssize_t nvmet_param_max_queue_size_show(struct config_item *item, 277 char *page) 278 { 279 struct nvmet_port *port = to_nvmet_port(item); 280 281 return snprintf(page, PAGE_SIZE, "%d\n", port->max_queue_size); 282 } 283 284 static ssize_t nvmet_param_max_queue_size_store(struct config_item *item, 285 const char *page, size_t count) 286 { 287 struct nvmet_port *port = to_nvmet_port(item); 288 int ret; 289 290 if (nvmet_is_port_enabled(port, __func__)) 291 return -EACCES; 292 ret = kstrtoint(page, 0, &port->max_queue_size); 293 if (ret) { 294 pr_err("Invalid value '%s' for max_queue_size\n", page); 295 return -EINVAL; 296 } 297 return count; 298 } 299 300 CONFIGFS_ATTR(nvmet_, param_max_queue_size); 301 302 #ifdef CONFIG_BLK_DEV_INTEGRITY 303 static ssize_t nvmet_param_pi_enable_show(struct config_item *item, 304 char *page) 305 { 306 struct nvmet_port *port = to_nvmet_port(item); 307 308 return snprintf(page, PAGE_SIZE, "%d\n", port->pi_enable); 309 } 310 311 static ssize_t nvmet_param_pi_enable_store(struct config_item *item, 312 const char *page, size_t count) 313 { 314 struct nvmet_port *port = to_nvmet_port(item); 315 bool val; 316 317 if (kstrtobool(page, &val)) 318 return -EINVAL; 319 320 if (nvmet_is_port_enabled(port, __func__)) 321 return -EACCES; 322 323 
port->pi_enable = val; 324 return count; 325 } 326 327 CONFIGFS_ATTR(nvmet_, param_pi_enable); 328 #endif 329 330 static ssize_t nvmet_addr_trtype_show(struct config_item *item, 331 char *page) 332 { 333 struct nvmet_port *port = to_nvmet_port(item); 334 int i; 335 336 for (i = 0; i < ARRAY_SIZE(nvmet_transport); i++) { 337 if (port->disc_addr.trtype == nvmet_transport[i].type) 338 return snprintf(page, PAGE_SIZE, 339 "%s\n", nvmet_transport[i].name); 340 } 341 342 return sprintf(page, "\n"); 343 } 344 345 static void nvmet_port_init_tsas_rdma(struct nvmet_port *port) 346 { 347 port->disc_addr.tsas.rdma.qptype = NVMF_RDMA_QPTYPE_CONNECTED; 348 port->disc_addr.tsas.rdma.prtype = NVMF_RDMA_PRTYPE_NOT_SPECIFIED; 349 port->disc_addr.tsas.rdma.cms = NVMF_RDMA_CMS_RDMA_CM; 350 } 351 352 static void nvmet_port_init_tsas_tcp(struct nvmet_port *port, int sectype) 353 { 354 port->disc_addr.tsas.tcp.sectype = sectype; 355 } 356 357 static ssize_t nvmet_addr_trtype_store(struct config_item *item, 358 const char *page, size_t count) 359 { 360 struct nvmet_port *port = to_nvmet_port(item); 361 int i; 362 363 if (nvmet_is_port_enabled(port, __func__)) 364 return -EACCES; 365 366 for (i = 0; i < ARRAY_SIZE(nvmet_transport); i++) { 367 if (sysfs_streq(page, nvmet_transport[i].name)) 368 goto found; 369 } 370 371 pr_err("Invalid value '%s' for trtype\n", page); 372 return -EINVAL; 373 374 found: 375 memset(&port->disc_addr.tsas, 0, NVMF_TSAS_SIZE); 376 port->disc_addr.trtype = nvmet_transport[i].type; 377 if (port->disc_addr.trtype == NVMF_TRTYPE_RDMA) 378 nvmet_port_init_tsas_rdma(port); 379 else if (port->disc_addr.trtype == NVMF_TRTYPE_TCP) 380 nvmet_port_init_tsas_tcp(port, NVMF_TCP_SECTYPE_NONE); 381 return count; 382 } 383 384 CONFIGFS_ATTR(nvmet_, addr_trtype); 385 386 static const struct nvmet_type_name_map nvmet_addr_tsas_tcp[] = { 387 { NVMF_TCP_SECTYPE_NONE, "none" }, 388 { NVMF_TCP_SECTYPE_TLS13, "tls1.3" }, 389 }; 390 391 static const struct nvmet_type_name_map nvmet_addr_tsas_rdma[] = { 392 { NVMF_RDMA_QPTYPE_CONNECTED, "connected" }, 393 { NVMF_RDMA_QPTYPE_DATAGRAM, "datagram" }, 394 }; 395 396 static ssize_t nvmet_addr_tsas_show(struct config_item *item, 397 char *page) 398 { 399 struct nvmet_port *port = to_nvmet_port(item); 400 int i; 401 402 if (port->disc_addr.trtype == NVMF_TRTYPE_TCP) { 403 for (i = 0; i < ARRAY_SIZE(nvmet_addr_tsas_tcp); i++) { 404 if (port->disc_addr.tsas.tcp.sectype == nvmet_addr_tsas_tcp[i].type) 405 return sprintf(page, "%s\n", nvmet_addr_tsas_tcp[i].name); 406 } 407 } else if (port->disc_addr.trtype == NVMF_TRTYPE_RDMA) { 408 for (i = 0; i < ARRAY_SIZE(nvmet_addr_tsas_rdma); i++) { 409 if (port->disc_addr.tsas.rdma.qptype == nvmet_addr_tsas_rdma[i].type) 410 return sprintf(page, "%s\n", nvmet_addr_tsas_rdma[i].name); 411 } 412 } 413 return sprintf(page, "\n"); 414 } 415 416 static u8 nvmet_addr_tsas_rdma_store(const char *page) 417 { 418 int i; 419 420 for (i = 0; i < ARRAY_SIZE(nvmet_addr_tsas_rdma); i++) { 421 if (sysfs_streq(page, nvmet_addr_tsas_rdma[i].name)) 422 return nvmet_addr_tsas_rdma[i].type; 423 } 424 return NVMF_RDMA_QPTYPE_INVALID; 425 } 426 427 static u8 nvmet_addr_tsas_tcp_store(const char *page) 428 { 429 int i; 430 431 for (i = 0; i < ARRAY_SIZE(nvmet_addr_tsas_tcp); i++) { 432 if (sysfs_streq(page, nvmet_addr_tsas_tcp[i].name)) 433 return nvmet_addr_tsas_tcp[i].type; 434 } 435 return NVMF_TCP_SECTYPE_INVALID; 436 } 437 438 static ssize_t nvmet_addr_tsas_store(struct config_item *item, 439 const char *page, size_t count) 440 { 441 struct 
nvmet_port *port = to_nvmet_port(item); 442 u8 treq = nvmet_port_disc_addr_treq_mask(port); 443 u8 sectype, qptype; 444 445 if (nvmet_is_port_enabled(port, __func__)) 446 return -EACCES; 447 448 if (port->disc_addr.trtype == NVMF_TRTYPE_RDMA) { 449 qptype = nvmet_addr_tsas_rdma_store(page); 450 if (qptype == port->disc_addr.tsas.rdma.qptype) 451 return count; 452 } else if (port->disc_addr.trtype == NVMF_TRTYPE_TCP) { 453 sectype = nvmet_addr_tsas_tcp_store(page); 454 if (sectype != NVMF_TCP_SECTYPE_INVALID) 455 goto found; 456 } 457 458 pr_err("Invalid value '%s' for tsas\n", page); 459 return -EINVAL; 460 461 found: 462 if (sectype == NVMF_TCP_SECTYPE_TLS13) { 463 if (!IS_ENABLED(CONFIG_NVME_TARGET_TCP_TLS)) { 464 pr_err("TLS is not supported\n"); 465 return -EINVAL; 466 } 467 if (!port->keyring) { 468 pr_err("TLS keyring not configured\n"); 469 return -EINVAL; 470 } 471 } 472 473 nvmet_port_init_tsas_tcp(port, sectype); 474 /* 475 * If TLS is enabled TREQ should be set to 'required' per default 476 */ 477 if (sectype == NVMF_TCP_SECTYPE_TLS13) { 478 u8 sc = nvmet_port_disc_addr_treq_secure_channel(port); 479 480 if (sc == NVMF_TREQ_NOT_SPECIFIED) 481 treq |= NVMF_TREQ_REQUIRED; 482 else 483 treq |= sc; 484 } else { 485 treq |= NVMF_TREQ_NOT_SPECIFIED; 486 } 487 port->disc_addr.treq = treq; 488 return count; 489 } 490 491 CONFIGFS_ATTR(nvmet_, addr_tsas); 492 493 /* 494 * Namespace structures & file operation functions below 495 */ 496 static ssize_t nvmet_ns_device_path_show(struct config_item *item, char *page) 497 { 498 return sprintf(page, "%s\n", to_nvmet_ns(item)->device_path); 499 } 500 501 static ssize_t nvmet_ns_device_path_store(struct config_item *item, 502 const char *page, size_t count) 503 { 504 struct nvmet_ns *ns = to_nvmet_ns(item); 505 struct nvmet_subsys *subsys = ns->subsys; 506 size_t len; 507 int ret; 508 509 mutex_lock(&subsys->lock); 510 ret = -EBUSY; 511 if (ns->enabled) 512 goto out_unlock; 513 514 ret = -EINVAL; 515 len = strcspn(page, "\n"); 516 if (!len) 517 goto out_unlock; 518 519 kfree(ns->device_path); 520 ret = -ENOMEM; 521 ns->device_path = kmemdup_nul(page, len, GFP_KERNEL); 522 if (!ns->device_path) 523 goto out_unlock; 524 525 mutex_unlock(&subsys->lock); 526 return count; 527 528 out_unlock: 529 mutex_unlock(&subsys->lock); 530 return ret; 531 } 532 533 CONFIGFS_ATTR(nvmet_ns_, device_path); 534 535 #ifdef CONFIG_PCI_P2PDMA 536 static ssize_t nvmet_ns_p2pmem_show(struct config_item *item, char *page) 537 { 538 struct nvmet_ns *ns = to_nvmet_ns(item); 539 540 return pci_p2pdma_enable_show(page, ns->p2p_dev, ns->use_p2pmem); 541 } 542 543 static ssize_t nvmet_ns_p2pmem_store(struct config_item *item, 544 const char *page, size_t count) 545 { 546 struct nvmet_ns *ns = to_nvmet_ns(item); 547 struct pci_dev *p2p_dev = NULL; 548 bool use_p2pmem; 549 int ret = count; 550 int error; 551 552 mutex_lock(&ns->subsys->lock); 553 if (ns->enabled) { 554 ret = -EBUSY; 555 goto out_unlock; 556 } 557 558 error = pci_p2pdma_enable_store(page, &p2p_dev, &use_p2pmem); 559 if (error) { 560 ret = error; 561 goto out_unlock; 562 } 563 564 ns->use_p2pmem = use_p2pmem; 565 pci_dev_put(ns->p2p_dev); 566 ns->p2p_dev = p2p_dev; 567 568 out_unlock: 569 mutex_unlock(&ns->subsys->lock); 570 571 return ret; 572 } 573 574 CONFIGFS_ATTR(nvmet_ns_, p2pmem); 575 #endif /* CONFIG_PCI_P2PDMA */ 576 577 static ssize_t nvmet_ns_device_uuid_show(struct config_item *item, char *page) 578 { 579 return sprintf(page, "%pUb\n", &to_nvmet_ns(item)->uuid); 580 } 581 582 static ssize_t 
nvmet_ns_device_uuid_store(struct config_item *item, 583 const char *page, size_t count) 584 { 585 struct nvmet_ns *ns = to_nvmet_ns(item); 586 struct nvmet_subsys *subsys = ns->subsys; 587 int ret = 0; 588 589 mutex_lock(&subsys->lock); 590 if (ns->enabled) { 591 ret = -EBUSY; 592 goto out_unlock; 593 } 594 595 if (uuid_parse(page, &ns->uuid)) 596 ret = -EINVAL; 597 598 out_unlock: 599 mutex_unlock(&subsys->lock); 600 return ret ? ret : count; 601 } 602 603 CONFIGFS_ATTR(nvmet_ns_, device_uuid); 604 605 static ssize_t nvmet_ns_device_nguid_show(struct config_item *item, char *page) 606 { 607 return sprintf(page, "%pUb\n", &to_nvmet_ns(item)->nguid); 608 } 609 610 static ssize_t nvmet_ns_device_nguid_store(struct config_item *item, 611 const char *page, size_t count) 612 { 613 struct nvmet_ns *ns = to_nvmet_ns(item); 614 struct nvmet_subsys *subsys = ns->subsys; 615 u8 nguid[16]; 616 const char *p = page; 617 int i; 618 int ret = 0; 619 620 mutex_lock(&subsys->lock); 621 if (ns->enabled) { 622 ret = -EBUSY; 623 goto out_unlock; 624 } 625 626 for (i = 0; i < 16; i++) { 627 if (p + 2 > page + count) { 628 ret = -EINVAL; 629 goto out_unlock; 630 } 631 if (!isxdigit(p[0]) || !isxdigit(p[1])) { 632 ret = -EINVAL; 633 goto out_unlock; 634 } 635 636 nguid[i] = (hex_to_bin(p[0]) << 4) | hex_to_bin(p[1]); 637 p += 2; 638 639 if (*p == '-' || *p == ':') 640 p++; 641 } 642 643 memcpy(&ns->nguid, nguid, sizeof(nguid)); 644 out_unlock: 645 mutex_unlock(&subsys->lock); 646 return ret ? ret : count; 647 } 648 649 CONFIGFS_ATTR(nvmet_ns_, device_nguid); 650 651 static ssize_t nvmet_ns_ana_grpid_show(struct config_item *item, char *page) 652 { 653 return sprintf(page, "%u\n", to_nvmet_ns(item)->anagrpid); 654 } 655 656 static ssize_t nvmet_ns_ana_grpid_store(struct config_item *item, 657 const char *page, size_t count) 658 { 659 struct nvmet_ns *ns = to_nvmet_ns(item); 660 u32 oldgrpid, newgrpid; 661 int ret; 662 663 ret = kstrtou32(page, 0, &newgrpid); 664 if (ret) 665 return ret; 666 667 if (newgrpid < 1 || newgrpid > NVMET_MAX_ANAGRPS) 668 return -EINVAL; 669 670 down_write(&nvmet_ana_sem); 671 oldgrpid = ns->anagrpid; 672 newgrpid = array_index_nospec(newgrpid, NVMET_MAX_ANAGRPS); 673 nvmet_ana_group_enabled[newgrpid]++; 674 ns->anagrpid = newgrpid; 675 nvmet_ana_group_enabled[oldgrpid]--; 676 nvmet_ana_chgcnt++; 677 up_write(&nvmet_ana_sem); 678 679 nvmet_send_ana_event(ns->subsys, NULL); 680 return count; 681 } 682 683 CONFIGFS_ATTR(nvmet_ns_, ana_grpid); 684 685 static ssize_t nvmet_ns_enable_show(struct config_item *item, char *page) 686 { 687 return sprintf(page, "%d\n", to_nvmet_ns(item)->enabled); 688 } 689 690 static ssize_t nvmet_ns_enable_store(struct config_item *item, 691 const char *page, size_t count) 692 { 693 struct nvmet_ns *ns = to_nvmet_ns(item); 694 bool enable; 695 int ret = 0; 696 697 if (kstrtobool(page, &enable)) 698 return -EINVAL; 699 700 /* 701 * take a global nvmet_config_sem because the disable routine has a 702 * window where it releases the subsys-lock, giving a chance to 703 * a parallel enable to concurrently execute causing the disable to 704 * have a misaccounting of the ns percpu_ref. 705 */ 706 down_write(&nvmet_config_sem); 707 if (enable) 708 ret = nvmet_ns_enable(ns); 709 else 710 nvmet_ns_disable(ns); 711 up_write(&nvmet_config_sem); 712 713 return ret ? 
ret : count; 714 } 715 716 CONFIGFS_ATTR(nvmet_ns_, enable); 717 718 static ssize_t nvmet_ns_buffered_io_show(struct config_item *item, char *page) 719 { 720 return sprintf(page, "%d\n", to_nvmet_ns(item)->buffered_io); 721 } 722 723 static ssize_t nvmet_ns_buffered_io_store(struct config_item *item, 724 const char *page, size_t count) 725 { 726 struct nvmet_ns *ns = to_nvmet_ns(item); 727 bool val; 728 729 if (kstrtobool(page, &val)) 730 return -EINVAL; 731 732 mutex_lock(&ns->subsys->lock); 733 if (ns->enabled) { 734 pr_err("disable ns before setting buffered_io value.\n"); 735 mutex_unlock(&ns->subsys->lock); 736 return -EINVAL; 737 } 738 739 ns->buffered_io = val; 740 mutex_unlock(&ns->subsys->lock); 741 return count; 742 } 743 744 CONFIGFS_ATTR(nvmet_ns_, buffered_io); 745 746 static ssize_t nvmet_ns_revalidate_size_store(struct config_item *item, 747 const char *page, size_t count) 748 { 749 struct nvmet_ns *ns = to_nvmet_ns(item); 750 bool val; 751 752 if (kstrtobool(page, &val)) 753 return -EINVAL; 754 755 if (!val) 756 return -EINVAL; 757 758 mutex_lock(&ns->subsys->lock); 759 if (!ns->enabled) { 760 pr_err("enable ns before revalidate.\n"); 761 mutex_unlock(&ns->subsys->lock); 762 return -EINVAL; 763 } 764 if (nvmet_ns_revalidate(ns)) 765 nvmet_ns_changed(ns->subsys, ns->nsid); 766 mutex_unlock(&ns->subsys->lock); 767 return count; 768 } 769 770 CONFIGFS_ATTR_WO(nvmet_ns_, revalidate_size); 771 772 static ssize_t nvmet_ns_resv_enable_show(struct config_item *item, char *page) 773 { 774 return sysfs_emit(page, "%d\n", to_nvmet_ns(item)->pr.enable); 775 } 776 777 static ssize_t nvmet_ns_resv_enable_store(struct config_item *item, 778 const char *page, size_t count) 779 { 780 struct nvmet_ns *ns = to_nvmet_ns(item); 781 bool val; 782 783 if (kstrtobool(page, &val)) 784 return -EINVAL; 785 786 mutex_lock(&ns->subsys->lock); 787 if (ns->enabled) { 788 pr_err("the ns:%d is already enabled.\n", ns->nsid); 789 mutex_unlock(&ns->subsys->lock); 790 return -EINVAL; 791 } 792 ns->pr.enable = val; 793 mutex_unlock(&ns->subsys->lock); 794 return count; 795 } 796 CONFIGFS_ATTR(nvmet_ns_, resv_enable); 797 798 static struct configfs_attribute *nvmet_ns_attrs[] = { 799 &nvmet_ns_attr_device_path, 800 &nvmet_ns_attr_device_nguid, 801 &nvmet_ns_attr_device_uuid, 802 &nvmet_ns_attr_ana_grpid, 803 &nvmet_ns_attr_enable, 804 &nvmet_ns_attr_buffered_io, 805 &nvmet_ns_attr_revalidate_size, 806 &nvmet_ns_attr_resv_enable, 807 #ifdef CONFIG_PCI_P2PDMA 808 &nvmet_ns_attr_p2pmem, 809 #endif 810 NULL, 811 }; 812 813 static void nvmet_ns_release(struct config_item *item) 814 { 815 struct nvmet_ns *ns = to_nvmet_ns(item); 816 817 nvmet_ns_free(ns); 818 } 819 820 static struct configfs_item_operations nvmet_ns_item_ops = { 821 .release = nvmet_ns_release, 822 }; 823 824 static const struct config_item_type nvmet_ns_type = { 825 .ct_item_ops = &nvmet_ns_item_ops, 826 .ct_attrs = nvmet_ns_attrs, 827 .ct_owner = THIS_MODULE, 828 }; 829 830 static struct config_group *nvmet_ns_make(struct config_group *group, 831 const char *name) 832 { 833 struct nvmet_subsys *subsys = namespaces_to_subsys(&group->cg_item); 834 struct nvmet_ns *ns; 835 int ret; 836 u32 nsid; 837 838 ret = kstrtou32(name, 0, &nsid); 839 if (ret) 840 goto out; 841 842 ret = -EINVAL; 843 if (nsid == 0 || nsid == NVME_NSID_ALL) { 844 pr_err("invalid nsid %#x", nsid); 845 goto out; 846 } 847 848 ret = -ENOMEM; 849 ns = nvmet_ns_alloc(subsys, nsid); 850 if (!ns) 851 goto out; 852 config_group_init_type_name(&ns->group, name, &nvmet_ns_type); 853 854 
pr_info("adding nsid %d to subsystem %s\n", nsid, subsys->subsysnqn); 855 856 return &ns->group; 857 out: 858 return ERR_PTR(ret); 859 } 860 861 static struct configfs_group_operations nvmet_namespaces_group_ops = { 862 .make_group = nvmet_ns_make, 863 }; 864 865 static const struct config_item_type nvmet_namespaces_type = { 866 .ct_group_ops = &nvmet_namespaces_group_ops, 867 .ct_owner = THIS_MODULE, 868 }; 869 870 #ifdef CONFIG_NVME_TARGET_PASSTHRU 871 872 static ssize_t nvmet_passthru_device_path_show(struct config_item *item, 873 char *page) 874 { 875 struct nvmet_subsys *subsys = to_subsys(item->ci_parent); 876 877 return snprintf(page, PAGE_SIZE, "%s\n", subsys->passthru_ctrl_path); 878 } 879 880 static ssize_t nvmet_passthru_device_path_store(struct config_item *item, 881 const char *page, size_t count) 882 { 883 struct nvmet_subsys *subsys = to_subsys(item->ci_parent); 884 size_t len; 885 int ret; 886 887 mutex_lock(&subsys->lock); 888 889 ret = -EBUSY; 890 if (subsys->passthru_ctrl) 891 goto out_unlock; 892 893 ret = -EINVAL; 894 len = strcspn(page, "\n"); 895 if (!len) 896 goto out_unlock; 897 898 kfree(subsys->passthru_ctrl_path); 899 ret = -ENOMEM; 900 subsys->passthru_ctrl_path = kstrndup(page, len, GFP_KERNEL); 901 if (!subsys->passthru_ctrl_path) 902 goto out_unlock; 903 904 mutex_unlock(&subsys->lock); 905 906 return count; 907 out_unlock: 908 mutex_unlock(&subsys->lock); 909 return ret; 910 } 911 CONFIGFS_ATTR(nvmet_passthru_, device_path); 912 913 static ssize_t nvmet_passthru_enable_show(struct config_item *item, 914 char *page) 915 { 916 struct nvmet_subsys *subsys = to_subsys(item->ci_parent); 917 918 return sprintf(page, "%d\n", subsys->passthru_ctrl ? 1 : 0); 919 } 920 921 static ssize_t nvmet_passthru_enable_store(struct config_item *item, 922 const char *page, size_t count) 923 { 924 struct nvmet_subsys *subsys = to_subsys(item->ci_parent); 925 bool enable; 926 int ret = 0; 927 928 if (kstrtobool(page, &enable)) 929 return -EINVAL; 930 931 if (enable) 932 ret = nvmet_passthru_ctrl_enable(subsys); 933 else 934 nvmet_passthru_ctrl_disable(subsys); 935 936 return ret ? 
ret : count; 937 } 938 CONFIGFS_ATTR(nvmet_passthru_, enable); 939 940 static ssize_t nvmet_passthru_admin_timeout_show(struct config_item *item, 941 char *page) 942 { 943 return sprintf(page, "%u\n", to_subsys(item->ci_parent)->admin_timeout); 944 } 945 946 static ssize_t nvmet_passthru_admin_timeout_store(struct config_item *item, 947 const char *page, size_t count) 948 { 949 struct nvmet_subsys *subsys = to_subsys(item->ci_parent); 950 unsigned int timeout; 951 952 if (kstrtouint(page, 0, &timeout)) 953 return -EINVAL; 954 subsys->admin_timeout = timeout; 955 return count; 956 } 957 CONFIGFS_ATTR(nvmet_passthru_, admin_timeout); 958 959 static ssize_t nvmet_passthru_io_timeout_show(struct config_item *item, 960 char *page) 961 { 962 return sprintf(page, "%u\n", to_subsys(item->ci_parent)->io_timeout); 963 } 964 965 static ssize_t nvmet_passthru_io_timeout_store(struct config_item *item, 966 const char *page, size_t count) 967 { 968 struct nvmet_subsys *subsys = to_subsys(item->ci_parent); 969 unsigned int timeout; 970 971 if (kstrtouint(page, 0, &timeout)) 972 return -EINVAL; 973 subsys->io_timeout = timeout; 974 return count; 975 } 976 CONFIGFS_ATTR(nvmet_passthru_, io_timeout); 977 978 static ssize_t nvmet_passthru_clear_ids_show(struct config_item *item, 979 char *page) 980 { 981 return sprintf(page, "%u\n", to_subsys(item->ci_parent)->clear_ids); 982 } 983 984 static ssize_t nvmet_passthru_clear_ids_store(struct config_item *item, 985 const char *page, size_t count) 986 { 987 struct nvmet_subsys *subsys = to_subsys(item->ci_parent); 988 unsigned int clear_ids; 989 990 if (kstrtouint(page, 0, &clear_ids)) 991 return -EINVAL; 992 subsys->clear_ids = clear_ids; 993 return count; 994 } 995 CONFIGFS_ATTR(nvmet_passthru_, clear_ids); 996 997 static struct configfs_attribute *nvmet_passthru_attrs[] = { 998 &nvmet_passthru_attr_device_path, 999 &nvmet_passthru_attr_enable, 1000 &nvmet_passthru_attr_admin_timeout, 1001 &nvmet_passthru_attr_io_timeout, 1002 &nvmet_passthru_attr_clear_ids, 1003 NULL, 1004 }; 1005 1006 static const struct config_item_type nvmet_passthru_type = { 1007 .ct_attrs = nvmet_passthru_attrs, 1008 .ct_owner = THIS_MODULE, 1009 }; 1010 1011 static void nvmet_add_passthru_group(struct nvmet_subsys *subsys) 1012 { 1013 config_group_init_type_name(&subsys->passthru_group, 1014 "passthru", &nvmet_passthru_type); 1015 configfs_add_default_group(&subsys->passthru_group, 1016 &subsys->group); 1017 } 1018 1019 #else /* CONFIG_NVME_TARGET_PASSTHRU */ 1020 1021 static void nvmet_add_passthru_group(struct nvmet_subsys *subsys) 1022 { 1023 } 1024 1025 #endif /* CONFIG_NVME_TARGET_PASSTHRU */ 1026 1027 static int nvmet_port_subsys_allow_link(struct config_item *parent, 1028 struct config_item *target) 1029 { 1030 struct nvmet_port *port = to_nvmet_port(parent->ci_parent); 1031 struct nvmet_subsys *subsys; 1032 struct nvmet_subsys_link *link, *p; 1033 int ret; 1034 1035 if (target->ci_type != &nvmet_subsys_type) { 1036 pr_err("can only link subsystems into the subsystems dir.!\n"); 1037 return -EINVAL; 1038 } 1039 subsys = to_subsys(target); 1040 link = kmalloc(sizeof(*link), GFP_KERNEL); 1041 if (!link) 1042 return -ENOMEM; 1043 link->subsys = subsys; 1044 1045 down_write(&nvmet_config_sem); 1046 ret = -EEXIST; 1047 list_for_each_entry(p, &port->subsystems, entry) { 1048 if (p->subsys == subsys) 1049 goto out_free_link; 1050 } 1051 1052 if (list_empty(&port->subsystems)) { 1053 ret = nvmet_enable_port(port); 1054 if (ret) 1055 goto out_free_link; 1056 } 1057 1058 
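	/*
	 * The port is active at this point: either it already had linked
	 * subsystems, or nvmet_enable_port() just succeeded for the first
	 * one. Publish the new subsystem on it and signal a discovery
	 * log change.
	 */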
list_add_tail(&link->entry, &port->subsystems); 1059 nvmet_port_disc_changed(port, subsys); 1060 1061 up_write(&nvmet_config_sem); 1062 return 0; 1063 1064 out_free_link: 1065 up_write(&nvmet_config_sem); 1066 kfree(link); 1067 return ret; 1068 } 1069 1070 static void nvmet_port_subsys_drop_link(struct config_item *parent, 1071 struct config_item *target) 1072 { 1073 struct nvmet_port *port = to_nvmet_port(parent->ci_parent); 1074 struct nvmet_subsys *subsys = to_subsys(target); 1075 struct nvmet_subsys_link *p; 1076 1077 down_write(&nvmet_config_sem); 1078 list_for_each_entry(p, &port->subsystems, entry) { 1079 if (p->subsys == subsys) 1080 goto found; 1081 } 1082 up_write(&nvmet_config_sem); 1083 return; 1084 1085 found: 1086 list_del(&p->entry); 1087 nvmet_port_del_ctrls(port, subsys); 1088 nvmet_port_disc_changed(port, subsys); 1089 1090 if (list_empty(&port->subsystems)) 1091 nvmet_disable_port(port); 1092 up_write(&nvmet_config_sem); 1093 kfree(p); 1094 } 1095 1096 static struct configfs_item_operations nvmet_port_subsys_item_ops = { 1097 .allow_link = nvmet_port_subsys_allow_link, 1098 .drop_link = nvmet_port_subsys_drop_link, 1099 }; 1100 1101 static const struct config_item_type nvmet_port_subsys_type = { 1102 .ct_item_ops = &nvmet_port_subsys_item_ops, 1103 .ct_owner = THIS_MODULE, 1104 }; 1105 1106 static int nvmet_allowed_hosts_allow_link(struct config_item *parent, 1107 struct config_item *target) 1108 { 1109 struct nvmet_subsys *subsys = to_subsys(parent->ci_parent); 1110 struct nvmet_host *host; 1111 struct nvmet_host_link *link, *p; 1112 int ret; 1113 1114 if (target->ci_type != &nvmet_host_type) { 1115 pr_err("can only link hosts into the allowed_hosts directory!\n"); 1116 return -EINVAL; 1117 } 1118 1119 host = to_host(target); 1120 link = kmalloc(sizeof(*link), GFP_KERNEL); 1121 if (!link) 1122 return -ENOMEM; 1123 link->host = host; 1124 1125 down_write(&nvmet_config_sem); 1126 ret = -EINVAL; 1127 if (subsys->allow_any_host) { 1128 pr_err("can't add hosts when allow_any_host is set!\n"); 1129 goto out_free_link; 1130 } 1131 1132 ret = -EEXIST; 1133 list_for_each_entry(p, &subsys->hosts, entry) { 1134 if (!strcmp(nvmet_host_name(p->host), nvmet_host_name(host))) 1135 goto out_free_link; 1136 } 1137 list_add_tail(&link->entry, &subsys->hosts); 1138 nvmet_subsys_disc_changed(subsys, host); 1139 1140 up_write(&nvmet_config_sem); 1141 return 0; 1142 out_free_link: 1143 up_write(&nvmet_config_sem); 1144 kfree(link); 1145 return ret; 1146 } 1147 1148 static void nvmet_allowed_hosts_drop_link(struct config_item *parent, 1149 struct config_item *target) 1150 { 1151 struct nvmet_subsys *subsys = to_subsys(parent->ci_parent); 1152 struct nvmet_host *host = to_host(target); 1153 struct nvmet_host_link *p; 1154 1155 down_write(&nvmet_config_sem); 1156 list_for_each_entry(p, &subsys->hosts, entry) { 1157 if (!strcmp(nvmet_host_name(p->host), nvmet_host_name(host))) 1158 goto found; 1159 } 1160 up_write(&nvmet_config_sem); 1161 return; 1162 1163 found: 1164 list_del(&p->entry); 1165 nvmet_subsys_disc_changed(subsys, host); 1166 1167 up_write(&nvmet_config_sem); 1168 kfree(p); 1169 } 1170 1171 static struct configfs_item_operations nvmet_allowed_hosts_item_ops = { 1172 .allow_link = nvmet_allowed_hosts_allow_link, 1173 .drop_link = nvmet_allowed_hosts_drop_link, 1174 }; 1175 1176 static const struct config_item_type nvmet_allowed_hosts_type = { 1177 .ct_item_ops = &nvmet_allowed_hosts_item_ops, 1178 .ct_owner = THIS_MODULE, 1179 }; 1180 1181 static ssize_t 
nvmet_subsys_attr_allow_any_host_show(struct config_item *item, 1182 char *page) 1183 { 1184 return snprintf(page, PAGE_SIZE, "%d\n", 1185 to_subsys(item)->allow_any_host); 1186 } 1187 1188 static ssize_t nvmet_subsys_attr_allow_any_host_store(struct config_item *item, 1189 const char *page, size_t count) 1190 { 1191 struct nvmet_subsys *subsys = to_subsys(item); 1192 bool allow_any_host; 1193 int ret = 0; 1194 1195 if (kstrtobool(page, &allow_any_host)) 1196 return -EINVAL; 1197 1198 down_write(&nvmet_config_sem); 1199 if (allow_any_host && !list_empty(&subsys->hosts)) { 1200 pr_err("Can't set allow_any_host when explicit hosts are set!\n"); 1201 ret = -EINVAL; 1202 goto out_unlock; 1203 } 1204 1205 if (subsys->allow_any_host != allow_any_host) { 1206 subsys->allow_any_host = allow_any_host; 1207 nvmet_subsys_disc_changed(subsys, NULL); 1208 } 1209 1210 out_unlock: 1211 up_write(&nvmet_config_sem); 1212 return ret ? ret : count; 1213 } 1214 1215 CONFIGFS_ATTR(nvmet_subsys_, attr_allow_any_host); 1216 1217 static ssize_t nvmet_subsys_attr_version_show(struct config_item *item, 1218 char *page) 1219 { 1220 struct nvmet_subsys *subsys = to_subsys(item); 1221 1222 if (NVME_TERTIARY(subsys->ver)) 1223 return snprintf(page, PAGE_SIZE, "%llu.%llu.%llu\n", 1224 NVME_MAJOR(subsys->ver), 1225 NVME_MINOR(subsys->ver), 1226 NVME_TERTIARY(subsys->ver)); 1227 1228 return snprintf(page, PAGE_SIZE, "%llu.%llu\n", 1229 NVME_MAJOR(subsys->ver), 1230 NVME_MINOR(subsys->ver)); 1231 } 1232 1233 static ssize_t 1234 nvmet_subsys_attr_version_store_locked(struct nvmet_subsys *subsys, 1235 const char *page, size_t count) 1236 { 1237 int major, minor, tertiary = 0; 1238 int ret; 1239 1240 if (subsys->subsys_discovered) { 1241 if (NVME_TERTIARY(subsys->ver)) 1242 pr_err("Can't set version number. %llu.%llu.%llu is already assigned\n", 1243 NVME_MAJOR(subsys->ver), 1244 NVME_MINOR(subsys->ver), 1245 NVME_TERTIARY(subsys->ver)); 1246 else 1247 pr_err("Can't set version number. 
%llu.%llu is already assigned\n", 1248 NVME_MAJOR(subsys->ver), 1249 NVME_MINOR(subsys->ver)); 1250 return -EINVAL; 1251 } 1252 1253 /* passthru subsystems use the underlying controller's version */ 1254 if (nvmet_is_passthru_subsys(subsys)) 1255 return -EINVAL; 1256 1257 ret = sscanf(page, "%d.%d.%d\n", &major, &minor, &tertiary); 1258 if (ret != 2 && ret != 3) 1259 return -EINVAL; 1260 1261 subsys->ver = NVME_VS(major, minor, tertiary); 1262 1263 return count; 1264 } 1265 1266 static ssize_t nvmet_subsys_attr_version_store(struct config_item *item, 1267 const char *page, size_t count) 1268 { 1269 struct nvmet_subsys *subsys = to_subsys(item); 1270 ssize_t ret; 1271 1272 down_write(&nvmet_config_sem); 1273 mutex_lock(&subsys->lock); 1274 ret = nvmet_subsys_attr_version_store_locked(subsys, page, count); 1275 mutex_unlock(&subsys->lock); 1276 up_write(&nvmet_config_sem); 1277 1278 return ret; 1279 } 1280 CONFIGFS_ATTR(nvmet_subsys_, attr_version); 1281 1282 /* See Section 1.5 of NVMe 1.4 */ 1283 static bool nvmet_is_ascii(const char c) 1284 { 1285 return c >= 0x20 && c <= 0x7e; 1286 } 1287 1288 static ssize_t nvmet_subsys_attr_serial_show(struct config_item *item, 1289 char *page) 1290 { 1291 struct nvmet_subsys *subsys = to_subsys(item); 1292 1293 return snprintf(page, PAGE_SIZE, "%.*s\n", 1294 NVMET_SN_MAX_SIZE, subsys->serial); 1295 } 1296 1297 static ssize_t 1298 nvmet_subsys_attr_serial_store_locked(struct nvmet_subsys *subsys, 1299 const char *page, size_t count) 1300 { 1301 int pos, len = strcspn(page, "\n"); 1302 1303 if (subsys->subsys_discovered) { 1304 pr_err("Can't set serial number. %s is already assigned\n", 1305 subsys->serial); 1306 return -EINVAL; 1307 } 1308 1309 if (!len || len > NVMET_SN_MAX_SIZE) { 1310 pr_err("Serial Number can not be empty or exceed %d Bytes\n", 1311 NVMET_SN_MAX_SIZE); 1312 return -EINVAL; 1313 } 1314 1315 for (pos = 0; pos < len; pos++) { 1316 if (!nvmet_is_ascii(page[pos])) { 1317 pr_err("Serial Number must contain only ASCII strings\n"); 1318 return -EINVAL; 1319 } 1320 } 1321 1322 memcpy_and_pad(subsys->serial, NVMET_SN_MAX_SIZE, page, len, ' '); 1323 1324 return count; 1325 } 1326 1327 static ssize_t nvmet_subsys_attr_serial_store(struct config_item *item, 1328 const char *page, size_t count) 1329 { 1330 struct nvmet_subsys *subsys = to_subsys(item); 1331 ssize_t ret; 1332 1333 down_write(&nvmet_config_sem); 1334 mutex_lock(&subsys->lock); 1335 ret = nvmet_subsys_attr_serial_store_locked(subsys, page, count); 1336 mutex_unlock(&subsys->lock); 1337 up_write(&nvmet_config_sem); 1338 1339 return ret; 1340 } 1341 CONFIGFS_ATTR(nvmet_subsys_, attr_serial); 1342 1343 static ssize_t nvmet_subsys_attr_cntlid_min_show(struct config_item *item, 1344 char *page) 1345 { 1346 return snprintf(page, PAGE_SIZE, "%u\n", to_subsys(item)->cntlid_min); 1347 } 1348 1349 static ssize_t nvmet_subsys_attr_cntlid_min_store(struct config_item *item, 1350 const char *page, size_t cnt) 1351 { 1352 u16 cntlid_min; 1353 1354 if (sscanf(page, "%hu\n", &cntlid_min) != 1) 1355 return -EINVAL; 1356 1357 if (cntlid_min == 0) 1358 return -EINVAL; 1359 1360 down_write(&nvmet_config_sem); 1361 if (cntlid_min > to_subsys(item)->cntlid_max) 1362 goto out_unlock; 1363 to_subsys(item)->cntlid_min = cntlid_min; 1364 up_write(&nvmet_config_sem); 1365 return cnt; 1366 1367 out_unlock: 1368 up_write(&nvmet_config_sem); 1369 return -EINVAL; 1370 } 1371 CONFIGFS_ATTR(nvmet_subsys_, attr_cntlid_min); 1372 1373 static ssize_t nvmet_subsys_attr_cntlid_max_show(struct config_item *item, 1374 
char *page) 1375 { 1376 return snprintf(page, PAGE_SIZE, "%u\n", to_subsys(item)->cntlid_max); 1377 } 1378 1379 static ssize_t nvmet_subsys_attr_cntlid_max_store(struct config_item *item, 1380 const char *page, size_t cnt) 1381 { 1382 u16 cntlid_max; 1383 1384 if (sscanf(page, "%hu\n", &cntlid_max) != 1) 1385 return -EINVAL; 1386 1387 if (cntlid_max == 0) 1388 return -EINVAL; 1389 1390 down_write(&nvmet_config_sem); 1391 if (cntlid_max < to_subsys(item)->cntlid_min) 1392 goto out_unlock; 1393 to_subsys(item)->cntlid_max = cntlid_max; 1394 up_write(&nvmet_config_sem); 1395 return cnt; 1396 1397 out_unlock: 1398 up_write(&nvmet_config_sem); 1399 return -EINVAL; 1400 } 1401 CONFIGFS_ATTR(nvmet_subsys_, attr_cntlid_max); 1402 1403 static ssize_t nvmet_subsys_attr_model_show(struct config_item *item, 1404 char *page) 1405 { 1406 struct nvmet_subsys *subsys = to_subsys(item); 1407 1408 return snprintf(page, PAGE_SIZE, "%s\n", subsys->model_number); 1409 } 1410 1411 static ssize_t nvmet_subsys_attr_model_store_locked(struct nvmet_subsys *subsys, 1412 const char *page, size_t count) 1413 { 1414 int pos = 0, len; 1415 char *val; 1416 1417 if (subsys->subsys_discovered) { 1418 pr_err("Can't set model number. %s is already assigned\n", 1419 subsys->model_number); 1420 return -EINVAL; 1421 } 1422 1423 len = strcspn(page, "\n"); 1424 if (!len) 1425 return -EINVAL; 1426 1427 if (len > NVMET_MN_MAX_SIZE) { 1428 pr_err("Model number size can not exceed %d Bytes\n", 1429 NVMET_MN_MAX_SIZE); 1430 return -EINVAL; 1431 } 1432 1433 for (pos = 0; pos < len; pos++) { 1434 if (!nvmet_is_ascii(page[pos])) 1435 return -EINVAL; 1436 } 1437 1438 val = kmemdup_nul(page, len, GFP_KERNEL); 1439 if (!val) 1440 return -ENOMEM; 1441 kfree(subsys->model_number); 1442 subsys->model_number = val; 1443 return count; 1444 } 1445 1446 static ssize_t nvmet_subsys_attr_model_store(struct config_item *item, 1447 const char *page, size_t count) 1448 { 1449 struct nvmet_subsys *subsys = to_subsys(item); 1450 ssize_t ret; 1451 1452 down_write(&nvmet_config_sem); 1453 mutex_lock(&subsys->lock); 1454 ret = nvmet_subsys_attr_model_store_locked(subsys, page, count); 1455 mutex_unlock(&subsys->lock); 1456 up_write(&nvmet_config_sem); 1457 1458 return ret; 1459 } 1460 CONFIGFS_ATTR(nvmet_subsys_, attr_model); 1461 1462 static ssize_t nvmet_subsys_attr_ieee_oui_show(struct config_item *item, 1463 char *page) 1464 { 1465 struct nvmet_subsys *subsys = to_subsys(item); 1466 1467 return sysfs_emit(page, "0x%06x\n", subsys->ieee_oui); 1468 } 1469 1470 static ssize_t nvmet_subsys_attr_ieee_oui_store_locked(struct nvmet_subsys *subsys, 1471 const char *page, size_t count) 1472 { 1473 uint32_t val = 0; 1474 int ret; 1475 1476 if (subsys->subsys_discovered) { 1477 pr_err("Can't set IEEE OUI. 
0x%06x is already assigned\n", 1478 subsys->ieee_oui); 1479 return -EINVAL; 1480 } 1481 1482 ret = kstrtou32(page, 0, &val); 1483 if (ret < 0) 1484 return ret; 1485 1486 if (val >= 0x1000000) 1487 return -EINVAL; 1488 1489 subsys->ieee_oui = val; 1490 1491 return count; 1492 } 1493 1494 static ssize_t nvmet_subsys_attr_ieee_oui_store(struct config_item *item, 1495 const char *page, size_t count) 1496 { 1497 struct nvmet_subsys *subsys = to_subsys(item); 1498 ssize_t ret; 1499 1500 down_write(&nvmet_config_sem); 1501 mutex_lock(&subsys->lock); 1502 ret = nvmet_subsys_attr_ieee_oui_store_locked(subsys, page, count); 1503 mutex_unlock(&subsys->lock); 1504 up_write(&nvmet_config_sem); 1505 1506 return ret; 1507 } 1508 CONFIGFS_ATTR(nvmet_subsys_, attr_ieee_oui); 1509 1510 static ssize_t nvmet_subsys_attr_firmware_show(struct config_item *item, 1511 char *page) 1512 { 1513 struct nvmet_subsys *subsys = to_subsys(item); 1514 1515 return sysfs_emit(page, "%s\n", subsys->firmware_rev); 1516 } 1517 1518 static ssize_t nvmet_subsys_attr_firmware_store_locked(struct nvmet_subsys *subsys, 1519 const char *page, size_t count) 1520 { 1521 int pos = 0, len; 1522 char *val; 1523 1524 if (subsys->subsys_discovered) { 1525 pr_err("Can't set firmware revision. %s is already assigned\n", 1526 subsys->firmware_rev); 1527 return -EINVAL; 1528 } 1529 1530 len = strcspn(page, "\n"); 1531 if (!len) 1532 return -EINVAL; 1533 1534 if (len > NVMET_FR_MAX_SIZE) { 1535 pr_err("Firmware revision size can not exceed %d Bytes\n", 1536 NVMET_FR_MAX_SIZE); 1537 return -EINVAL; 1538 } 1539 1540 for (pos = 0; pos < len; pos++) { 1541 if (!nvmet_is_ascii(page[pos])) 1542 return -EINVAL; 1543 } 1544 1545 val = kmemdup_nul(page, len, GFP_KERNEL); 1546 if (!val) 1547 return -ENOMEM; 1548 1549 kfree(subsys->firmware_rev); 1550 1551 subsys->firmware_rev = val; 1552 1553 return count; 1554 } 1555 1556 static ssize_t nvmet_subsys_attr_firmware_store(struct config_item *item, 1557 const char *page, size_t count) 1558 { 1559 struct nvmet_subsys *subsys = to_subsys(item); 1560 ssize_t ret; 1561 1562 down_write(&nvmet_config_sem); 1563 mutex_lock(&subsys->lock); 1564 ret = nvmet_subsys_attr_firmware_store_locked(subsys, page, count); 1565 mutex_unlock(&subsys->lock); 1566 up_write(&nvmet_config_sem); 1567 1568 return ret; 1569 } 1570 CONFIGFS_ATTR(nvmet_subsys_, attr_firmware); 1571 1572 #ifdef CONFIG_BLK_DEV_INTEGRITY 1573 static ssize_t nvmet_subsys_attr_pi_enable_show(struct config_item *item, 1574 char *page) 1575 { 1576 return snprintf(page, PAGE_SIZE, "%d\n", to_subsys(item)->pi_support); 1577 } 1578 1579 static ssize_t nvmet_subsys_attr_pi_enable_store(struct config_item *item, 1580 const char *page, size_t count) 1581 { 1582 struct nvmet_subsys *subsys = to_subsys(item); 1583 bool pi_enable; 1584 1585 if (kstrtobool(page, &pi_enable)) 1586 return -EINVAL; 1587 1588 subsys->pi_support = pi_enable; 1589 return count; 1590 } 1591 CONFIGFS_ATTR(nvmet_subsys_, attr_pi_enable); 1592 #endif 1593 1594 static ssize_t nvmet_subsys_attr_qid_max_show(struct config_item *item, 1595 char *page) 1596 { 1597 return snprintf(page, PAGE_SIZE, "%u\n", to_subsys(item)->max_qid); 1598 } 1599 1600 static ssize_t nvmet_subsys_attr_qid_max_store(struct config_item *item, 1601 const char *page, size_t cnt) 1602 { 1603 struct nvmet_subsys *subsys = to_subsys(item); 1604 struct nvmet_ctrl *ctrl; 1605 u16 qid_max; 1606 1607 if (sscanf(page, "%hu\n", &qid_max) != 1) 1608 return -EINVAL; 1609 1610 if (qid_max < 1 || qid_max > NVMET_NR_QUEUES) 1611 return 
-EINVAL; 1612 1613 down_write(&nvmet_config_sem); 1614 subsys->max_qid = qid_max; 1615 1616 /* Force reconnect */ 1617 list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) 1618 ctrl->ops->delete_ctrl(ctrl); 1619 up_write(&nvmet_config_sem); 1620 1621 return cnt; 1622 } 1623 CONFIGFS_ATTR(nvmet_subsys_, attr_qid_max); 1624 1625 static struct configfs_attribute *nvmet_subsys_attrs[] = { 1626 &nvmet_subsys_attr_attr_allow_any_host, 1627 &nvmet_subsys_attr_attr_version, 1628 &nvmet_subsys_attr_attr_serial, 1629 &nvmet_subsys_attr_attr_cntlid_min, 1630 &nvmet_subsys_attr_attr_cntlid_max, 1631 &nvmet_subsys_attr_attr_model, 1632 &nvmet_subsys_attr_attr_qid_max, 1633 &nvmet_subsys_attr_attr_ieee_oui, 1634 &nvmet_subsys_attr_attr_firmware, 1635 #ifdef CONFIG_BLK_DEV_INTEGRITY 1636 &nvmet_subsys_attr_attr_pi_enable, 1637 #endif 1638 NULL, 1639 }; 1640 1641 /* 1642 * Subsystem structures & folder operation functions below 1643 */ 1644 static void nvmet_subsys_release(struct config_item *item) 1645 { 1646 struct nvmet_subsys *subsys = to_subsys(item); 1647 1648 nvmet_subsys_del_ctrls(subsys); 1649 nvmet_subsys_put(subsys); 1650 } 1651 1652 static struct configfs_item_operations nvmet_subsys_item_ops = { 1653 .release = nvmet_subsys_release, 1654 }; 1655 1656 static const struct config_item_type nvmet_subsys_type = { 1657 .ct_item_ops = &nvmet_subsys_item_ops, 1658 .ct_attrs = nvmet_subsys_attrs, 1659 .ct_owner = THIS_MODULE, 1660 }; 1661 1662 static struct config_group *nvmet_subsys_make(struct config_group *group, 1663 const char *name) 1664 { 1665 struct nvmet_subsys *subsys; 1666 1667 if (sysfs_streq(name, NVME_DISC_SUBSYS_NAME)) { 1668 pr_err("can't create discovery subsystem through configfs\n"); 1669 return ERR_PTR(-EINVAL); 1670 } 1671 1672 if (sysfs_streq(name, nvmet_disc_subsys->subsysnqn)) { 1673 pr_err("can't create subsystem using unique discovery NQN\n"); 1674 return ERR_PTR(-EINVAL); 1675 } 1676 1677 subsys = nvmet_subsys_alloc(name, NVME_NQN_NVME); 1678 if (IS_ERR(subsys)) 1679 return ERR_CAST(subsys); 1680 1681 config_group_init_type_name(&subsys->group, name, &nvmet_subsys_type); 1682 1683 config_group_init_type_name(&subsys->namespaces_group, 1684 "namespaces", &nvmet_namespaces_type); 1685 configfs_add_default_group(&subsys->namespaces_group, &subsys->group); 1686 1687 config_group_init_type_name(&subsys->allowed_hosts_group, 1688 "allowed_hosts", &nvmet_allowed_hosts_type); 1689 configfs_add_default_group(&subsys->allowed_hosts_group, 1690 &subsys->group); 1691 1692 nvmet_add_passthru_group(subsys); 1693 1694 return &subsys->group; 1695 } 1696 1697 static struct configfs_group_operations nvmet_subsystems_group_ops = { 1698 .make_group = nvmet_subsys_make, 1699 }; 1700 1701 static const struct config_item_type nvmet_subsystems_type = { 1702 .ct_group_ops = &nvmet_subsystems_group_ops, 1703 .ct_owner = THIS_MODULE, 1704 }; 1705 1706 static ssize_t nvmet_referral_enable_show(struct config_item *item, 1707 char *page) 1708 { 1709 return snprintf(page, PAGE_SIZE, "%d\n", to_nvmet_port(item)->enabled); 1710 } 1711 1712 static ssize_t nvmet_referral_enable_store(struct config_item *item, 1713 const char *page, size_t count) 1714 { 1715 struct nvmet_port *parent = to_nvmet_port(item->ci_parent->ci_parent); 1716 struct nvmet_port *port = to_nvmet_port(item); 1717 bool enable; 1718 1719 if (kstrtobool(page, &enable)) 1720 goto inval; 1721 1722 if (enable) 1723 nvmet_referral_enable(parent, port); 1724 else 1725 nvmet_referral_disable(parent, port); 1726 1727 return count; 1728 inval: 
1729 pr_err("Invalid value '%s' for enable\n", page); 1730 return -EINVAL; 1731 } 1732 1733 CONFIGFS_ATTR(nvmet_referral_, enable); 1734 1735 /* 1736 * Discovery Service subsystem definitions 1737 */ 1738 static struct configfs_attribute *nvmet_referral_attrs[] = { 1739 &nvmet_attr_addr_adrfam, 1740 &nvmet_attr_addr_portid, 1741 &nvmet_attr_addr_treq, 1742 &nvmet_attr_addr_traddr, 1743 &nvmet_attr_addr_trsvcid, 1744 &nvmet_attr_addr_trtype, 1745 &nvmet_referral_attr_enable, 1746 NULL, 1747 }; 1748 1749 static void nvmet_referral_notify(struct config_group *group, 1750 struct config_item *item) 1751 { 1752 struct nvmet_port *parent = to_nvmet_port(item->ci_parent->ci_parent); 1753 struct nvmet_port *port = to_nvmet_port(item); 1754 1755 nvmet_referral_disable(parent, port); 1756 } 1757 1758 static void nvmet_referral_release(struct config_item *item) 1759 { 1760 struct nvmet_port *port = to_nvmet_port(item); 1761 1762 kfree(port); 1763 } 1764 1765 static struct configfs_item_operations nvmet_referral_item_ops = { 1766 .release = nvmet_referral_release, 1767 }; 1768 1769 static const struct config_item_type nvmet_referral_type = { 1770 .ct_owner = THIS_MODULE, 1771 .ct_attrs = nvmet_referral_attrs, 1772 .ct_item_ops = &nvmet_referral_item_ops, 1773 }; 1774 1775 static struct config_group *nvmet_referral_make( 1776 struct config_group *group, const char *name) 1777 { 1778 struct nvmet_port *port; 1779 1780 port = kzalloc(sizeof(*port), GFP_KERNEL); 1781 if (!port) 1782 return ERR_PTR(-ENOMEM); 1783 1784 INIT_LIST_HEAD(&port->entry); 1785 config_group_init_type_name(&port->group, name, &nvmet_referral_type); 1786 1787 return &port->group; 1788 } 1789 1790 static struct configfs_group_operations nvmet_referral_group_ops = { 1791 .make_group = nvmet_referral_make, 1792 .disconnect_notify = nvmet_referral_notify, 1793 }; 1794 1795 static const struct config_item_type nvmet_referrals_type = { 1796 .ct_owner = THIS_MODULE, 1797 .ct_group_ops = &nvmet_referral_group_ops, 1798 }; 1799 1800 static struct nvmet_type_name_map nvmet_ana_state[] = { 1801 { NVME_ANA_OPTIMIZED, "optimized" }, 1802 { NVME_ANA_NONOPTIMIZED, "non-optimized" }, 1803 { NVME_ANA_INACCESSIBLE, "inaccessible" }, 1804 { NVME_ANA_PERSISTENT_LOSS, "persistent-loss" }, 1805 { NVME_ANA_CHANGE, "change" }, 1806 }; 1807 1808 static ssize_t nvmet_ana_group_ana_state_show(struct config_item *item, 1809 char *page) 1810 { 1811 struct nvmet_ana_group *grp = to_ana_group(item); 1812 enum nvme_ana_state state = grp->port->ana_state[grp->grpid]; 1813 int i; 1814 1815 for (i = 0; i < ARRAY_SIZE(nvmet_ana_state); i++) { 1816 if (state == nvmet_ana_state[i].type) 1817 return sprintf(page, "%s\n", nvmet_ana_state[i].name); 1818 } 1819 1820 return sprintf(page, "\n"); 1821 } 1822 1823 static ssize_t nvmet_ana_group_ana_state_store(struct config_item *item, 1824 const char *page, size_t count) 1825 { 1826 struct nvmet_ana_group *grp = to_ana_group(item); 1827 enum nvme_ana_state *ana_state = grp->port->ana_state; 1828 int i; 1829 1830 for (i = 0; i < ARRAY_SIZE(nvmet_ana_state); i++) { 1831 if (sysfs_streq(page, nvmet_ana_state[i].name)) 1832 goto found; 1833 } 1834 1835 pr_err("Invalid value '%s' for ana_state\n", page); 1836 return -EINVAL; 1837 1838 found: 1839 down_write(&nvmet_ana_sem); 1840 ana_state[grp->grpid] = (enum nvme_ana_state) nvmet_ana_state[i].type; 1841 nvmet_ana_chgcnt++; 1842 up_write(&nvmet_ana_sem); 1843 nvmet_port_send_ana_event(grp->port); 1844 return count; 1845 } 1846 1847 CONFIGFS_ATTR(nvmet_ana_group_, ana_state); 1848 1849 
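/*
 * Illustrative only (not part of the original file): an additional ANA group
 * is created with mkdir and its state changed by writing one of the names
 * from nvmet_ana_state[], e.g.
 *
 *	mkdir /sys/kernel/config/nvmet/ports/1/ana_groups/2
 *	echo inaccessible > \
 *		/sys/kernel/config/nvmet/ports/1/ana_groups/2/ana_state
 */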
static struct configfs_attribute *nvmet_ana_group_attrs[] = { 1850 &nvmet_ana_group_attr_ana_state, 1851 NULL, 1852 }; 1853 1854 static void nvmet_ana_group_release(struct config_item *item) 1855 { 1856 struct nvmet_ana_group *grp = to_ana_group(item); 1857 1858 if (grp == &grp->port->ana_default_group) 1859 return; 1860 1861 down_write(&nvmet_ana_sem); 1862 grp->port->ana_state[grp->grpid] = NVME_ANA_INACCESSIBLE; 1863 nvmet_ana_group_enabled[grp->grpid]--; 1864 up_write(&nvmet_ana_sem); 1865 1866 nvmet_port_send_ana_event(grp->port); 1867 kfree(grp); 1868 } 1869 1870 static struct configfs_item_operations nvmet_ana_group_item_ops = { 1871 .release = nvmet_ana_group_release, 1872 }; 1873 1874 static const struct config_item_type nvmet_ana_group_type = { 1875 .ct_item_ops = &nvmet_ana_group_item_ops, 1876 .ct_attrs = nvmet_ana_group_attrs, 1877 .ct_owner = THIS_MODULE, 1878 }; 1879 1880 static struct config_group *nvmet_ana_groups_make_group( 1881 struct config_group *group, const char *name) 1882 { 1883 struct nvmet_port *port = ana_groups_to_port(&group->cg_item); 1884 struct nvmet_ana_group *grp; 1885 u32 grpid; 1886 int ret; 1887 1888 ret = kstrtou32(name, 0, &grpid); 1889 if (ret) 1890 goto out; 1891 1892 ret = -EINVAL; 1893 if (grpid <= 1 || grpid > NVMET_MAX_ANAGRPS) 1894 goto out; 1895 1896 ret = -ENOMEM; 1897 grp = kzalloc(sizeof(*grp), GFP_KERNEL); 1898 if (!grp) 1899 goto out; 1900 grp->port = port; 1901 grp->grpid = grpid; 1902 1903 down_write(&nvmet_ana_sem); 1904 grpid = array_index_nospec(grpid, NVMET_MAX_ANAGRPS); 1905 nvmet_ana_group_enabled[grpid]++; 1906 up_write(&nvmet_ana_sem); 1907 1908 nvmet_port_send_ana_event(grp->port); 1909 1910 config_group_init_type_name(&grp->group, name, &nvmet_ana_group_type); 1911 return &grp->group; 1912 out: 1913 return ERR_PTR(ret); 1914 } 1915 1916 static struct configfs_group_operations nvmet_ana_groups_group_ops = { 1917 .make_group = nvmet_ana_groups_make_group, 1918 }; 1919 1920 static const struct config_item_type nvmet_ana_groups_type = { 1921 .ct_group_ops = &nvmet_ana_groups_group_ops, 1922 .ct_owner = THIS_MODULE, 1923 }; 1924 1925 /* 1926 * Ports definitions. 
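 *
 * A port object only describes an address: it is brought up when the first
 * subsystem is linked into its "subsystems" directory (see
 * nvmet_port_subsys_allow_link() above) and shut down again when the last
 * such link is removed.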
1927 */ 1928 static void nvmet_port_release(struct config_item *item) 1929 { 1930 struct nvmet_port *port = to_nvmet_port(item); 1931 1932 /* Let inflight controllers teardown complete */ 1933 flush_workqueue(nvmet_wq); 1934 list_del(&port->global_entry); 1935 1936 key_put(port->keyring); 1937 kfree(port->ana_state); 1938 kfree(port); 1939 } 1940 1941 static struct configfs_attribute *nvmet_port_attrs[] = { 1942 &nvmet_attr_addr_adrfam, 1943 &nvmet_attr_addr_treq, 1944 &nvmet_attr_addr_traddr, 1945 &nvmet_attr_addr_trsvcid, 1946 &nvmet_attr_addr_trtype, 1947 &nvmet_attr_addr_tsas, 1948 &nvmet_attr_param_inline_data_size, 1949 &nvmet_attr_param_max_queue_size, 1950 #ifdef CONFIG_BLK_DEV_INTEGRITY 1951 &nvmet_attr_param_pi_enable, 1952 #endif 1953 NULL, 1954 }; 1955 1956 static struct configfs_item_operations nvmet_port_item_ops = { 1957 .release = nvmet_port_release, 1958 }; 1959 1960 static const struct config_item_type nvmet_port_type = { 1961 .ct_attrs = nvmet_port_attrs, 1962 .ct_item_ops = &nvmet_port_item_ops, 1963 .ct_owner = THIS_MODULE, 1964 }; 1965 1966 static struct config_group *nvmet_ports_make(struct config_group *group, 1967 const char *name) 1968 { 1969 struct nvmet_port *port; 1970 u16 portid; 1971 u32 i; 1972 1973 if (kstrtou16(name, 0, &portid)) 1974 return ERR_PTR(-EINVAL); 1975 1976 port = kzalloc(sizeof(*port), GFP_KERNEL); 1977 if (!port) 1978 return ERR_PTR(-ENOMEM); 1979 1980 port->ana_state = kcalloc(NVMET_MAX_ANAGRPS + 1, 1981 sizeof(*port->ana_state), GFP_KERNEL); 1982 if (!port->ana_state) { 1983 kfree(port); 1984 return ERR_PTR(-ENOMEM); 1985 } 1986 1987 if (IS_ENABLED(CONFIG_NVME_TARGET_TCP_TLS) && nvme_keyring_id()) { 1988 port->keyring = key_lookup(nvme_keyring_id()); 1989 if (IS_ERR(port->keyring)) { 1990 pr_warn("NVMe keyring not available, disabling TLS\n"); 1991 port->keyring = NULL; 1992 } 1993 } 1994 1995 for (i = 1; i <= NVMET_MAX_ANAGRPS; i++) { 1996 if (i == NVMET_DEFAULT_ANA_GRPID) 1997 port->ana_state[1] = NVME_ANA_OPTIMIZED; 1998 else 1999 port->ana_state[i] = NVME_ANA_INACCESSIBLE; 2000 } 2001 2002 list_add(&port->global_entry, &nvmet_ports_list); 2003 2004 INIT_LIST_HEAD(&port->entry); 2005 INIT_LIST_HEAD(&port->subsystems); 2006 INIT_LIST_HEAD(&port->referrals); 2007 port->inline_data_size = -1; /* < 0 == let the transport choose */ 2008 port->max_queue_size = -1; /* < 0 == let the transport choose */ 2009 2010 port->disc_addr.portid = cpu_to_le16(portid); 2011 port->disc_addr.adrfam = NVMF_ADDR_FAMILY_MAX; 2012 port->disc_addr.treq = NVMF_TREQ_DISABLE_SQFLOW; 2013 config_group_init_type_name(&port->group, name, &nvmet_port_type); 2014 2015 config_group_init_type_name(&port->subsys_group, 2016 "subsystems", &nvmet_port_subsys_type); 2017 configfs_add_default_group(&port->subsys_group, &port->group); 2018 2019 config_group_init_type_name(&port->referrals_group, 2020 "referrals", &nvmet_referrals_type); 2021 configfs_add_default_group(&port->referrals_group, &port->group); 2022 2023 config_group_init_type_name(&port->ana_groups_group, 2024 "ana_groups", &nvmet_ana_groups_type); 2025 configfs_add_default_group(&port->ana_groups_group, &port->group); 2026 2027 port->ana_default_group.port = port; 2028 port->ana_default_group.grpid = NVMET_DEFAULT_ANA_GRPID; 2029 config_group_init_type_name(&port->ana_default_group.group, 2030 __stringify(NVMET_DEFAULT_ANA_GRPID), 2031 &nvmet_ana_group_type); 2032 configfs_add_default_group(&port->ana_default_group.group, 2033 &port->ana_groups_group); 2034 2035 return &port->group; 2036 } 2037 2038 static struct 
configfs_group_operations nvmet_ports_group_ops = { 2039 .make_group = nvmet_ports_make, 2040 }; 2041 2042 static const struct config_item_type nvmet_ports_type = { 2043 .ct_group_ops = &nvmet_ports_group_ops, 2044 .ct_owner = THIS_MODULE, 2045 }; 2046 2047 static struct config_group nvmet_subsystems_group; 2048 static struct config_group nvmet_ports_group; 2049 2050 #ifdef CONFIG_NVME_TARGET_AUTH 2051 static ssize_t nvmet_host_dhchap_key_show(struct config_item *item, 2052 char *page) 2053 { 2054 u8 *dhchap_secret; 2055 ssize_t ret; 2056 2057 down_read(&nvmet_config_sem); 2058 dhchap_secret = to_host(item)->dhchap_secret; 2059 if (!dhchap_secret) 2060 ret = sprintf(page, "\n"); 2061 else 2062 ret = sprintf(page, "%s\n", dhchap_secret); 2063 up_read(&nvmet_config_sem); 2064 return ret; 2065 } 2066 2067 static ssize_t nvmet_host_dhchap_key_store(struct config_item *item, 2068 const char *page, size_t count) 2069 { 2070 struct nvmet_host *host = to_host(item); 2071 int ret; 2072 2073 ret = nvmet_auth_set_key(host, page, false); 2074 /* 2075 * Re-authentication is a soft state, so keep the 2076 * current authentication valid until the host 2077 * requests re-authentication. 2078 */ 2079 return ret < 0 ? ret : count; 2080 } 2081 2082 CONFIGFS_ATTR(nvmet_host_, dhchap_key); 2083 2084 static ssize_t nvmet_host_dhchap_ctrl_key_show(struct config_item *item, 2085 char *page) 2086 { 2087 u8 *dhchap_secret = to_host(item)->dhchap_ctrl_secret; 2088 ssize_t ret; 2089 2090 down_read(&nvmet_config_sem); 2091 dhchap_secret = to_host(item)->dhchap_ctrl_secret; 2092 if (!dhchap_secret) 2093 ret = sprintf(page, "\n"); 2094 else 2095 ret = sprintf(page, "%s\n", dhchap_secret); 2096 up_read(&nvmet_config_sem); 2097 return ret; 2098 } 2099 2100 static ssize_t nvmet_host_dhchap_ctrl_key_store(struct config_item *item, 2101 const char *page, size_t count) 2102 { 2103 struct nvmet_host *host = to_host(item); 2104 int ret; 2105 2106 ret = nvmet_auth_set_key(host, page, true); 2107 /* 2108 * Re-authentication is a soft state, so keep the 2109 * current authentication valid until the host 2110 * requests re-authentication. 2111 */ 2112 return ret < 0 ? ret : count; 2113 } 2114 2115 CONFIGFS_ATTR(nvmet_host_, dhchap_ctrl_key); 2116 2117 static ssize_t nvmet_host_dhchap_hash_show(struct config_item *item, 2118 char *page) 2119 { 2120 struct nvmet_host *host = to_host(item); 2121 const char *hash_name = nvme_auth_hmac_name(host->dhchap_hash_id); 2122 2123 return sprintf(page, "%s\n", hash_name ? hash_name : "none"); 2124 } 2125 2126 static ssize_t nvmet_host_dhchap_hash_store(struct config_item *item, 2127 const char *page, size_t count) 2128 { 2129 struct nvmet_host *host = to_host(item); 2130 u8 hmac_id; 2131 2132 hmac_id = nvme_auth_hmac_id(page); 2133 if (hmac_id == NVME_AUTH_HASH_INVALID) 2134 return -EINVAL; 2135 if (!crypto_has_shash(nvme_auth_hmac_name(hmac_id), 0, 0)) 2136 return -ENOTSUPP; 2137 host->dhchap_hash_id = hmac_id; 2138 return count; 2139 } 2140 2141 CONFIGFS_ATTR(nvmet_host_, dhchap_hash); 2142 2143 static ssize_t nvmet_host_dhchap_dhgroup_show(struct config_item *item, 2144 char *page) 2145 { 2146 struct nvmet_host *host = to_host(item); 2147 const char *dhgroup = nvme_auth_dhgroup_name(host->dhchap_dhgroup_id); 2148 2149 return sprintf(page, "%s\n", dhgroup ? 
dhgroup : "none"); 2150 } 2151 2152 static ssize_t nvmet_host_dhchap_dhgroup_store(struct config_item *item, 2153 const char *page, size_t count) 2154 { 2155 struct nvmet_host *host = to_host(item); 2156 int dhgroup_id; 2157 2158 dhgroup_id = nvme_auth_dhgroup_id(page); 2159 if (dhgroup_id == NVME_AUTH_DHGROUP_INVALID) 2160 return -EINVAL; 2161 if (dhgroup_id != NVME_AUTH_DHGROUP_NULL) { 2162 const char *kpp = nvme_auth_dhgroup_kpp(dhgroup_id); 2163 2164 if (!crypto_has_kpp(kpp, 0, 0)) 2165 return -EINVAL; 2166 } 2167 host->dhchap_dhgroup_id = dhgroup_id; 2168 return count; 2169 } 2170 2171 CONFIGFS_ATTR(nvmet_host_, dhchap_dhgroup); 2172 2173 static struct configfs_attribute *nvmet_host_attrs[] = { 2174 &nvmet_host_attr_dhchap_key, 2175 &nvmet_host_attr_dhchap_ctrl_key, 2176 &nvmet_host_attr_dhchap_hash, 2177 &nvmet_host_attr_dhchap_dhgroup, 2178 NULL, 2179 }; 2180 #endif /* CONFIG_NVME_TARGET_AUTH */ 2181 2182 static void nvmet_host_release(struct config_item *item) 2183 { 2184 struct nvmet_host *host = to_host(item); 2185 2186 #ifdef CONFIG_NVME_TARGET_AUTH 2187 kfree(host->dhchap_secret); 2188 kfree(host->dhchap_ctrl_secret); 2189 #endif 2190 kfree(host); 2191 } 2192 2193 static struct configfs_item_operations nvmet_host_item_ops = { 2194 .release = nvmet_host_release, 2195 }; 2196 2197 static const struct config_item_type nvmet_host_type = { 2198 .ct_item_ops = &nvmet_host_item_ops, 2199 #ifdef CONFIG_NVME_TARGET_AUTH 2200 .ct_attrs = nvmet_host_attrs, 2201 #endif 2202 .ct_owner = THIS_MODULE, 2203 }; 2204 2205 static struct config_group *nvmet_hosts_make_group(struct config_group *group, 2206 const char *name) 2207 { 2208 struct nvmet_host *host; 2209 2210 host = kzalloc(sizeof(*host), GFP_KERNEL); 2211 if (!host) 2212 return ERR_PTR(-ENOMEM); 2213 2214 #ifdef CONFIG_NVME_TARGET_AUTH 2215 /* Default to SHA256 */ 2216 host->dhchap_hash_id = NVME_AUTH_HASH_SHA256; 2217 #endif 2218 2219 config_group_init_type_name(&host->group, name, &nvmet_host_type); 2220 2221 return &host->group; 2222 } 2223 2224 static struct configfs_group_operations nvmet_hosts_group_ops = { 2225 .make_group = nvmet_hosts_make_group, 2226 }; 2227 2228 static const struct config_item_type nvmet_hosts_type = { 2229 .ct_group_ops = &nvmet_hosts_group_ops, 2230 .ct_owner = THIS_MODULE, 2231 }; 2232 2233 static struct config_group nvmet_hosts_group; 2234 2235 static ssize_t nvmet_root_discovery_nqn_show(struct config_item *item, 2236 char *page) 2237 { 2238 return snprintf(page, PAGE_SIZE, "%s\n", nvmet_disc_subsys->subsysnqn); 2239 } 2240 2241 static ssize_t nvmet_root_discovery_nqn_store(struct config_item *item, 2242 const char *page, size_t count) 2243 { 2244 struct list_head *entry; 2245 char *old_nqn, *new_nqn; 2246 size_t len; 2247 2248 len = strcspn(page, "\n"); 2249 if (!len || len > NVMF_NQN_FIELD_LEN - 1) 2250 return -EINVAL; 2251 2252 new_nqn = kstrndup(page, len, GFP_KERNEL); 2253 if (!new_nqn) 2254 return -ENOMEM; 2255 2256 down_write(&nvmet_config_sem); 2257 list_for_each(entry, &nvmet_subsystems_group.cg_children) { 2258 struct config_item *item = 2259 container_of(entry, struct config_item, ci_entry); 2260 2261 if (!strncmp(config_item_name(item), page, len)) { 2262 pr_err("duplicate NQN %s\n", config_item_name(item)); 2263 up_write(&nvmet_config_sem); 2264 kfree(new_nqn); 2265 return -EINVAL; 2266 } 2267 } 2268 old_nqn = nvmet_disc_subsys->subsysnqn; 2269 nvmet_disc_subsys->subsysnqn = new_nqn; 2270 up_write(&nvmet_config_sem); 2271 2272 kfree(old_nqn); 2273 return len; 2274 } 2275 2276 
static ssize_t nvmet_root_discovery_nqn_show(struct config_item *item,
		char *page)
{
	return snprintf(page, PAGE_SIZE, "%s\n", nvmet_disc_subsys->subsysnqn);
}

static ssize_t nvmet_root_discovery_nqn_store(struct config_item *item,
		const char *page, size_t count)
{
	struct list_head *entry;
	char *old_nqn, *new_nqn;
	size_t len;

	len = strcspn(page, "\n");
	if (!len || len > NVMF_NQN_FIELD_LEN - 1)
		return -EINVAL;

	new_nqn = kstrndup(page, len, GFP_KERNEL);
	if (!new_nqn)
		return -ENOMEM;

	down_write(&nvmet_config_sem);
	list_for_each(entry, &nvmet_subsystems_group.cg_children) {
		struct config_item *item =
			container_of(entry, struct config_item, ci_entry);

		if (!strncmp(config_item_name(item), page, len)) {
			pr_err("duplicate NQN %s\n", config_item_name(item));
			up_write(&nvmet_config_sem);
			kfree(new_nqn);
			return -EINVAL;
		}
	}
	old_nqn = nvmet_disc_subsys->subsysnqn;
	nvmet_disc_subsys->subsysnqn = new_nqn;
	up_write(&nvmet_config_sem);

	kfree(old_nqn);
	return len;
}

CONFIGFS_ATTR(nvmet_root_, discovery_nqn);

static struct configfs_attribute *nvmet_root_attrs[] = {
	&nvmet_root_attr_discovery_nqn,
	NULL,
};

static const struct config_item_type nvmet_root_type = {
	.ct_attrs = nvmet_root_attrs,
	.ct_owner = THIS_MODULE,
};

static struct configfs_subsystem nvmet_configfs_subsystem = {
	.su_group = {
		.cg_item = {
			.ci_namebuf = "nvmet",
			.ci_type = &nvmet_root_type,
		},
	},
};

int __init nvmet_init_configfs(void)
{
	int ret;

	config_group_init(&nvmet_configfs_subsystem.su_group);
	mutex_init(&nvmet_configfs_subsystem.su_mutex);

	config_group_init_type_name(&nvmet_subsystems_group,
			"subsystems", &nvmet_subsystems_type);
	configfs_add_default_group(&nvmet_subsystems_group,
			&nvmet_configfs_subsystem.su_group);

	config_group_init_type_name(&nvmet_ports_group,
			"ports", &nvmet_ports_type);
	configfs_add_default_group(&nvmet_ports_group,
			&nvmet_configfs_subsystem.su_group);

	config_group_init_type_name(&nvmet_hosts_group,
			"hosts", &nvmet_hosts_type);
	configfs_add_default_group(&nvmet_hosts_group,
			&nvmet_configfs_subsystem.su_group);

	ret = configfs_register_subsystem(&nvmet_configfs_subsystem);
	if (ret) {
		pr_err("configfs_register_subsystem: %d\n", ret);
		return ret;
	}

	return 0;
}

void __exit nvmet_exit_configfs(void)
{
	configfs_unregister_subsystem(&nvmet_configfs_subsystem);
}
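
/*
 * Illustrative sketch only: once this module is loaded and configfs is
 * mounted (typically "mount -t configfs none /sys/kernel/config"), the
 * registration above exposes the tree rooted at /sys/kernel/config/nvmet
 * with the default "subsystems", "ports" and "hosts" groups plus the
 * discovery_nqn attribute, which can be overridden with a write such as
 * (the NQN below is a placeholder):
 *
 *	echo <custom-discovery-nqn> > /sys/kernel/config/nvmet/discovery_nqn
 */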