1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Configfs interface for the NVMe target. 4 * Copyright (c) 2015-2016 HGST, a Western Digital Company. 5 */ 6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 7 #include <linux/kstrtox.h> 8 #include <linux/kernel.h> 9 #include <linux/module.h> 10 #include <linux/slab.h> 11 #include <linux/stat.h> 12 #include <linux/ctype.h> 13 #include <linux/pci.h> 14 #include <linux/pci-p2pdma.h> 15 #ifdef CONFIG_NVME_TARGET_AUTH 16 #include <linux/nvme-auth.h> 17 #endif 18 #include <linux/nvme-keyring.h> 19 #include <crypto/hash.h> 20 #include <crypto/kpp.h> 21 #include <linux/nospec.h> 22 23 #include "nvmet.h" 24 25 static const struct config_item_type nvmet_host_type; 26 static const struct config_item_type nvmet_subsys_type; 27 28 static LIST_HEAD(nvmet_ports_list); 29 struct list_head *nvmet_ports = &nvmet_ports_list; 30 31 struct nvmet_type_name_map { 32 u8 type; 33 const char *name; 34 }; 35 36 static struct nvmet_type_name_map nvmet_transport[] = { 37 { NVMF_TRTYPE_RDMA, "rdma" }, 38 { NVMF_TRTYPE_FC, "fc" }, 39 { NVMF_TRTYPE_TCP, "tcp" }, 40 { NVMF_TRTYPE_PCI, "pci" }, 41 { NVMF_TRTYPE_LOOP, "loop" }, 42 }; 43 44 static const struct nvmet_type_name_map nvmet_addr_family[] = { 45 { NVMF_ADDR_FAMILY_PCI, "pcie" }, 46 { NVMF_ADDR_FAMILY_IP4, "ipv4" }, 47 { NVMF_ADDR_FAMILY_IP6, "ipv6" }, 48 { NVMF_ADDR_FAMILY_IB, "ib" }, 49 { NVMF_ADDR_FAMILY_FC, "fc" }, 50 { NVMF_ADDR_FAMILY_PCI, "pci" }, 51 { NVMF_ADDR_FAMILY_LOOP, "loop" }, 52 }; 53 54 static bool nvmet_is_port_enabled(struct nvmet_port *p, const char *caller) 55 { 56 if (p->enabled) 57 pr_err("Disable port '%u' before changing attribute in %s\n", 58 le16_to_cpu(p->disc_addr.portid), caller); 59 return p->enabled; 60 } 61 62 /* 63 * nvmet_port Generic ConfigFS definitions. 64 * Used in any place in the ConfigFS tree that refers to an address. 65 */ 66 static ssize_t nvmet_addr_adrfam_show(struct config_item *item, char *page) 67 { 68 u8 adrfam = to_nvmet_port(item)->disc_addr.adrfam; 69 int i; 70 71 for (i = 1; i < ARRAY_SIZE(nvmet_addr_family); i++) { 72 if (nvmet_addr_family[i].type == adrfam) 73 return snprintf(page, PAGE_SIZE, "%s\n", 74 nvmet_addr_family[i].name); 75 } 76 77 return snprintf(page, PAGE_SIZE, "\n"); 78 } 79 80 static ssize_t nvmet_addr_adrfam_store(struct config_item *item, 81 const char *page, size_t count) 82 { 83 struct nvmet_port *port = to_nvmet_port(item); 84 int i; 85 86 if (nvmet_is_port_enabled(port, __func__)) 87 return -EACCES; 88 89 for (i = 1; i < ARRAY_SIZE(nvmet_addr_family); i++) { 90 if (sysfs_streq(page, nvmet_addr_family[i].name)) 91 goto found; 92 } 93 94 pr_err("Invalid value '%s' for adrfam\n", page); 95 return -EINVAL; 96 97 found: 98 port->disc_addr.adrfam = nvmet_addr_family[i].type; 99 return count; 100 } 101 102 CONFIGFS_ATTR(nvmet_, addr_adrfam); 103 104 static ssize_t nvmet_addr_portid_show(struct config_item *item, 105 char *page) 106 { 107 __le16 portid = to_nvmet_port(item)->disc_addr.portid; 108 109 return snprintf(page, PAGE_SIZE, "%d\n", le16_to_cpu(portid)); 110 } 111 112 static ssize_t nvmet_addr_portid_store(struct config_item *item, 113 const char *page, size_t count) 114 { 115 struct nvmet_port *port = to_nvmet_port(item); 116 u16 portid = 0; 117 118 if (kstrtou16(page, 0, &portid)) { 119 pr_err("Invalid value '%s' for portid\n", page); 120 return -EINVAL; 121 } 122 123 if (nvmet_is_port_enabled(port, __func__)) 124 return -EACCES; 125 126 port->disc_addr.portid = cpu_to_le16(portid); 127 return count; 128 } 129 130 CONFIGFS_ATTR(nvmet_, addr_portid); 131 132 static ssize_t nvmet_addr_traddr_show(struct config_item *item, 133 char *page) 134 { 135 struct nvmet_port *port = to_nvmet_port(item); 136 137 return snprintf(page, PAGE_SIZE, "%s\n", port->disc_addr.traddr); 138 } 139 140 static ssize_t nvmet_addr_traddr_store(struct config_item *item, 141 const char *page, size_t count) 142 { 143 struct nvmet_port *port = to_nvmet_port(item); 144 145 if (count > NVMF_TRADDR_SIZE) { 146 pr_err("Invalid value '%s' for traddr\n", page); 147 return -EINVAL; 148 } 149 150 if (nvmet_is_port_enabled(port, __func__)) 151 return -EACCES; 152 153 if (sscanf(page, "%s\n", port->disc_addr.traddr) != 1) 154 return -EINVAL; 155 return count; 156 } 157 158 CONFIGFS_ATTR(nvmet_, addr_traddr); 159 160 static const struct nvmet_type_name_map nvmet_addr_treq[] = { 161 { NVMF_TREQ_NOT_SPECIFIED, "not specified" }, 162 { NVMF_TREQ_REQUIRED, "required" }, 163 { NVMF_TREQ_NOT_REQUIRED, "not required" }, 164 }; 165 166 static inline u8 nvmet_port_disc_addr_treq_mask(struct nvmet_port *port) 167 { 168 return (port->disc_addr.treq & ~NVME_TREQ_SECURE_CHANNEL_MASK); 169 } 170 171 static ssize_t nvmet_addr_treq_show(struct config_item *item, char *page) 172 { 173 u8 treq = nvmet_port_disc_addr_treq_secure_channel(to_nvmet_port(item)); 174 int i; 175 176 for (i = 0; i < ARRAY_SIZE(nvmet_addr_treq); i++) { 177 if (treq == nvmet_addr_treq[i].type) 178 return snprintf(page, PAGE_SIZE, "%s\n", 179 nvmet_addr_treq[i].name); 180 } 181 182 return snprintf(page, PAGE_SIZE, "\n"); 183 } 184 185 static ssize_t nvmet_addr_treq_store(struct config_item *item, 186 const char *page, size_t count) 187 { 188 struct nvmet_port *port = to_nvmet_port(item); 189 u8 treq = nvmet_port_disc_addr_treq_mask(port); 190 int i; 191 192 if (nvmet_is_port_enabled(port, __func__)) 193 return -EACCES; 194 195 for (i = 0; i < ARRAY_SIZE(nvmet_addr_treq); i++) { 196 if (sysfs_streq(page, nvmet_addr_treq[i].name)) 197 goto found; 198 } 199 200 pr_err("Invalid value '%s' for treq\n", page); 201 return -EINVAL; 202 203 found: 204 if (port->disc_addr.trtype == NVMF_TRTYPE_TCP && 205 port->disc_addr.tsas.tcp.sectype == NVMF_TCP_SECTYPE_TLS13) { 206 switch (nvmet_addr_treq[i].type) { 207 case NVMF_TREQ_NOT_SPECIFIED: 208 pr_debug("treq '%s' not allowed for TLS1.3\n", 209 nvmet_addr_treq[i].name); 210 return -EINVAL; 211 case NVMF_TREQ_NOT_REQUIRED: 212 pr_warn("Allow non-TLS connections while TLS1.3 is enabled\n"); 213 break; 214 default: 215 break; 216 } 217 } 218 treq |= nvmet_addr_treq[i].type; 219 port->disc_addr.treq = treq; 220 return count; 221 } 222 223 CONFIGFS_ATTR(nvmet_, addr_treq); 224 225 static ssize_t nvmet_addr_trsvcid_show(struct config_item *item, 226 char *page) 227 { 228 struct nvmet_port *port = to_nvmet_port(item); 229 230 return snprintf(page, PAGE_SIZE, "%s\n", port->disc_addr.trsvcid); 231 } 232 233 static ssize_t nvmet_addr_trsvcid_store(struct config_item *item, 234 const char *page, size_t count) 235 { 236 struct nvmet_port *port = to_nvmet_port(item); 237 238 if (count > NVMF_TRSVCID_SIZE) { 239 pr_err("Invalid value '%s' for trsvcid\n", page); 240 return -EINVAL; 241 } 242 if (nvmet_is_port_enabled(port, __func__)) 243 return -EACCES; 244 245 if (sscanf(page, "%s\n", port->disc_addr.trsvcid) != 1) 246 return -EINVAL; 247 return count; 248 } 249 250 CONFIGFS_ATTR(nvmet_, addr_trsvcid); 251 252 static ssize_t nvmet_param_inline_data_size_show(struct config_item *item, 253 char *page) 254 { 255 struct nvmet_port *port = to_nvmet_port(item); 256 257 return snprintf(page, PAGE_SIZE, "%d\n", port->inline_data_size); 258 } 259 260 static ssize_t nvmet_param_inline_data_size_store(struct config_item *item, 261 const char *page, size_t count) 262 { 263 struct nvmet_port *port = to_nvmet_port(item); 264 int ret; 265 266 if (nvmet_is_port_enabled(port, __func__)) 267 return -EACCES; 268 ret = kstrtoint(page, 0, &port->inline_data_size); 269 if (ret) { 270 pr_err("Invalid value '%s' for inline_data_size\n", page); 271 return -EINVAL; 272 } 273 return count; 274 } 275 276 CONFIGFS_ATTR(nvmet_, param_inline_data_size); 277 278 static ssize_t nvmet_param_max_queue_size_show(struct config_item *item, 279 char *page) 280 { 281 struct nvmet_port *port = to_nvmet_port(item); 282 283 return snprintf(page, PAGE_SIZE, "%d\n", port->max_queue_size); 284 } 285 286 static ssize_t nvmet_param_max_queue_size_store(struct config_item *item, 287 const char *page, size_t count) 288 { 289 struct nvmet_port *port = to_nvmet_port(item); 290 int ret; 291 292 if (nvmet_is_port_enabled(port, __func__)) 293 return -EACCES; 294 ret = kstrtoint(page, 0, &port->max_queue_size); 295 if (ret) { 296 pr_err("Invalid value '%s' for max_queue_size\n", page); 297 return -EINVAL; 298 } 299 return count; 300 } 301 302 CONFIGFS_ATTR(nvmet_, param_max_queue_size); 303 304 #ifdef CONFIG_BLK_DEV_INTEGRITY 305 static ssize_t nvmet_param_pi_enable_show(struct config_item *item, 306 char *page) 307 { 308 struct nvmet_port *port = to_nvmet_port(item); 309 310 return snprintf(page, PAGE_SIZE, "%d\n", port->pi_enable); 311 } 312 313 static ssize_t nvmet_param_pi_enable_store(struct config_item *item, 314 const char *page, size_t count) 315 { 316 struct nvmet_port *port = to_nvmet_port(item); 317 bool val; 318 319 if (kstrtobool(page, &val)) 320 return -EINVAL; 321 322 if (nvmet_is_port_enabled(port, __func__)) 323 return -EACCES; 324 325 port->pi_enable = val; 326 return count; 327 } 328 329 CONFIGFS_ATTR(nvmet_, param_pi_enable); 330 #endif 331 332 static ssize_t nvmet_addr_trtype_show(struct config_item *item, 333 char *page) 334 { 335 struct nvmet_port *port = to_nvmet_port(item); 336 int i; 337 338 for (i = 0; i < ARRAY_SIZE(nvmet_transport); i++) { 339 if (port->disc_addr.trtype == nvmet_transport[i].type) 340 return snprintf(page, PAGE_SIZE, 341 "%s\n", nvmet_transport[i].name); 342 } 343 344 return sprintf(page, "\n"); 345 } 346 347 static void nvmet_port_init_tsas_rdma(struct nvmet_port *port) 348 { 349 port->disc_addr.tsas.rdma.qptype = NVMF_RDMA_QPTYPE_CONNECTED; 350 port->disc_addr.tsas.rdma.prtype = NVMF_RDMA_PRTYPE_NOT_SPECIFIED; 351 port->disc_addr.tsas.rdma.cms = NVMF_RDMA_CMS_RDMA_CM; 352 } 353 354 static void nvmet_port_init_tsas_tcp(struct nvmet_port *port, int sectype) 355 { 356 port->disc_addr.tsas.tcp.sectype = sectype; 357 } 358 359 static ssize_t nvmet_addr_trtype_store(struct config_item *item, 360 const char *page, size_t count) 361 { 362 struct nvmet_port *port = to_nvmet_port(item); 363 int i; 364 365 if (nvmet_is_port_enabled(port, __func__)) 366 return -EACCES; 367 368 for (i = 0; i < ARRAY_SIZE(nvmet_transport); i++) { 369 if (sysfs_streq(page, nvmet_transport[i].name)) 370 goto found; 371 } 372 373 pr_err("Invalid value '%s' for trtype\n", page); 374 return -EINVAL; 375 376 found: 377 memset(&port->disc_addr.tsas, 0, NVMF_TSAS_SIZE); 378 port->disc_addr.trtype = nvmet_transport[i].type; 379 if (port->disc_addr.trtype == NVMF_TRTYPE_RDMA) 380 nvmet_port_init_tsas_rdma(port); 381 else if (port->disc_addr.trtype == NVMF_TRTYPE_TCP) 382 nvmet_port_init_tsas_tcp(port, NVMF_TCP_SECTYPE_NONE); 383 return count; 384 } 385 386 CONFIGFS_ATTR(nvmet_, addr_trtype); 387 388 static const struct nvmet_type_name_map nvmet_addr_tsas_tcp[] = { 389 { NVMF_TCP_SECTYPE_NONE, "none" }, 390 { NVMF_TCP_SECTYPE_TLS13, "tls1.3" }, 391 }; 392 393 static const struct nvmet_type_name_map nvmet_addr_tsas_rdma[] = { 394 { NVMF_RDMA_QPTYPE_CONNECTED, "connected" }, 395 { NVMF_RDMA_QPTYPE_DATAGRAM, "datagram" }, 396 }; 397 398 static ssize_t nvmet_addr_tsas_show(struct config_item *item, 399 char *page) 400 { 401 struct nvmet_port *port = to_nvmet_port(item); 402 int i; 403 404 if (port->disc_addr.trtype == NVMF_TRTYPE_TCP) { 405 for (i = 0; i < ARRAY_SIZE(nvmet_addr_tsas_tcp); i++) { 406 if (port->disc_addr.tsas.tcp.sectype == nvmet_addr_tsas_tcp[i].type) 407 return sprintf(page, "%s\n", nvmet_addr_tsas_tcp[i].name); 408 } 409 } else if (port->disc_addr.trtype == NVMF_TRTYPE_RDMA) { 410 for (i = 0; i < ARRAY_SIZE(nvmet_addr_tsas_rdma); i++) { 411 if (port->disc_addr.tsas.rdma.qptype == nvmet_addr_tsas_rdma[i].type) 412 return sprintf(page, "%s\n", nvmet_addr_tsas_rdma[i].name); 413 } 414 } 415 return sprintf(page, "\n"); 416 } 417 418 static u8 nvmet_addr_tsas_rdma_store(const char *page) 419 { 420 int i; 421 422 for (i = 0; i < ARRAY_SIZE(nvmet_addr_tsas_rdma); i++) { 423 if (sysfs_streq(page, nvmet_addr_tsas_rdma[i].name)) 424 return nvmet_addr_tsas_rdma[i].type; 425 } 426 return NVMF_RDMA_QPTYPE_INVALID; 427 } 428 429 static u8 nvmet_addr_tsas_tcp_store(const char *page) 430 { 431 int i; 432 433 for (i = 0; i < ARRAY_SIZE(nvmet_addr_tsas_tcp); i++) { 434 if (sysfs_streq(page, nvmet_addr_tsas_tcp[i].name)) 435 return nvmet_addr_tsas_tcp[i].type; 436 } 437 return NVMF_TCP_SECTYPE_INVALID; 438 } 439 440 static ssize_t nvmet_addr_tsas_store(struct config_item *item, 441 const char *page, size_t count) 442 { 443 struct nvmet_port *port = to_nvmet_port(item); 444 u8 treq = nvmet_port_disc_addr_treq_mask(port); 445 u8 sectype, qptype; 446 447 if (nvmet_is_port_enabled(port, __func__)) 448 return -EACCES; 449 450 if (port->disc_addr.trtype == NVMF_TRTYPE_RDMA) { 451 qptype = nvmet_addr_tsas_rdma_store(page); 452 if (qptype == port->disc_addr.tsas.rdma.qptype) 453 return count; 454 } else if (port->disc_addr.trtype == NVMF_TRTYPE_TCP) { 455 sectype = nvmet_addr_tsas_tcp_store(page); 456 if (sectype != NVMF_TCP_SECTYPE_INVALID) 457 goto found; 458 } 459 460 pr_err("Invalid value '%s' for tsas\n", page); 461 return -EINVAL; 462 463 found: 464 if (sectype == NVMF_TCP_SECTYPE_TLS13) { 465 if (!IS_ENABLED(CONFIG_NVME_TARGET_TCP_TLS)) { 466 pr_err("TLS is not supported\n"); 467 return -EINVAL; 468 } 469 if (!port->keyring) { 470 pr_err("TLS keyring not configured\n"); 471 return -EINVAL; 472 } 473 } 474 475 nvmet_port_init_tsas_tcp(port, sectype); 476 /* 477 * If TLS is enabled TREQ should be set to 'required' per default 478 */ 479 if (sectype == NVMF_TCP_SECTYPE_TLS13) { 480 u8 sc = nvmet_port_disc_addr_treq_secure_channel(port); 481 482 if (sc == NVMF_TREQ_NOT_SPECIFIED) 483 treq |= NVMF_TREQ_REQUIRED; 484 else 485 treq |= sc; 486 } else { 487 treq |= NVMF_TREQ_NOT_SPECIFIED; 488 } 489 port->disc_addr.treq = treq; 490 return count; 491 } 492 493 CONFIGFS_ATTR(nvmet_, addr_tsas); 494 495 /* 496 * Namespace structures & file operation functions below 497 */ 498 static ssize_t nvmet_ns_device_path_show(struct config_item *item, char *page) 499 { 500 return sprintf(page, "%s\n", to_nvmet_ns(item)->device_path); 501 } 502 503 static ssize_t nvmet_ns_device_path_store(struct config_item *item, 504 const char *page, size_t count) 505 { 506 struct nvmet_ns *ns = to_nvmet_ns(item); 507 struct nvmet_subsys *subsys = ns->subsys; 508 size_t len; 509 int ret; 510 511 mutex_lock(&subsys->lock); 512 ret = -EBUSY; 513 if (ns->enabled) 514 goto out_unlock; 515 516 ret = -EINVAL; 517 len = strcspn(page, "\n"); 518 if (!len) 519 goto out_unlock; 520 521 kfree(ns->device_path); 522 ret = -ENOMEM; 523 ns->device_path = kmemdup_nul(page, len, GFP_KERNEL); 524 if (!ns->device_path) 525 goto out_unlock; 526 527 mutex_unlock(&subsys->lock); 528 return count; 529 530 out_unlock: 531 mutex_unlock(&subsys->lock); 532 return ret; 533 } 534 535 CONFIGFS_ATTR(nvmet_ns_, device_path); 536 537 #ifdef CONFIG_PCI_P2PDMA 538 static ssize_t nvmet_ns_p2pmem_show(struct config_item *item, char *page) 539 { 540 struct nvmet_ns *ns = to_nvmet_ns(item); 541 542 return pci_p2pdma_enable_show(page, ns->p2p_dev, ns->use_p2pmem); 543 } 544 545 static ssize_t nvmet_ns_p2pmem_store(struct config_item *item, 546 const char *page, size_t count) 547 { 548 struct nvmet_ns *ns = to_nvmet_ns(item); 549 struct pci_dev *p2p_dev = NULL; 550 bool use_p2pmem; 551 int ret = count; 552 int error; 553 554 mutex_lock(&ns->subsys->lock); 555 if (ns->enabled) { 556 ret = -EBUSY; 557 goto out_unlock; 558 } 559 560 error = pci_p2pdma_enable_store(page, &p2p_dev, &use_p2pmem); 561 if (error) { 562 ret = error; 563 goto out_unlock; 564 } 565 566 ns->use_p2pmem = use_p2pmem; 567 pci_dev_put(ns->p2p_dev); 568 ns->p2p_dev = p2p_dev; 569 570 out_unlock: 571 mutex_unlock(&ns->subsys->lock); 572 573 return ret; 574 } 575 576 CONFIGFS_ATTR(nvmet_ns_, p2pmem); 577 #endif /* CONFIG_PCI_P2PDMA */ 578 579 static ssize_t nvmet_ns_device_uuid_show(struct config_item *item, char *page) 580 { 581 return sprintf(page, "%pUb\n", &to_nvmet_ns(item)->uuid); 582 } 583 584 static ssize_t nvmet_ns_device_uuid_store(struct config_item *item, 585 const char *page, size_t count) 586 { 587 struct nvmet_ns *ns = to_nvmet_ns(item); 588 struct nvmet_subsys *subsys = ns->subsys; 589 int ret = 0; 590 591 mutex_lock(&subsys->lock); 592 if (ns->enabled) { 593 ret = -EBUSY; 594 goto out_unlock; 595 } 596 597 if (uuid_parse(page, &ns->uuid)) 598 ret = -EINVAL; 599 600 out_unlock: 601 mutex_unlock(&subsys->lock); 602 return ret ? ret : count; 603 } 604 605 CONFIGFS_ATTR(nvmet_ns_, device_uuid); 606 607 static ssize_t nvmet_ns_device_nguid_show(struct config_item *item, char *page) 608 { 609 return sprintf(page, "%pUb\n", &to_nvmet_ns(item)->nguid); 610 } 611 612 static ssize_t nvmet_ns_device_nguid_store(struct config_item *item, 613 const char *page, size_t count) 614 { 615 struct nvmet_ns *ns = to_nvmet_ns(item); 616 struct nvmet_subsys *subsys = ns->subsys; 617 u8 nguid[16]; 618 const char *p = page; 619 int i; 620 int ret = 0; 621 622 mutex_lock(&subsys->lock); 623 if (ns->enabled) { 624 ret = -EBUSY; 625 goto out_unlock; 626 } 627 628 for (i = 0; i < 16; i++) { 629 if (p + 2 > page + count) { 630 ret = -EINVAL; 631 goto out_unlock; 632 } 633 if (!isxdigit(p[0]) || !isxdigit(p[1])) { 634 ret = -EINVAL; 635 goto out_unlock; 636 } 637 638 nguid[i] = (hex_to_bin(p[0]) << 4) | hex_to_bin(p[1]); 639 p += 2; 640 641 if (*p == '-' || *p == ':') 642 p++; 643 } 644 645 memcpy(&ns->nguid, nguid, sizeof(nguid)); 646 out_unlock: 647 mutex_unlock(&subsys->lock); 648 return ret ? ret : count; 649 } 650 651 CONFIGFS_ATTR(nvmet_ns_, device_nguid); 652 653 static ssize_t nvmet_ns_ana_grpid_show(struct config_item *item, char *page) 654 { 655 return sprintf(page, "%u\n", to_nvmet_ns(item)->anagrpid); 656 } 657 658 static ssize_t nvmet_ns_ana_grpid_store(struct config_item *item, 659 const char *page, size_t count) 660 { 661 struct nvmet_ns *ns = to_nvmet_ns(item); 662 u32 oldgrpid, newgrpid; 663 int ret; 664 665 ret = kstrtou32(page, 0, &newgrpid); 666 if (ret) 667 return ret; 668 669 if (newgrpid < 1 || newgrpid > NVMET_MAX_ANAGRPS) 670 return -EINVAL; 671 672 down_write(&nvmet_ana_sem); 673 oldgrpid = ns->anagrpid; 674 newgrpid = array_index_nospec(newgrpid, NVMET_MAX_ANAGRPS); 675 nvmet_ana_group_enabled[newgrpid]++; 676 ns->anagrpid = newgrpid; 677 nvmet_ana_group_enabled[oldgrpid]--; 678 nvmet_ana_chgcnt++; 679 up_write(&nvmet_ana_sem); 680 681 nvmet_send_ana_event(ns->subsys, NULL); 682 return count; 683 } 684 685 CONFIGFS_ATTR(nvmet_ns_, ana_grpid); 686 687 static ssize_t nvmet_ns_enable_show(struct config_item *item, char *page) 688 { 689 return sprintf(page, "%d\n", to_nvmet_ns(item)->enabled); 690 } 691 692 static ssize_t nvmet_ns_enable_store(struct config_item *item, 693 const char *page, size_t count) 694 { 695 struct nvmet_ns *ns = to_nvmet_ns(item); 696 bool enable; 697 int ret = 0; 698 699 if (kstrtobool(page, &enable)) 700 return -EINVAL; 701 702 /* 703 * take a global nvmet_config_sem because the disable routine has a 704 * window where it releases the subsys-lock, giving a chance to 705 * a parallel enable to concurrently execute causing the disable to 706 * have a misaccounting of the ns percpu_ref. 707 */ 708 down_write(&nvmet_config_sem); 709 if (enable) 710 ret = nvmet_ns_enable(ns); 711 else 712 nvmet_ns_disable(ns); 713 up_write(&nvmet_config_sem); 714 715 return ret ? ret : count; 716 } 717 718 CONFIGFS_ATTR(nvmet_ns_, enable); 719 720 static ssize_t nvmet_ns_buffered_io_show(struct config_item *item, char *page) 721 { 722 return sprintf(page, "%d\n", to_nvmet_ns(item)->buffered_io); 723 } 724 725 static ssize_t nvmet_ns_buffered_io_store(struct config_item *item, 726 const char *page, size_t count) 727 { 728 struct nvmet_ns *ns = to_nvmet_ns(item); 729 bool val; 730 731 if (kstrtobool(page, &val)) 732 return -EINVAL; 733 734 mutex_lock(&ns->subsys->lock); 735 if (ns->enabled) { 736 pr_err("disable ns before setting buffered_io value.\n"); 737 mutex_unlock(&ns->subsys->lock); 738 return -EINVAL; 739 } 740 741 ns->buffered_io = val; 742 mutex_unlock(&ns->subsys->lock); 743 return count; 744 } 745 746 CONFIGFS_ATTR(nvmet_ns_, buffered_io); 747 748 static ssize_t nvmet_ns_revalidate_size_store(struct config_item *item, 749 const char *page, size_t count) 750 { 751 struct nvmet_ns *ns = to_nvmet_ns(item); 752 bool val; 753 754 if (kstrtobool(page, &val)) 755 return -EINVAL; 756 757 if (!val) 758 return -EINVAL; 759 760 mutex_lock(&ns->subsys->lock); 761 if (!ns->enabled) { 762 pr_err("enable ns before revalidate.\n"); 763 mutex_unlock(&ns->subsys->lock); 764 return -EINVAL; 765 } 766 if (nvmet_ns_revalidate(ns)) 767 nvmet_ns_changed(ns->subsys, ns->nsid); 768 mutex_unlock(&ns->subsys->lock); 769 return count; 770 } 771 772 CONFIGFS_ATTR_WO(nvmet_ns_, revalidate_size); 773 774 static ssize_t nvmet_ns_resv_enable_show(struct config_item *item, char *page) 775 { 776 return sysfs_emit(page, "%d\n", to_nvmet_ns(item)->pr.enable); 777 } 778 779 static ssize_t nvmet_ns_resv_enable_store(struct config_item *item, 780 const char *page, size_t count) 781 { 782 struct nvmet_ns *ns = to_nvmet_ns(item); 783 bool val; 784 785 if (kstrtobool(page, &val)) 786 return -EINVAL; 787 788 mutex_lock(&ns->subsys->lock); 789 if (ns->enabled) { 790 pr_err("the ns:%d is already enabled.\n", ns->nsid); 791 mutex_unlock(&ns->subsys->lock); 792 return -EINVAL; 793 } 794 ns->pr.enable = val; 795 mutex_unlock(&ns->subsys->lock); 796 return count; 797 } 798 CONFIGFS_ATTR(nvmet_ns_, resv_enable); 799 800 static struct configfs_attribute *nvmet_ns_attrs[] = { 801 &nvmet_ns_attr_device_path, 802 &nvmet_ns_attr_device_nguid, 803 &nvmet_ns_attr_device_uuid, 804 &nvmet_ns_attr_ana_grpid, 805 &nvmet_ns_attr_enable, 806 &nvmet_ns_attr_buffered_io, 807 &nvmet_ns_attr_revalidate_size, 808 &nvmet_ns_attr_resv_enable, 809 #ifdef CONFIG_PCI_P2PDMA 810 &nvmet_ns_attr_p2pmem, 811 #endif 812 NULL, 813 }; 814 815 static void nvmet_ns_release(struct config_item *item) 816 { 817 struct nvmet_ns *ns = to_nvmet_ns(item); 818 819 nvmet_ns_free(ns); 820 } 821 822 static struct configfs_item_operations nvmet_ns_item_ops = { 823 .release = nvmet_ns_release, 824 }; 825 826 static const struct config_item_type nvmet_ns_type = { 827 .ct_item_ops = &nvmet_ns_item_ops, 828 .ct_attrs = nvmet_ns_attrs, 829 .ct_owner = THIS_MODULE, 830 }; 831 832 static struct config_group *nvmet_ns_make(struct config_group *group, 833 const char *name) 834 { 835 struct nvmet_subsys *subsys = namespaces_to_subsys(&group->cg_item); 836 struct nvmet_ns *ns; 837 int ret; 838 u32 nsid; 839 840 ret = kstrtou32(name, 0, &nsid); 841 if (ret) 842 goto out; 843 844 ret = -EINVAL; 845 if (nsid == 0 || nsid == NVME_NSID_ALL) { 846 pr_err("invalid nsid %#x", nsid); 847 goto out; 848 } 849 850 ret = -ENOMEM; 851 ns = nvmet_ns_alloc(subsys, nsid); 852 if (!ns) 853 goto out; 854 config_group_init_type_name(&ns->group, name, &nvmet_ns_type); 855 856 pr_info("adding nsid %d to subsystem %s\n", nsid, subsys->subsysnqn); 857 858 return &ns->group; 859 out: 860 return ERR_PTR(ret); 861 } 862 863 static struct configfs_group_operations nvmet_namespaces_group_ops = { 864 .make_group = nvmet_ns_make, 865 }; 866 867 static const struct config_item_type nvmet_namespaces_type = { 868 .ct_group_ops = &nvmet_namespaces_group_ops, 869 .ct_owner = THIS_MODULE, 870 }; 871 872 #ifdef CONFIG_NVME_TARGET_PASSTHRU 873 874 static ssize_t nvmet_passthru_device_path_show(struct config_item *item, 875 char *page) 876 { 877 struct nvmet_subsys *subsys = to_subsys(item->ci_parent); 878 879 return snprintf(page, PAGE_SIZE, "%s\n", subsys->passthru_ctrl_path); 880 } 881 882 static ssize_t nvmet_passthru_device_path_store(struct config_item *item, 883 const char *page, size_t count) 884 { 885 struct nvmet_subsys *subsys = to_subsys(item->ci_parent); 886 size_t len; 887 int ret; 888 889 mutex_lock(&subsys->lock); 890 891 ret = -EBUSY; 892 if (subsys->passthru_ctrl) 893 goto out_unlock; 894 895 ret = -EINVAL; 896 len = strcspn(page, "\n"); 897 if (!len) 898 goto out_unlock; 899 900 kfree(subsys->passthru_ctrl_path); 901 ret = -ENOMEM; 902 subsys->passthru_ctrl_path = kstrndup(page, len, GFP_KERNEL); 903 if (!subsys->passthru_ctrl_path) 904 goto out_unlock; 905 906 mutex_unlock(&subsys->lock); 907 908 return count; 909 out_unlock: 910 mutex_unlock(&subsys->lock); 911 return ret; 912 } 913 CONFIGFS_ATTR(nvmet_passthru_, device_path); 914 915 static ssize_t nvmet_passthru_enable_show(struct config_item *item, 916 char *page) 917 { 918 struct nvmet_subsys *subsys = to_subsys(item->ci_parent); 919 920 return sprintf(page, "%d\n", subsys->passthru_ctrl ? 1 : 0); 921 } 922 923 static ssize_t nvmet_passthru_enable_store(struct config_item *item, 924 const char *page, size_t count) 925 { 926 struct nvmet_subsys *subsys = to_subsys(item->ci_parent); 927 bool enable; 928 int ret = 0; 929 930 if (kstrtobool(page, &enable)) 931 return -EINVAL; 932 933 if (enable) 934 ret = nvmet_passthru_ctrl_enable(subsys); 935 else 936 nvmet_passthru_ctrl_disable(subsys); 937 938 return ret ? ret : count; 939 } 940 CONFIGFS_ATTR(nvmet_passthru_, enable); 941 942 static ssize_t nvmet_passthru_admin_timeout_show(struct config_item *item, 943 char *page) 944 { 945 return sprintf(page, "%u\n", to_subsys(item->ci_parent)->admin_timeout); 946 } 947 948 static ssize_t nvmet_passthru_admin_timeout_store(struct config_item *item, 949 const char *page, size_t count) 950 { 951 struct nvmet_subsys *subsys = to_subsys(item->ci_parent); 952 unsigned int timeout; 953 954 if (kstrtouint(page, 0, &timeout)) 955 return -EINVAL; 956 subsys->admin_timeout = timeout; 957 return count; 958 } 959 CONFIGFS_ATTR(nvmet_passthru_, admin_timeout); 960 961 static ssize_t nvmet_passthru_io_timeout_show(struct config_item *item, 962 char *page) 963 { 964 return sprintf(page, "%u\n", to_subsys(item->ci_parent)->io_timeout); 965 } 966 967 static ssize_t nvmet_passthru_io_timeout_store(struct config_item *item, 968 const char *page, size_t count) 969 { 970 struct nvmet_subsys *subsys = to_subsys(item->ci_parent); 971 unsigned int timeout; 972 973 if (kstrtouint(page, 0, &timeout)) 974 return -EINVAL; 975 subsys->io_timeout = timeout; 976 return count; 977 } 978 CONFIGFS_ATTR(nvmet_passthru_, io_timeout); 979 980 static ssize_t nvmet_passthru_clear_ids_show(struct config_item *item, 981 char *page) 982 { 983 return sprintf(page, "%u\n", to_subsys(item->ci_parent)->clear_ids); 984 } 985 986 static ssize_t nvmet_passthru_clear_ids_store(struct config_item *item, 987 const char *page, size_t count) 988 { 989 struct nvmet_subsys *subsys = to_subsys(item->ci_parent); 990 unsigned int clear_ids; 991 992 if (kstrtouint(page, 0, &clear_ids)) 993 return -EINVAL; 994 subsys->clear_ids = clear_ids; 995 return count; 996 } 997 CONFIGFS_ATTR(nvmet_passthru_, clear_ids); 998 999 static struct configfs_attribute *nvmet_passthru_attrs[] = { 1000 &nvmet_passthru_attr_device_path, 1001 &nvmet_passthru_attr_enable, 1002 &nvmet_passthru_attr_admin_timeout, 1003 &nvmet_passthru_attr_io_timeout, 1004 &nvmet_passthru_attr_clear_ids, 1005 NULL, 1006 }; 1007 1008 static const struct config_item_type nvmet_passthru_type = { 1009 .ct_attrs = nvmet_passthru_attrs, 1010 .ct_owner = THIS_MODULE, 1011 }; 1012 1013 static void nvmet_add_passthru_group(struct nvmet_subsys *subsys) 1014 { 1015 config_group_init_type_name(&subsys->passthru_group, 1016 "passthru", &nvmet_passthru_type); 1017 configfs_add_default_group(&subsys->passthru_group, 1018 &subsys->group); 1019 } 1020 1021 #else /* CONFIG_NVME_TARGET_PASSTHRU */ 1022 1023 static void nvmet_add_passthru_group(struct nvmet_subsys *subsys) 1024 { 1025 } 1026 1027 #endif /* CONFIG_NVME_TARGET_PASSTHRU */ 1028 1029 static int nvmet_port_subsys_allow_link(struct config_item *parent, 1030 struct config_item *target) 1031 { 1032 struct nvmet_port *port = to_nvmet_port(parent->ci_parent); 1033 struct nvmet_subsys *subsys; 1034 struct nvmet_subsys_link *link, *p; 1035 int ret; 1036 1037 if (target->ci_type != &nvmet_subsys_type) { 1038 pr_err("can only link subsystems into the subsystems dir.!\n"); 1039 return -EINVAL; 1040 } 1041 subsys = to_subsys(target); 1042 link = kmalloc(sizeof(*link), GFP_KERNEL); 1043 if (!link) 1044 return -ENOMEM; 1045 link->subsys = subsys; 1046 1047 down_write(&nvmet_config_sem); 1048 ret = -EEXIST; 1049 list_for_each_entry(p, &port->subsystems, entry) { 1050 if (p->subsys == subsys) 1051 goto out_free_link; 1052 } 1053 1054 if (list_empty(&port->subsystems)) { 1055 ret = nvmet_enable_port(port); 1056 if (ret) 1057 goto out_free_link; 1058 } 1059 1060 list_add_tail(&link->entry, &port->subsystems); 1061 nvmet_port_disc_changed(port, subsys); 1062 1063 up_write(&nvmet_config_sem); 1064 return 0; 1065 1066 out_free_link: 1067 up_write(&nvmet_config_sem); 1068 kfree(link); 1069 return ret; 1070 } 1071 1072 static void nvmet_port_subsys_drop_link(struct config_item *parent, 1073 struct config_item *target) 1074 { 1075 struct nvmet_port *port = to_nvmet_port(parent->ci_parent); 1076 struct nvmet_subsys *subsys = to_subsys(target); 1077 struct nvmet_subsys_link *p; 1078 1079 down_write(&nvmet_config_sem); 1080 list_for_each_entry(p, &port->subsystems, entry) { 1081 if (p->subsys == subsys) 1082 goto found; 1083 } 1084 up_write(&nvmet_config_sem); 1085 return; 1086 1087 found: 1088 list_del(&p->entry); 1089 nvmet_port_del_ctrls(port, subsys); 1090 nvmet_port_disc_changed(port, subsys); 1091 1092 if (list_empty(&port->subsystems)) 1093 nvmet_disable_port(port); 1094 up_write(&nvmet_config_sem); 1095 kfree(p); 1096 } 1097 1098 static struct configfs_item_operations nvmet_port_subsys_item_ops = { 1099 .allow_link = nvmet_port_subsys_allow_link, 1100 .drop_link = nvmet_port_subsys_drop_link, 1101 }; 1102 1103 static const struct config_item_type nvmet_port_subsys_type = { 1104 .ct_item_ops = &nvmet_port_subsys_item_ops, 1105 .ct_owner = THIS_MODULE, 1106 }; 1107 1108 static int nvmet_allowed_hosts_allow_link(struct config_item *parent, 1109 struct config_item *target) 1110 { 1111 struct nvmet_subsys *subsys = to_subsys(parent->ci_parent); 1112 struct nvmet_host *host; 1113 struct nvmet_host_link *link, *p; 1114 int ret; 1115 1116 if (target->ci_type != &nvmet_host_type) { 1117 pr_err("can only link hosts into the allowed_hosts directory!\n"); 1118 return -EINVAL; 1119 } 1120 1121 host = to_host(target); 1122 link = kmalloc(sizeof(*link), GFP_KERNEL); 1123 if (!link) 1124 return -ENOMEM; 1125 link->host = host; 1126 1127 down_write(&nvmet_config_sem); 1128 ret = -EINVAL; 1129 if (subsys->allow_any_host) { 1130 pr_err("can't add hosts when allow_any_host is set!\n"); 1131 goto out_free_link; 1132 } 1133 1134 ret = -EEXIST; 1135 list_for_each_entry(p, &subsys->hosts, entry) { 1136 if (!strcmp(nvmet_host_name(p->host), nvmet_host_name(host))) 1137 goto out_free_link; 1138 } 1139 list_add_tail(&link->entry, &subsys->hosts); 1140 nvmet_subsys_disc_changed(subsys, host); 1141 1142 up_write(&nvmet_config_sem); 1143 return 0; 1144 out_free_link: 1145 up_write(&nvmet_config_sem); 1146 kfree(link); 1147 return ret; 1148 } 1149 1150 static void nvmet_allowed_hosts_drop_link(struct config_item *parent, 1151 struct config_item *target) 1152 { 1153 struct nvmet_subsys *subsys = to_subsys(parent->ci_parent); 1154 struct nvmet_host *host = to_host(target); 1155 struct nvmet_host_link *p; 1156 1157 down_write(&nvmet_config_sem); 1158 list_for_each_entry(p, &subsys->hosts, entry) { 1159 if (!strcmp(nvmet_host_name(p->host), nvmet_host_name(host))) 1160 goto found; 1161 } 1162 up_write(&nvmet_config_sem); 1163 return; 1164 1165 found: 1166 list_del(&p->entry); 1167 nvmet_subsys_disc_changed(subsys, host); 1168 1169 up_write(&nvmet_config_sem); 1170 kfree(p); 1171 } 1172 1173 static struct configfs_item_operations nvmet_allowed_hosts_item_ops = { 1174 .allow_link = nvmet_allowed_hosts_allow_link, 1175 .drop_link = nvmet_allowed_hosts_drop_link, 1176 }; 1177 1178 static const struct config_item_type nvmet_allowed_hosts_type = { 1179 .ct_item_ops = &nvmet_allowed_hosts_item_ops, 1180 .ct_owner = THIS_MODULE, 1181 }; 1182 1183 static ssize_t nvmet_subsys_attr_allow_any_host_show(struct config_item *item, 1184 char *page) 1185 { 1186 return snprintf(page, PAGE_SIZE, "%d\n", 1187 to_subsys(item)->allow_any_host); 1188 } 1189 1190 static ssize_t nvmet_subsys_attr_allow_any_host_store(struct config_item *item, 1191 const char *page, size_t count) 1192 { 1193 struct nvmet_subsys *subsys = to_subsys(item); 1194 bool allow_any_host; 1195 int ret = 0; 1196 1197 if (kstrtobool(page, &allow_any_host)) 1198 return -EINVAL; 1199 1200 down_write(&nvmet_config_sem); 1201 if (allow_any_host && !list_empty(&subsys->hosts)) { 1202 pr_err("Can't set allow_any_host when explicit hosts are set!\n"); 1203 ret = -EINVAL; 1204 goto out_unlock; 1205 } 1206 1207 if (subsys->allow_any_host != allow_any_host) { 1208 subsys->allow_any_host = allow_any_host; 1209 nvmet_subsys_disc_changed(subsys, NULL); 1210 } 1211 1212 out_unlock: 1213 up_write(&nvmet_config_sem); 1214 return ret ? ret : count; 1215 } 1216 1217 CONFIGFS_ATTR(nvmet_subsys_, attr_allow_any_host); 1218 1219 static ssize_t nvmet_subsys_attr_version_show(struct config_item *item, 1220 char *page) 1221 { 1222 struct nvmet_subsys *subsys = to_subsys(item); 1223 1224 if (NVME_TERTIARY(subsys->ver)) 1225 return snprintf(page, PAGE_SIZE, "%llu.%llu.%llu\n", 1226 NVME_MAJOR(subsys->ver), 1227 NVME_MINOR(subsys->ver), 1228 NVME_TERTIARY(subsys->ver)); 1229 1230 return snprintf(page, PAGE_SIZE, "%llu.%llu\n", 1231 NVME_MAJOR(subsys->ver), 1232 NVME_MINOR(subsys->ver)); 1233 } 1234 1235 static ssize_t 1236 nvmet_subsys_attr_version_store_locked(struct nvmet_subsys *subsys, 1237 const char *page, size_t count) 1238 { 1239 int major, minor, tertiary = 0; 1240 int ret; 1241 1242 if (subsys->subsys_discovered) { 1243 if (NVME_TERTIARY(subsys->ver)) 1244 pr_err("Can't set version number. %llu.%llu.%llu is already assigned\n", 1245 NVME_MAJOR(subsys->ver), 1246 NVME_MINOR(subsys->ver), 1247 NVME_TERTIARY(subsys->ver)); 1248 else 1249 pr_err("Can't set version number. %llu.%llu is already assigned\n", 1250 NVME_MAJOR(subsys->ver), 1251 NVME_MINOR(subsys->ver)); 1252 return -EINVAL; 1253 } 1254 1255 /* passthru subsystems use the underlying controller's version */ 1256 if (nvmet_is_passthru_subsys(subsys)) 1257 return -EINVAL; 1258 1259 ret = sscanf(page, "%d.%d.%d\n", &major, &minor, &tertiary); 1260 if (ret != 2 && ret != 3) 1261 return -EINVAL; 1262 1263 subsys->ver = NVME_VS(major, minor, tertiary); 1264 1265 return count; 1266 } 1267 1268 static ssize_t nvmet_subsys_attr_version_store(struct config_item *item, 1269 const char *page, size_t count) 1270 { 1271 struct nvmet_subsys *subsys = to_subsys(item); 1272 ssize_t ret; 1273 1274 down_write(&nvmet_config_sem); 1275 mutex_lock(&subsys->lock); 1276 ret = nvmet_subsys_attr_version_store_locked(subsys, page, count); 1277 mutex_unlock(&subsys->lock); 1278 up_write(&nvmet_config_sem); 1279 1280 return ret; 1281 } 1282 CONFIGFS_ATTR(nvmet_subsys_, attr_version); 1283 1284 /* See Section 1.5 of NVMe 1.4 */ 1285 static bool nvmet_is_ascii(const char c) 1286 { 1287 return c >= 0x20 && c <= 0x7e; 1288 } 1289 1290 static ssize_t nvmet_subsys_attr_serial_show(struct config_item *item, 1291 char *page) 1292 { 1293 struct nvmet_subsys *subsys = to_subsys(item); 1294 1295 return snprintf(page, PAGE_SIZE, "%.*s\n", 1296 NVMET_SN_MAX_SIZE, subsys->serial); 1297 } 1298 1299 static ssize_t 1300 nvmet_subsys_attr_serial_store_locked(struct nvmet_subsys *subsys, 1301 const char *page, size_t count) 1302 { 1303 int pos, len = strcspn(page, "\n"); 1304 1305 if (subsys->subsys_discovered) { 1306 pr_err("Can't set serial number. %s is already assigned\n", 1307 subsys->serial); 1308 return -EINVAL; 1309 } 1310 1311 if (!len || len > NVMET_SN_MAX_SIZE) { 1312 pr_err("Serial Number can not be empty or exceed %d Bytes\n", 1313 NVMET_SN_MAX_SIZE); 1314 return -EINVAL; 1315 } 1316 1317 for (pos = 0; pos < len; pos++) { 1318 if (!nvmet_is_ascii(page[pos])) { 1319 pr_err("Serial Number must contain only ASCII strings\n"); 1320 return -EINVAL; 1321 } 1322 } 1323 1324 memcpy_and_pad(subsys->serial, NVMET_SN_MAX_SIZE, page, len, ' '); 1325 1326 return count; 1327 } 1328 1329 static ssize_t nvmet_subsys_attr_serial_store(struct config_item *item, 1330 const char *page, size_t count) 1331 { 1332 struct nvmet_subsys *subsys = to_subsys(item); 1333 ssize_t ret; 1334 1335 down_write(&nvmet_config_sem); 1336 mutex_lock(&subsys->lock); 1337 ret = nvmet_subsys_attr_serial_store_locked(subsys, page, count); 1338 mutex_unlock(&subsys->lock); 1339 up_write(&nvmet_config_sem); 1340 1341 return ret; 1342 } 1343 CONFIGFS_ATTR(nvmet_subsys_, attr_serial); 1344 1345 static ssize_t nvmet_subsys_attr_cntlid_min_show(struct config_item *item, 1346 char *page) 1347 { 1348 return snprintf(page, PAGE_SIZE, "%u\n", to_subsys(item)->cntlid_min); 1349 } 1350 1351 static ssize_t nvmet_subsys_attr_cntlid_min_store(struct config_item *item, 1352 const char *page, size_t cnt) 1353 { 1354 u16 cntlid_min; 1355 1356 if (sscanf(page, "%hu\n", &cntlid_min) != 1) 1357 return -EINVAL; 1358 1359 if (cntlid_min == 0) 1360 return -EINVAL; 1361 1362 down_write(&nvmet_config_sem); 1363 if (cntlid_min > to_subsys(item)->cntlid_max) 1364 goto out_unlock; 1365 to_subsys(item)->cntlid_min = cntlid_min; 1366 up_write(&nvmet_config_sem); 1367 return cnt; 1368 1369 out_unlock: 1370 up_write(&nvmet_config_sem); 1371 return -EINVAL; 1372 } 1373 CONFIGFS_ATTR(nvmet_subsys_, attr_cntlid_min); 1374 1375 static ssize_t nvmet_subsys_attr_cntlid_max_show(struct config_item *item, 1376 char *page) 1377 { 1378 return snprintf(page, PAGE_SIZE, "%u\n", to_subsys(item)->cntlid_max); 1379 } 1380 1381 static ssize_t nvmet_subsys_attr_cntlid_max_store(struct config_item *item, 1382 const char *page, size_t cnt) 1383 { 1384 u16 cntlid_max; 1385 1386 if (sscanf(page, "%hu\n", &cntlid_max) != 1) 1387 return -EINVAL; 1388 1389 if (cntlid_max == 0) 1390 return -EINVAL; 1391 1392 down_write(&nvmet_config_sem); 1393 if (cntlid_max < to_subsys(item)->cntlid_min) 1394 goto out_unlock; 1395 to_subsys(item)->cntlid_max = cntlid_max; 1396 up_write(&nvmet_config_sem); 1397 return cnt; 1398 1399 out_unlock: 1400 up_write(&nvmet_config_sem); 1401 return -EINVAL; 1402 } 1403 CONFIGFS_ATTR(nvmet_subsys_, attr_cntlid_max); 1404 1405 static ssize_t nvmet_subsys_attr_vendor_id_show(struct config_item *item, 1406 char *page) 1407 { 1408 return snprintf(page, PAGE_SIZE, "0x%x\n", to_subsys(item)->vendor_id); 1409 } 1410 1411 static ssize_t nvmet_subsys_attr_vendor_id_store(struct config_item *item, 1412 const char *page, size_t count) 1413 { 1414 u16 vid; 1415 1416 if (kstrtou16(page, 0, &vid)) 1417 return -EINVAL; 1418 1419 down_write(&nvmet_config_sem); 1420 to_subsys(item)->vendor_id = vid; 1421 up_write(&nvmet_config_sem); 1422 return count; 1423 } 1424 CONFIGFS_ATTR(nvmet_subsys_, attr_vendor_id); 1425 1426 static ssize_t nvmet_subsys_attr_subsys_vendor_id_show(struct config_item *item, 1427 char *page) 1428 { 1429 return snprintf(page, PAGE_SIZE, "0x%x\n", 1430 to_subsys(item)->subsys_vendor_id); 1431 } 1432 1433 static ssize_t nvmet_subsys_attr_subsys_vendor_id_store(struct config_item *item, 1434 const char *page, size_t count) 1435 { 1436 u16 ssvid; 1437 1438 if (kstrtou16(page, 0, &ssvid)) 1439 return -EINVAL; 1440 1441 down_write(&nvmet_config_sem); 1442 to_subsys(item)->subsys_vendor_id = ssvid; 1443 up_write(&nvmet_config_sem); 1444 return count; 1445 } 1446 CONFIGFS_ATTR(nvmet_subsys_, attr_subsys_vendor_id); 1447 1448 static ssize_t nvmet_subsys_attr_model_show(struct config_item *item, 1449 char *page) 1450 { 1451 struct nvmet_subsys *subsys = to_subsys(item); 1452 1453 return snprintf(page, PAGE_SIZE, "%s\n", subsys->model_number); 1454 } 1455 1456 static ssize_t nvmet_subsys_attr_model_store_locked(struct nvmet_subsys *subsys, 1457 const char *page, size_t count) 1458 { 1459 int pos = 0, len; 1460 char *val; 1461 1462 if (subsys->subsys_discovered) { 1463 pr_err("Can't set model number. %s is already assigned\n", 1464 subsys->model_number); 1465 return -EINVAL; 1466 } 1467 1468 len = strcspn(page, "\n"); 1469 if (!len) 1470 return -EINVAL; 1471 1472 if (len > NVMET_MN_MAX_SIZE) { 1473 pr_err("Model number size can not exceed %d Bytes\n", 1474 NVMET_MN_MAX_SIZE); 1475 return -EINVAL; 1476 } 1477 1478 for (pos = 0; pos < len; pos++) { 1479 if (!nvmet_is_ascii(page[pos])) 1480 return -EINVAL; 1481 } 1482 1483 val = kmemdup_nul(page, len, GFP_KERNEL); 1484 if (!val) 1485 return -ENOMEM; 1486 kfree(subsys->model_number); 1487 subsys->model_number = val; 1488 return count; 1489 } 1490 1491 static ssize_t nvmet_subsys_attr_model_store(struct config_item *item, 1492 const char *page, size_t count) 1493 { 1494 struct nvmet_subsys *subsys = to_subsys(item); 1495 ssize_t ret; 1496 1497 down_write(&nvmet_config_sem); 1498 mutex_lock(&subsys->lock); 1499 ret = nvmet_subsys_attr_model_store_locked(subsys, page, count); 1500 mutex_unlock(&subsys->lock); 1501 up_write(&nvmet_config_sem); 1502 1503 return ret; 1504 } 1505 CONFIGFS_ATTR(nvmet_subsys_, attr_model); 1506 1507 static ssize_t nvmet_subsys_attr_ieee_oui_show(struct config_item *item, 1508 char *page) 1509 { 1510 struct nvmet_subsys *subsys = to_subsys(item); 1511 1512 return sysfs_emit(page, "0x%06x\n", subsys->ieee_oui); 1513 } 1514 1515 static ssize_t nvmet_subsys_attr_ieee_oui_store_locked(struct nvmet_subsys *subsys, 1516 const char *page, size_t count) 1517 { 1518 uint32_t val = 0; 1519 int ret; 1520 1521 if (subsys->subsys_discovered) { 1522 pr_err("Can't set IEEE OUI. 0x%06x is already assigned\n", 1523 subsys->ieee_oui); 1524 return -EINVAL; 1525 } 1526 1527 ret = kstrtou32(page, 0, &val); 1528 if (ret < 0) 1529 return ret; 1530 1531 if (val >= 0x1000000) 1532 return -EINVAL; 1533 1534 subsys->ieee_oui = val; 1535 1536 return count; 1537 } 1538 1539 static ssize_t nvmet_subsys_attr_ieee_oui_store(struct config_item *item, 1540 const char *page, size_t count) 1541 { 1542 struct nvmet_subsys *subsys = to_subsys(item); 1543 ssize_t ret; 1544 1545 down_write(&nvmet_config_sem); 1546 mutex_lock(&subsys->lock); 1547 ret = nvmet_subsys_attr_ieee_oui_store_locked(subsys, page, count); 1548 mutex_unlock(&subsys->lock); 1549 up_write(&nvmet_config_sem); 1550 1551 return ret; 1552 } 1553 CONFIGFS_ATTR(nvmet_subsys_, attr_ieee_oui); 1554 1555 static ssize_t nvmet_subsys_attr_firmware_show(struct config_item *item, 1556 char *page) 1557 { 1558 struct nvmet_subsys *subsys = to_subsys(item); 1559 1560 return sysfs_emit(page, "%s\n", subsys->firmware_rev); 1561 } 1562 1563 static ssize_t nvmet_subsys_attr_firmware_store_locked(struct nvmet_subsys *subsys, 1564 const char *page, size_t count) 1565 { 1566 int pos = 0, len; 1567 char *val; 1568 1569 if (subsys->subsys_discovered) { 1570 pr_err("Can't set firmware revision. %s is already assigned\n", 1571 subsys->firmware_rev); 1572 return -EINVAL; 1573 } 1574 1575 len = strcspn(page, "\n"); 1576 if (!len) 1577 return -EINVAL; 1578 1579 if (len > NVMET_FR_MAX_SIZE) { 1580 pr_err("Firmware revision size can not exceed %d Bytes\n", 1581 NVMET_FR_MAX_SIZE); 1582 return -EINVAL; 1583 } 1584 1585 for (pos = 0; pos < len; pos++) { 1586 if (!nvmet_is_ascii(page[pos])) 1587 return -EINVAL; 1588 } 1589 1590 val = kmemdup_nul(page, len, GFP_KERNEL); 1591 if (!val) 1592 return -ENOMEM; 1593 1594 kfree(subsys->firmware_rev); 1595 1596 subsys->firmware_rev = val; 1597 1598 return count; 1599 } 1600 1601 static ssize_t nvmet_subsys_attr_firmware_store(struct config_item *item, 1602 const char *page, size_t count) 1603 { 1604 struct nvmet_subsys *subsys = to_subsys(item); 1605 ssize_t ret; 1606 1607 down_write(&nvmet_config_sem); 1608 mutex_lock(&subsys->lock); 1609 ret = nvmet_subsys_attr_firmware_store_locked(subsys, page, count); 1610 mutex_unlock(&subsys->lock); 1611 up_write(&nvmet_config_sem); 1612 1613 return ret; 1614 } 1615 CONFIGFS_ATTR(nvmet_subsys_, attr_firmware); 1616 1617 #ifdef CONFIG_BLK_DEV_INTEGRITY 1618 static ssize_t nvmet_subsys_attr_pi_enable_show(struct config_item *item, 1619 char *page) 1620 { 1621 return snprintf(page, PAGE_SIZE, "%d\n", to_subsys(item)->pi_support); 1622 } 1623 1624 static ssize_t nvmet_subsys_attr_pi_enable_store(struct config_item *item, 1625 const char *page, size_t count) 1626 { 1627 struct nvmet_subsys *subsys = to_subsys(item); 1628 bool pi_enable; 1629 1630 if (kstrtobool(page, &pi_enable)) 1631 return -EINVAL; 1632 1633 subsys->pi_support = pi_enable; 1634 return count; 1635 } 1636 CONFIGFS_ATTR(nvmet_subsys_, attr_pi_enable); 1637 #endif 1638 1639 static ssize_t nvmet_subsys_attr_qid_max_show(struct config_item *item, 1640 char *page) 1641 { 1642 return snprintf(page, PAGE_SIZE, "%u\n", to_subsys(item)->max_qid); 1643 } 1644 1645 static ssize_t nvmet_subsys_attr_qid_max_store(struct config_item *item, 1646 const char *page, size_t cnt) 1647 { 1648 struct nvmet_subsys *subsys = to_subsys(item); 1649 struct nvmet_ctrl *ctrl; 1650 u16 qid_max; 1651 1652 if (sscanf(page, "%hu\n", &qid_max) != 1) 1653 return -EINVAL; 1654 1655 if (qid_max < 1 || qid_max > NVMET_NR_QUEUES) 1656 return -EINVAL; 1657 1658 down_write(&nvmet_config_sem); 1659 subsys->max_qid = qid_max; 1660 1661 /* Force reconnect */ 1662 list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) 1663 ctrl->ops->delete_ctrl(ctrl); 1664 up_write(&nvmet_config_sem); 1665 1666 return cnt; 1667 } 1668 CONFIGFS_ATTR(nvmet_subsys_, attr_qid_max); 1669 1670 static struct configfs_attribute *nvmet_subsys_attrs[] = { 1671 &nvmet_subsys_attr_attr_allow_any_host, 1672 &nvmet_subsys_attr_attr_version, 1673 &nvmet_subsys_attr_attr_serial, 1674 &nvmet_subsys_attr_attr_cntlid_min, 1675 &nvmet_subsys_attr_attr_cntlid_max, 1676 &nvmet_subsys_attr_attr_vendor_id, 1677 &nvmet_subsys_attr_attr_subsys_vendor_id, 1678 &nvmet_subsys_attr_attr_model, 1679 &nvmet_subsys_attr_attr_qid_max, 1680 &nvmet_subsys_attr_attr_ieee_oui, 1681 &nvmet_subsys_attr_attr_firmware, 1682 #ifdef CONFIG_BLK_DEV_INTEGRITY 1683 &nvmet_subsys_attr_attr_pi_enable, 1684 #endif 1685 NULL, 1686 }; 1687 1688 /* 1689 * Subsystem structures & folder operation functions below 1690 */ 1691 static void nvmet_subsys_release(struct config_item *item) 1692 { 1693 struct nvmet_subsys *subsys = to_subsys(item); 1694 1695 nvmet_subsys_del_ctrls(subsys); 1696 nvmet_subsys_put(subsys); 1697 } 1698 1699 static struct configfs_item_operations nvmet_subsys_item_ops = { 1700 .release = nvmet_subsys_release, 1701 }; 1702 1703 static const struct config_item_type nvmet_subsys_type = { 1704 .ct_item_ops = &nvmet_subsys_item_ops, 1705 .ct_attrs = nvmet_subsys_attrs, 1706 .ct_owner = THIS_MODULE, 1707 }; 1708 1709 static struct config_group *nvmet_subsys_make(struct config_group *group, 1710 const char *name) 1711 { 1712 struct nvmet_subsys *subsys; 1713 1714 if (sysfs_streq(name, NVME_DISC_SUBSYS_NAME)) { 1715 pr_err("can't create discovery subsystem through configfs\n"); 1716 return ERR_PTR(-EINVAL); 1717 } 1718 1719 if (sysfs_streq(name, nvmet_disc_subsys->subsysnqn)) { 1720 pr_err("can't create subsystem using unique discovery NQN\n"); 1721 return ERR_PTR(-EINVAL); 1722 } 1723 1724 subsys = nvmet_subsys_alloc(name, NVME_NQN_NVME); 1725 if (IS_ERR(subsys)) 1726 return ERR_CAST(subsys); 1727 1728 config_group_init_type_name(&subsys->group, name, &nvmet_subsys_type); 1729 1730 config_group_init_type_name(&subsys->namespaces_group, 1731 "namespaces", &nvmet_namespaces_type); 1732 configfs_add_default_group(&subsys->namespaces_group, &subsys->group); 1733 1734 config_group_init_type_name(&subsys->allowed_hosts_group, 1735 "allowed_hosts", &nvmet_allowed_hosts_type); 1736 configfs_add_default_group(&subsys->allowed_hosts_group, 1737 &subsys->group); 1738 1739 nvmet_add_passthru_group(subsys); 1740 1741 return &subsys->group; 1742 } 1743 1744 static struct configfs_group_operations nvmet_subsystems_group_ops = { 1745 .make_group = nvmet_subsys_make, 1746 }; 1747 1748 static const struct config_item_type nvmet_subsystems_type = { 1749 .ct_group_ops = &nvmet_subsystems_group_ops, 1750 .ct_owner = THIS_MODULE, 1751 }; 1752 1753 static ssize_t nvmet_referral_enable_show(struct config_item *item, 1754 char *page) 1755 { 1756 return snprintf(page, PAGE_SIZE, "%d\n", to_nvmet_port(item)->enabled); 1757 } 1758 1759 static ssize_t nvmet_referral_enable_store(struct config_item *item, 1760 const char *page, size_t count) 1761 { 1762 struct nvmet_port *parent = to_nvmet_port(item->ci_parent->ci_parent); 1763 struct nvmet_port *port = to_nvmet_port(item); 1764 bool enable; 1765 1766 if (kstrtobool(page, &enable)) 1767 goto inval; 1768 1769 if (enable) 1770 nvmet_referral_enable(parent, port); 1771 else 1772 nvmet_referral_disable(parent, port); 1773 1774 return count; 1775 inval: 1776 pr_err("Invalid value '%s' for enable\n", page); 1777 return -EINVAL; 1778 } 1779 1780 CONFIGFS_ATTR(nvmet_referral_, enable); 1781 1782 /* 1783 * Discovery Service subsystem definitions 1784 */ 1785 static struct configfs_attribute *nvmet_referral_attrs[] = { 1786 &nvmet_attr_addr_adrfam, 1787 &nvmet_attr_addr_portid, 1788 &nvmet_attr_addr_treq, 1789 &nvmet_attr_addr_traddr, 1790 &nvmet_attr_addr_trsvcid, 1791 &nvmet_attr_addr_trtype, 1792 &nvmet_referral_attr_enable, 1793 NULL, 1794 }; 1795 1796 static void nvmet_referral_notify(struct config_group *group, 1797 struct config_item *item) 1798 { 1799 struct nvmet_port *parent = to_nvmet_port(item->ci_parent->ci_parent); 1800 struct nvmet_port *port = to_nvmet_port(item); 1801 1802 nvmet_referral_disable(parent, port); 1803 } 1804 1805 static void nvmet_referral_release(struct config_item *item) 1806 { 1807 struct nvmet_port *port = to_nvmet_port(item); 1808 1809 kfree(port); 1810 } 1811 1812 static struct configfs_item_operations nvmet_referral_item_ops = { 1813 .release = nvmet_referral_release, 1814 }; 1815 1816 static const struct config_item_type nvmet_referral_type = { 1817 .ct_owner = THIS_MODULE, 1818 .ct_attrs = nvmet_referral_attrs, 1819 .ct_item_ops = &nvmet_referral_item_ops, 1820 }; 1821 1822 static struct config_group *nvmet_referral_make( 1823 struct config_group *group, const char *name) 1824 { 1825 struct nvmet_port *port; 1826 1827 port = kzalloc(sizeof(*port), GFP_KERNEL); 1828 if (!port) 1829 return ERR_PTR(-ENOMEM); 1830 1831 INIT_LIST_HEAD(&port->entry); 1832 port->disc_addr.trtype = NVMF_TRTYPE_MAX; 1833 config_group_init_type_name(&port->group, name, &nvmet_referral_type); 1834 1835 return &port->group; 1836 } 1837 1838 static struct configfs_group_operations nvmet_referral_group_ops = { 1839 .make_group = nvmet_referral_make, 1840 .disconnect_notify = nvmet_referral_notify, 1841 }; 1842 1843 static const struct config_item_type nvmet_referrals_type = { 1844 .ct_owner = THIS_MODULE, 1845 .ct_group_ops = &nvmet_referral_group_ops, 1846 }; 1847 1848 static struct nvmet_type_name_map nvmet_ana_state[] = { 1849 { NVME_ANA_OPTIMIZED, "optimized" }, 1850 { NVME_ANA_NONOPTIMIZED, "non-optimized" }, 1851 { NVME_ANA_INACCESSIBLE, "inaccessible" }, 1852 { NVME_ANA_PERSISTENT_LOSS, "persistent-loss" }, 1853 { NVME_ANA_CHANGE, "change" }, 1854 }; 1855 1856 static ssize_t nvmet_ana_group_ana_state_show(struct config_item *item, 1857 char *page) 1858 { 1859 struct nvmet_ana_group *grp = to_ana_group(item); 1860 enum nvme_ana_state state = grp->port->ana_state[grp->grpid]; 1861 int i; 1862 1863 for (i = 0; i < ARRAY_SIZE(nvmet_ana_state); i++) { 1864 if (state == nvmet_ana_state[i].type) 1865 return sprintf(page, "%s\n", nvmet_ana_state[i].name); 1866 } 1867 1868 return sprintf(page, "\n"); 1869 } 1870 1871 static ssize_t nvmet_ana_group_ana_state_store(struct config_item *item, 1872 const char *page, size_t count) 1873 { 1874 struct nvmet_ana_group *grp = to_ana_group(item); 1875 enum nvme_ana_state *ana_state = grp->port->ana_state; 1876 int i; 1877 1878 for (i = 0; i < ARRAY_SIZE(nvmet_ana_state); i++) { 1879 if (sysfs_streq(page, nvmet_ana_state[i].name)) 1880 goto found; 1881 } 1882 1883 pr_err("Invalid value '%s' for ana_state\n", page); 1884 return -EINVAL; 1885 1886 found: 1887 down_write(&nvmet_ana_sem); 1888 ana_state[grp->grpid] = (enum nvme_ana_state) nvmet_ana_state[i].type; 1889 nvmet_ana_chgcnt++; 1890 up_write(&nvmet_ana_sem); 1891 nvmet_port_send_ana_event(grp->port); 1892 return count; 1893 } 1894 1895 CONFIGFS_ATTR(nvmet_ana_group_, ana_state); 1896 1897 static struct configfs_attribute *nvmet_ana_group_attrs[] = { 1898 &nvmet_ana_group_attr_ana_state, 1899 NULL, 1900 }; 1901 1902 static void nvmet_ana_group_release(struct config_item *item) 1903 { 1904 struct nvmet_ana_group *grp = to_ana_group(item); 1905 1906 if (grp == &grp->port->ana_default_group) 1907 return; 1908 1909 down_write(&nvmet_ana_sem); 1910 grp->port->ana_state[grp->grpid] = NVME_ANA_INACCESSIBLE; 1911 nvmet_ana_group_enabled[grp->grpid]--; 1912 up_write(&nvmet_ana_sem); 1913 1914 nvmet_port_send_ana_event(grp->port); 1915 kfree(grp); 1916 } 1917 1918 static struct configfs_item_operations nvmet_ana_group_item_ops = { 1919 .release = nvmet_ana_group_release, 1920 }; 1921 1922 static const struct config_item_type nvmet_ana_group_type = { 1923 .ct_item_ops = &nvmet_ana_group_item_ops, 1924 .ct_attrs = nvmet_ana_group_attrs, 1925 .ct_owner = THIS_MODULE, 1926 }; 1927 1928 static struct config_group *nvmet_ana_groups_make_group( 1929 struct config_group *group, const char *name) 1930 { 1931 struct nvmet_port *port = ana_groups_to_port(&group->cg_item); 1932 struct nvmet_ana_group *grp; 1933 u32 grpid; 1934 int ret; 1935 1936 ret = kstrtou32(name, 0, &grpid); 1937 if (ret) 1938 goto out; 1939 1940 ret = -EINVAL; 1941 if (grpid <= 1 || grpid > NVMET_MAX_ANAGRPS) 1942 goto out; 1943 1944 ret = -ENOMEM; 1945 grp = kzalloc(sizeof(*grp), GFP_KERNEL); 1946 if (!grp) 1947 goto out; 1948 grp->port = port; 1949 grp->grpid = grpid; 1950 1951 down_write(&nvmet_ana_sem); 1952 grpid = array_index_nospec(grpid, NVMET_MAX_ANAGRPS); 1953 nvmet_ana_group_enabled[grpid]++; 1954 up_write(&nvmet_ana_sem); 1955 1956 nvmet_port_send_ana_event(grp->port); 1957 1958 config_group_init_type_name(&grp->group, name, &nvmet_ana_group_type); 1959 return &grp->group; 1960 out: 1961 return ERR_PTR(ret); 1962 } 1963 1964 static struct configfs_group_operations nvmet_ana_groups_group_ops = { 1965 .make_group = nvmet_ana_groups_make_group, 1966 }; 1967 1968 static const struct config_item_type nvmet_ana_groups_type = { 1969 .ct_group_ops = &nvmet_ana_groups_group_ops, 1970 .ct_owner = THIS_MODULE, 1971 }; 1972 1973 /* 1974 * Ports definitions. 1975 */ 1976 static void nvmet_port_release(struct config_item *item) 1977 { 1978 struct nvmet_port *port = to_nvmet_port(item); 1979 1980 /* Let inflight controllers teardown complete */ 1981 flush_workqueue(nvmet_wq); 1982 list_del(&port->global_entry); 1983 1984 key_put(port->keyring); 1985 kfree(port->ana_state); 1986 kfree(port); 1987 } 1988 1989 static struct configfs_attribute *nvmet_port_attrs[] = { 1990 &nvmet_attr_addr_adrfam, 1991 &nvmet_attr_addr_treq, 1992 &nvmet_attr_addr_traddr, 1993 &nvmet_attr_addr_trsvcid, 1994 &nvmet_attr_addr_trtype, 1995 &nvmet_attr_addr_tsas, 1996 &nvmet_attr_param_inline_data_size, 1997 &nvmet_attr_param_max_queue_size, 1998 #ifdef CONFIG_BLK_DEV_INTEGRITY 1999 &nvmet_attr_param_pi_enable, 2000 #endif 2001 NULL, 2002 }; 2003 2004 static struct configfs_item_operations nvmet_port_item_ops = { 2005 .release = nvmet_port_release, 2006 }; 2007 2008 static const struct config_item_type nvmet_port_type = { 2009 .ct_attrs = nvmet_port_attrs, 2010 .ct_item_ops = &nvmet_port_item_ops, 2011 .ct_owner = THIS_MODULE, 2012 }; 2013 2014 static struct config_group *nvmet_ports_make(struct config_group *group, 2015 const char *name) 2016 { 2017 struct nvmet_port *port; 2018 u16 portid; 2019 u32 i; 2020 2021 if (kstrtou16(name, 0, &portid)) 2022 return ERR_PTR(-EINVAL); 2023 2024 port = kzalloc(sizeof(*port), GFP_KERNEL); 2025 if (!port) 2026 return ERR_PTR(-ENOMEM); 2027 2028 port->ana_state = kcalloc(NVMET_MAX_ANAGRPS + 1, 2029 sizeof(*port->ana_state), GFP_KERNEL); 2030 if (!port->ana_state) { 2031 kfree(port); 2032 return ERR_PTR(-ENOMEM); 2033 } 2034 2035 if (IS_ENABLED(CONFIG_NVME_TARGET_TCP_TLS) && nvme_keyring_id()) { 2036 port->keyring = key_lookup(nvme_keyring_id()); 2037 if (IS_ERR(port->keyring)) { 2038 pr_warn("NVMe keyring not available, disabling TLS\n"); 2039 port->keyring = NULL; 2040 } 2041 } 2042 2043 for (i = 1; i <= NVMET_MAX_ANAGRPS; i++) { 2044 if (i == NVMET_DEFAULT_ANA_GRPID) 2045 port->ana_state[1] = NVME_ANA_OPTIMIZED; 2046 else 2047 port->ana_state[i] = NVME_ANA_INACCESSIBLE; 2048 } 2049 2050 list_add(&port->global_entry, &nvmet_ports_list); 2051 2052 INIT_LIST_HEAD(&port->entry); 2053 INIT_LIST_HEAD(&port->subsystems); 2054 INIT_LIST_HEAD(&port->referrals); 2055 port->inline_data_size = -1; /* < 0 == let the transport choose */ 2056 port->max_queue_size = -1; /* < 0 == let the transport choose */ 2057 2058 port->disc_addr.trtype = NVMF_TRTYPE_MAX; 2059 port->disc_addr.portid = cpu_to_le16(portid); 2060 port->disc_addr.adrfam = NVMF_ADDR_FAMILY_MAX; 2061 port->disc_addr.treq = NVMF_TREQ_DISABLE_SQFLOW; 2062 config_group_init_type_name(&port->group, name, &nvmet_port_type); 2063 2064 config_group_init_type_name(&port->subsys_group, 2065 "subsystems", &nvmet_port_subsys_type); 2066 configfs_add_default_group(&port->subsys_group, &port->group); 2067 2068 config_group_init_type_name(&port->referrals_group, 2069 "referrals", &nvmet_referrals_type); 2070 configfs_add_default_group(&port->referrals_group, &port->group); 2071 2072 config_group_init_type_name(&port->ana_groups_group, 2073 "ana_groups", &nvmet_ana_groups_type); 2074 configfs_add_default_group(&port->ana_groups_group, &port->group); 2075 2076 port->ana_default_group.port = port; 2077 port->ana_default_group.grpid = NVMET_DEFAULT_ANA_GRPID; 2078 config_group_init_type_name(&port->ana_default_group.group, 2079 __stringify(NVMET_DEFAULT_ANA_GRPID), 2080 &nvmet_ana_group_type); 2081 configfs_add_default_group(&port->ana_default_group.group, 2082 &port->ana_groups_group); 2083 2084 return &port->group; 2085 } 2086 2087 static struct configfs_group_operations nvmet_ports_group_ops = { 2088 .make_group = nvmet_ports_make, 2089 }; 2090 2091 static const struct config_item_type nvmet_ports_type = { 2092 .ct_group_ops = &nvmet_ports_group_ops, 2093 .ct_owner = THIS_MODULE, 2094 }; 2095 2096 static struct config_group nvmet_subsystems_group; 2097 static struct config_group nvmet_ports_group; 2098 2099 #ifdef CONFIG_NVME_TARGET_AUTH 2100 static ssize_t nvmet_host_dhchap_key_show(struct config_item *item, 2101 char *page) 2102 { 2103 u8 *dhchap_secret; 2104 ssize_t ret; 2105 2106 down_read(&nvmet_config_sem); 2107 dhchap_secret = to_host(item)->dhchap_secret; 2108 if (!dhchap_secret) 2109 ret = sprintf(page, "\n"); 2110 else 2111 ret = sprintf(page, "%s\n", dhchap_secret); 2112 up_read(&nvmet_config_sem); 2113 return ret; 2114 } 2115 2116 static ssize_t nvmet_host_dhchap_key_store(struct config_item *item, 2117 const char *page, size_t count) 2118 { 2119 struct nvmet_host *host = to_host(item); 2120 int ret; 2121 2122 ret = nvmet_auth_set_key(host, page, false); 2123 /* 2124 * Re-authentication is a soft state, so keep the 2125 * current authentication valid until the host 2126 * requests re-authentication. 2127 */ 2128 return ret < 0 ? ret : count; 2129 } 2130 2131 CONFIGFS_ATTR(nvmet_host_, dhchap_key); 2132 2133 static ssize_t nvmet_host_dhchap_ctrl_key_show(struct config_item *item, 2134 char *page) 2135 { 2136 u8 *dhchap_secret = to_host(item)->dhchap_ctrl_secret; 2137 ssize_t ret; 2138 2139 down_read(&nvmet_config_sem); 2140 dhchap_secret = to_host(item)->dhchap_ctrl_secret; 2141 if (!dhchap_secret) 2142 ret = sprintf(page, "\n"); 2143 else 2144 ret = sprintf(page, "%s\n", dhchap_secret); 2145 up_read(&nvmet_config_sem); 2146 return ret; 2147 } 2148 2149 static ssize_t nvmet_host_dhchap_ctrl_key_store(struct config_item *item, 2150 const char *page, size_t count) 2151 { 2152 struct nvmet_host *host = to_host(item); 2153 int ret; 2154 2155 ret = nvmet_auth_set_key(host, page, true); 2156 /* 2157 * Re-authentication is a soft state, so keep the 2158 * current authentication valid until the host 2159 * requests re-authentication. 2160 */ 2161 return ret < 0 ? ret : count; 2162 } 2163 2164 CONFIGFS_ATTR(nvmet_host_, dhchap_ctrl_key); 2165 2166 static ssize_t nvmet_host_dhchap_hash_show(struct config_item *item, 2167 char *page) 2168 { 2169 struct nvmet_host *host = to_host(item); 2170 const char *hash_name = nvme_auth_hmac_name(host->dhchap_hash_id); 2171 2172 return sprintf(page, "%s\n", hash_name ? hash_name : "none"); 2173 } 2174 2175 static ssize_t nvmet_host_dhchap_hash_store(struct config_item *item, 2176 const char *page, size_t count) 2177 { 2178 struct nvmet_host *host = to_host(item); 2179 u8 hmac_id; 2180 2181 hmac_id = nvme_auth_hmac_id(page); 2182 if (hmac_id == NVME_AUTH_HASH_INVALID) 2183 return -EINVAL; 2184 if (!crypto_has_shash(nvme_auth_hmac_name(hmac_id), 0, 0)) 2185 return -ENOTSUPP; 2186 host->dhchap_hash_id = hmac_id; 2187 return count; 2188 } 2189 2190 CONFIGFS_ATTR(nvmet_host_, dhchap_hash); 2191 2192 static ssize_t nvmet_host_dhchap_dhgroup_show(struct config_item *item, 2193 char *page) 2194 { 2195 struct nvmet_host *host = to_host(item); 2196 const char *dhgroup = nvme_auth_dhgroup_name(host->dhchap_dhgroup_id); 2197 2198 return sprintf(page, "%s\n", dhgroup ? dhgroup : "none"); 2199 } 2200 2201 static ssize_t nvmet_host_dhchap_dhgroup_store(struct config_item *item, 2202 const char *page, size_t count) 2203 { 2204 struct nvmet_host *host = to_host(item); 2205 int dhgroup_id; 2206 2207 dhgroup_id = nvme_auth_dhgroup_id(page); 2208 if (dhgroup_id == NVME_AUTH_DHGROUP_INVALID) 2209 return -EINVAL; 2210 if (dhgroup_id != NVME_AUTH_DHGROUP_NULL) { 2211 const char *kpp = nvme_auth_dhgroup_kpp(dhgroup_id); 2212 2213 if (!crypto_has_kpp(kpp, 0, 0)) 2214 return -EINVAL; 2215 } 2216 host->dhchap_dhgroup_id = dhgroup_id; 2217 return count; 2218 } 2219 2220 CONFIGFS_ATTR(nvmet_host_, dhchap_dhgroup); 2221 2222 static struct configfs_attribute *nvmet_host_attrs[] = { 2223 &nvmet_host_attr_dhchap_key, 2224 &nvmet_host_attr_dhchap_ctrl_key, 2225 &nvmet_host_attr_dhchap_hash, 2226 &nvmet_host_attr_dhchap_dhgroup, 2227 NULL, 2228 }; 2229 #endif /* CONFIG_NVME_TARGET_AUTH */ 2230 2231 static void nvmet_host_release(struct config_item *item) 2232 { 2233 struct nvmet_host *host = to_host(item); 2234 2235 #ifdef CONFIG_NVME_TARGET_AUTH 2236 kfree(host->dhchap_secret); 2237 kfree(host->dhchap_ctrl_secret); 2238 #endif 2239 kfree(host); 2240 } 2241 2242 static struct configfs_item_operations nvmet_host_item_ops = { 2243 .release = nvmet_host_release, 2244 }; 2245 2246 static const struct config_item_type nvmet_host_type = { 2247 .ct_item_ops = &nvmet_host_item_ops, 2248 #ifdef CONFIG_NVME_TARGET_AUTH 2249 .ct_attrs = nvmet_host_attrs, 2250 #endif 2251 .ct_owner = THIS_MODULE, 2252 }; 2253 2254 static struct config_group *nvmet_hosts_make_group(struct config_group *group, 2255 const char *name) 2256 { 2257 struct nvmet_host *host; 2258 2259 host = kzalloc(sizeof(*host), GFP_KERNEL); 2260 if (!host) 2261 return ERR_PTR(-ENOMEM); 2262 2263 #ifdef CONFIG_NVME_TARGET_AUTH 2264 /* Default to SHA256 */ 2265 host->dhchap_hash_id = NVME_AUTH_HASH_SHA256; 2266 #endif 2267 2268 config_group_init_type_name(&host->group, name, &nvmet_host_type); 2269 2270 return &host->group; 2271 } 2272 2273 static struct configfs_group_operations nvmet_hosts_group_ops = { 2274 .make_group = nvmet_hosts_make_group, 2275 }; 2276 2277 static const struct config_item_type nvmet_hosts_type = { 2278 .ct_group_ops = &nvmet_hosts_group_ops, 2279 .ct_owner = THIS_MODULE, 2280 }; 2281 2282 static struct config_group nvmet_hosts_group; 2283 2284 static ssize_t nvmet_root_discovery_nqn_show(struct config_item *item, 2285 char *page) 2286 { 2287 return snprintf(page, PAGE_SIZE, "%s\n", nvmet_disc_subsys->subsysnqn); 2288 } 2289 2290 static ssize_t nvmet_root_discovery_nqn_store(struct config_item *item, 2291 const char *page, size_t count) 2292 { 2293 struct list_head *entry; 2294 char *old_nqn, *new_nqn; 2295 size_t len; 2296 2297 len = strcspn(page, "\n"); 2298 if (!len || len > NVMF_NQN_FIELD_LEN - 1) 2299 return -EINVAL; 2300 2301 new_nqn = kstrndup(page, len, GFP_KERNEL); 2302 if (!new_nqn) 2303 return -ENOMEM; 2304 2305 down_write(&nvmet_config_sem); 2306 list_for_each(entry, &nvmet_subsystems_group.cg_children) { 2307 struct config_item *item = 2308 container_of(entry, struct config_item, ci_entry); 2309 2310 if (!strncmp(config_item_name(item), page, len)) { 2311 pr_err("duplicate NQN %s\n", config_item_name(item)); 2312 up_write(&nvmet_config_sem); 2313 kfree(new_nqn); 2314 return -EINVAL; 2315 } 2316 } 2317 old_nqn = nvmet_disc_subsys->subsysnqn; 2318 nvmet_disc_subsys->subsysnqn = new_nqn; 2319 up_write(&nvmet_config_sem); 2320 2321 kfree(old_nqn); 2322 return len; 2323 } 2324 2325 CONFIGFS_ATTR(nvmet_root_, discovery_nqn); 2326 2327 static struct configfs_attribute *nvmet_root_attrs[] = { 2328 &nvmet_root_attr_discovery_nqn, 2329 NULL, 2330 }; 2331 2332 static const struct config_item_type nvmet_root_type = { 2333 .ct_attrs = nvmet_root_attrs, 2334 .ct_owner = THIS_MODULE, 2335 }; 2336 2337 static struct configfs_subsystem nvmet_configfs_subsystem = { 2338 .su_group = { 2339 .cg_item = { 2340 .ci_namebuf = "nvmet", 2341 .ci_type = &nvmet_root_type, 2342 }, 2343 }, 2344 }; 2345 2346 int __init nvmet_init_configfs(void) 2347 { 2348 int ret; 2349 2350 config_group_init(&nvmet_configfs_subsystem.su_group); 2351 mutex_init(&nvmet_configfs_subsystem.su_mutex); 2352 2353 config_group_init_type_name(&nvmet_subsystems_group, 2354 "subsystems", &nvmet_subsystems_type); 2355 configfs_add_default_group(&nvmet_subsystems_group, 2356 &nvmet_configfs_subsystem.su_group); 2357 2358 config_group_init_type_name(&nvmet_ports_group, 2359 "ports", &nvmet_ports_type); 2360 configfs_add_default_group(&nvmet_ports_group, 2361 &nvmet_configfs_subsystem.su_group); 2362 2363 config_group_init_type_name(&nvmet_hosts_group, 2364 "hosts", &nvmet_hosts_type); 2365 configfs_add_default_group(&nvmet_hosts_group, 2366 &nvmet_configfs_subsystem.su_group); 2367 2368 ret = configfs_register_subsystem(&nvmet_configfs_subsystem); 2369 if (ret) { 2370 pr_err("configfs_register_subsystem: %d\n", ret); 2371 return ret; 2372 } 2373 2374 return 0; 2375 } 2376 2377 void __exit nvmet_exit_configfs(void) 2378 { 2379 configfs_unregister_subsystem(&nvmet_configfs_subsystem); 2380 } 2381