// SPDX-License-Identifier: GPL-2.0
/*
 * Sysfs interface for the NVMe core driver.
 *
 * Copyright (c) 2011-2014, Intel Corporation.
 */

#include <linux/nvme-auth.h>

#include "nvme.h"
#include "fabrics.h"

/*
 * "reset_controller" (write-only): any write triggers a synchronous
 * controller reset; the write blocks until the reset completes.
 */
static ssize_t nvme_sysfs_reset(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	int ret;

	ret = nvme_reset_ctrl_sync(ctrl);
	if (ret < 0)
		return ret;
	return count;
}
static DEVICE_ATTR(reset_controller, S_IWUSR, NULL, nvme_sysfs_reset);

/*
 * "rescan_controller" (write-only): any write queues an asynchronous
 * namespace rescan of the controller.
 */
static ssize_t nvme_sysfs_rescan(struct device *dev,
				 struct device_attribute *attr, const char *buf,
				 size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	nvme_queue_scan(ctrl);
	return count;
}
static DEVICE_ATTR(rescan_controller, S_IWUSR, NULL, nvme_sysfs_rescan);

/* Per-controller "passthru_err_log_enabled": admin passthru error logging. */
static ssize_t nvme_adm_passthru_err_log_enabled_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return sysfs_emit(buf,
			  ctrl->passthru_err_log_enabled ? "on\n" : "off\n");
}

static ssize_t nvme_adm_passthru_err_log_enabled_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	bool passthru_err_log_enabled;
	int err;

	err = kstrtobool(buf, &passthru_err_log_enabled);
	if (err)
		return -EINVAL;

	ctrl->passthru_err_log_enabled = passthru_err_log_enabled;

	return count;
}

/*
 * Resolve the namespace head for a block-device sysfs node, whether the
 * gendisk is the multipath head disk or a per-path namespace disk.
 */
static inline struct nvme_ns_head *dev_to_ns_head(struct device *dev)
{
	struct gendisk *disk = dev_to_disk(dev);

	if (nvme_disk_is_ns_head(disk))
		return disk->private_data;
	return nvme_get_ns_from_dev(dev)->head;
}

/* Per-namespace "passthru_err_log_enabled": I/O passthru error logging. */
static ssize_t nvme_io_passthru_err_log_enabled_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvme_ns_head *head = dev_to_ns_head(dev);

	return sysfs_emit(buf, head->passthru_err_log_enabled ? "on\n" : "off\n");
}

static ssize_t nvme_io_passthru_err_log_enabled_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct nvme_ns_head *head = dev_to_ns_head(dev);
	bool passthru_err_log_enabled;
	int err;

	err = kstrtobool(buf, &passthru_err_log_enabled);
	if (err)
		return -EINVAL;
	head->passthru_err_log_enabled = passthru_err_log_enabled;

	return count;
}

/*
 * Both attributes are named "passthru_err_log_enabled" in sysfs; they are
 * registered on different devices (controller vs. namespace), so the
 * distinct C identifiers below never collide in the same directory.
 */
static struct device_attribute dev_attr_adm_passthru_err_log_enabled = \
	__ATTR(passthru_err_log_enabled, S_IRUGO | S_IWUSR, \
	nvme_adm_passthru_err_log_enabled_show, nvme_adm_passthru_err_log_enabled_store);

static struct device_attribute dev_attr_io_passthru_err_log_enabled = \
	__ATTR(passthru_err_log_enabled, S_IRUGO | S_IWUSR, \
	nvme_io_passthru_err_log_enabled_show, nvme_io_passthru_err_log_enabled_store);

/*
 * "wwid": best-available unique identifier for the namespace, preferring
 * UUID, then NGUID, then EUI-64, falling back to a synthetic
 * vendor/serial/model/nsid string with trailing spaces/NULs trimmed.
 */
static ssize_t wwid_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct nvme_ns_head *head = dev_to_ns_head(dev);
	struct nvme_ns_ids *ids = &head->ids;
	struct nvme_subsystem *subsys = head->subsys;
	int serial_len = sizeof(subsys->serial);
	int model_len = sizeof(subsys->model);

	if (!uuid_is_null(&ids->uuid))
		return sysfs_emit(buf, "uuid.%pU\n", &ids->uuid);

	if (memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
		return sysfs_emit(buf, "eui.%16phN\n", ids->nguid);

	if (memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
		return sysfs_emit(buf, "eui.%8phN\n", ids->eui64);

	/* serial/model are fixed-width, space-padded fields; trim the tail. */
	while (serial_len > 0 && (subsys->serial[serial_len - 1] == ' ' ||
				  subsys->serial[serial_len - 1] == '\0'))
		serial_len--;
	while (model_len > 0 && (subsys->model[model_len - 1] == ' ' ||
				 subsys->model[model_len - 1] == '\0'))
		model_len--;

	return sysfs_emit(buf, "nvme.%04x-%*phN-%*phN-%08x\n", subsys->vendor_id,
		serial_len, subsys->serial, model_len, subsys->model,
		head->ns_id);
}
static DEVICE_ATTR_RO(wwid);

static ssize_t nguid_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	return sysfs_emit(buf, "%pU\n", dev_to_ns_head(dev)->ids.nguid);
}
static DEVICE_ATTR_RO(nguid);

static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct nvme_ns_ids *ids = &dev_to_ns_head(dev)->ids;

	/* For backward compatibility expose the NGUID to userspace if
	 * we have no UUID set
	 */
	if (uuid_is_null(&ids->uuid)) {
		dev_warn_once(dev,
			"No UUID available providing old NGUID\n");
		return sysfs_emit(buf, "%pU\n", ids->nguid);
	}
	return sysfs_emit(buf, "%pU\n", &ids->uuid);
}
static DEVICE_ATTR_RO(uuid);

static ssize_t eui_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	return sysfs_emit(buf, "%8ph\n", dev_to_ns_head(dev)->ids.eui64);
}
static DEVICE_ATTR_RO(eui);

static ssize_t nsid_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	return sysfs_emit(buf, "%d\n", dev_to_ns_head(dev)->ns_id);
}
static DEVICE_ATTR_RO(nsid);

/* Command Set Identifier of the namespace (e.g. NVM, ZNS). */
static ssize_t csi_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	return sysfs_emit(buf, "%u\n", dev_to_ns_head(dev)->ids.csi);
}
static DEVICE_ATTR_RO(csi);

static ssize_t metadata_bytes_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%u\n", dev_to_ns_head(dev)->ms);
}
static DEVICE_ATTR_RO(metadata_bytes);

/*
 * Refresh head->nuse via Identify Namespace on any live path of a
 * multipath head.  Returns -EWOULDBLOCK when no usable path exists,
 * 0 on success or when the rate limit suppressed the update.
 */
static int ns_head_update_nuse(struct nvme_ns_head *head)
{
	struct nvme_id_ns *id;
	struct nvme_ns *ns;
	int srcu_idx, ret = -EWOULDBLOCK;

	/* Avoid issuing commands too often by rate limiting the update */
	if (!__ratelimit(&head->rs_nuse))
		return 0;

	srcu_idx = srcu_read_lock(&head->srcu);
	ns = nvme_find_path(head);
	if (!ns)
		goto out_unlock;

	ret = nvme_identify_ns(ns->ctrl, head->ns_id, &id);
	if (ret)
		goto out_unlock;

	head->nuse = le64_to_cpu(id->nuse);
	kfree(id);

out_unlock:
	srcu_read_unlock(&head->srcu, srcu_idx);
	return ret;
}

/* Refresh head->nuse through one specific path (non-head disk case). */
static int ns_update_nuse(struct nvme_ns *ns)
{
	struct nvme_id_ns *id;
	int ret;

	/* Avoid issuing commands too often by rate limiting the update. */
	if (!__ratelimit(&ns->head->rs_nuse))
		return 0;

	ret = nvme_identify_ns(ns->ctrl, ns->head->ns_id, &id);
	if (ret)
		return ret;

	ns->head->nuse = le64_to_cpu(id->nuse);
	kfree(id);
	return 0;
}

/*
 * "nuse": namespace utilization (blocks in use).  Reading may issue an
 * Identify command to refresh the cached value, subject to rate limiting.
 */
static ssize_t nuse_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct nvme_ns_head *head = dev_to_ns_head(dev);
	struct gendisk *disk = dev_to_disk(dev);
	int ret;

	if (nvme_disk_is_ns_head(disk))
		ret = ns_head_update_nuse(head);
	else
		ret = ns_update_nuse(disk->private_data);
	if (ret)
		return ret;

	return sysfs_emit(buf, "%llu\n", head->nuse);
}
static DEVICE_ATTR_RO(nuse);

static struct attribute *nvme_ns_attrs[] = {
	&dev_attr_wwid.attr,
	&dev_attr_uuid.attr,
	&dev_attr_nguid.attr,
	&dev_attr_eui.attr,
	&dev_attr_csi.attr,
	&dev_attr_nsid.attr,
	&dev_attr_metadata_bytes.attr,
	&dev_attr_nuse.attr,
#ifdef CONFIG_NVME_MULTIPATH
	&dev_attr_ana_grpid.attr,
	&dev_attr_ana_state.attr,
	&dev_attr_queue_depth.attr,
	&dev_attr_numa_nodes.attr,
	&dev_attr_delayed_removal_secs.attr,
#endif
	&dev_attr_io_passthru_err_log_enabled.attr,
	NULL,
};

/*
 * Hide identifier attributes that the namespace does not report, and gate
 * the multipath attributes on whether this node is the head disk and
 * whether the controller uses ANA.
 */
static umode_t nvme_ns_attrs_are_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nvme_ns_ids *ids = &dev_to_ns_head(dev)->ids;

	if (a == &dev_attr_uuid.attr) {
		/* uuid_show() falls back to NGUID, so hide only if both are absent. */
		if (uuid_is_null(&ids->uuid) &&
		    !memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
			return 0;
	}
	if (a == &dev_attr_nguid.attr) {
		if (!memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
			return 0;
	}
	if (a == &dev_attr_eui.attr) {
		if (!memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
			return 0;
	}
#ifdef CONFIG_NVME_MULTIPATH
	if (a == &dev_attr_ana_grpid.attr || a == &dev_attr_ana_state.attr) {
		/* per-path attr */
		if (nvme_disk_is_ns_head(dev_to_disk(dev)))
			return 0;
		if (!nvme_ctrl_use_ana(nvme_get_ns_from_dev(dev)->ctrl))
			return 0;
	}
	if (a == &dev_attr_queue_depth.attr || a == &dev_attr_numa_nodes.attr) {
		/* per-path attrs: not meaningful on the head disk */
		if (nvme_disk_is_ns_head(dev_to_disk(dev)))
			return 0;
	}
	if (a == &dev_attr_delayed_removal_secs.attr) {
		struct gendisk *disk = dev_to_disk(dev);

		/* head-disk-only attr */
		if (!nvme_disk_is_ns_head(disk))
			return 0;
	}
#endif
	return a->mode;
}

static const struct attribute_group nvme_ns_attr_group = {
	.attrs		= nvme_ns_attrs,
	.is_visible	= nvme_ns_attrs_are_visible,
};

#ifdef CONFIG_NVME_MULTIPATH
/*
 * NOTE: The dummy attribute does not appear in sysfs. It exists solely to allow
 * control over the visibility of the multipath sysfs node. Without at least one
 * attribute defined in nvme_ns_mpath_attrs[], the sysfs implementation does not
 * invoke the multipath_sysfs_group_visible() method. As a result, we would not
 * be able to control the visibility of the multipath sysfs node.
 */
static struct attribute dummy_attr = {
	.name = "dummy",
};

static struct attribute *nvme_ns_mpath_attrs[] = {
	&dummy_attr,
	NULL,
};

/* The "multipath" group directory appears only on the head disk. */
static bool multipath_sysfs_group_visible(struct kobject *kobj)
{
	struct device *dev = container_of(kobj, struct device, kobj);

	return nvme_disk_is_ns_head(dev_to_disk(dev));
}

/* The dummy attribute itself is never exposed. */
static bool multipath_sysfs_attr_visible(struct kobject *kobj,
		struct attribute *attr, int n)
{
	return false;
}

DEFINE_SYSFS_GROUP_VISIBLE(multipath_sysfs)

const struct attribute_group nvme_ns_mpath_attr_group = {
	.name		= "multipath",
	.attrs		= nvme_ns_mpath_attrs,
	.is_visible	= SYSFS_GROUP_VISIBLE(multipath_sysfs),
};
#endif

const struct attribute_group *nvme_ns_attr_groups[] = {
	&nvme_ns_attr_group,
#ifdef CONFIG_NVME_MULTIPATH
	&nvme_ns_mpath_attr_group,
#endif
	NULL,
};

/*
 * Generate a read-only controller attribute printing a fixed-width,
 * space-padded string field of the subsystem (model, serial, ...).
 */
#define nvme_show_str_function(field)						\
static ssize_t  field##_show(struct device *dev,				\
			    struct device_attribute *attr, char *buf)		\
{										\
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);				\
	return sysfs_emit(buf, "%.*s\n",					\
		(int)sizeof(ctrl->subsys->field), ctrl->subsys->field);		\
}										\
static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);

nvme_show_str_function(model);
nvme_show_str_function(serial);
nvme_show_str_function(firmware_rev);

/* Generate a read-only controller attribute printing an integer field. */
#define nvme_show_int_function(field)						\
static ssize_t  field##_show(struct device *dev,				\
			    struct device_attribute *attr, char *buf)		\
{										\
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);				\
	return sysfs_emit(buf, "%d\n", ctrl->field);				\
}										\
static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);

nvme_show_int_function(cntlid);
nvme_show_int_function(numa_node);
nvme_show_int_function(queue_count);
nvme_show_int_function(sqsize);
nvme_show_int_function(kato);

/*
 * "delete_controller" (write-only): tear down the controller.  The sysfs
 * file removes itself first so the synchronous delete cannot deadlock on
 * a writer holding the attribute active.
 */
static ssize_t nvme_sysfs_delete(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	/* Refuse deletion until the controller finished starting at least once. */
	if (!test_bit(NVME_CTRL_STARTED_ONCE, &ctrl->flags))
		return -EBUSY;

	if (device_remove_file_self(dev, attr))
		nvme_delete_ctrl_sync(ctrl);
	return count;
}
static DEVICE_ATTR(delete_controller, S_IWUSR, NULL, nvme_sysfs_delete);

static ssize_t nvme_sysfs_show_transport(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%s\n", ctrl->ops->name);
}
static DEVICE_ATTR(transport, S_IRUGO, nvme_sysfs_show_transport, NULL);

static ssize_t nvme_sysfs_show_state(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	unsigned state = (unsigned)nvme_ctrl_state(ctrl);
	static const char *const state_name[] = {
		[NVME_CTRL_NEW]		= "new",
		[NVME_CTRL_LIVE]	= "live",
		[NVME_CTRL_RESETTING]	= "resetting",
		[NVME_CTRL_CONNECTING]	= "connecting",
		[NVME_CTRL_DELETING]	= "deleting",
		[NVME_CTRL_DELETING_NOIO]= "deleting (no IO)",
		[NVME_CTRL_DEAD]	= "dead",
	};

	if (state < ARRAY_SIZE(state_name) && state_name[state])
		return sysfs_emit(buf, "%s\n", state_name[state]);

	return sysfs_emit(buf, "unknown state\n");
}

static DEVICE_ATTR(state, S_IRUGO, nvme_sysfs_show_state, NULL);

static ssize_t nvme_sysfs_show_subsysnqn(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%s\n", ctrl->subsys->subnqn);
}
static DEVICE_ATTR(subsysnqn, S_IRUGO, nvme_sysfs_show_subsysnqn, NULL);

static ssize_t nvme_sysfs_show_hostnqn(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%s\n", ctrl->opts->host->nqn);
}
static DEVICE_ATTR(hostnqn, S_IRUGO, nvme_sysfs_show_hostnqn, NULL);

static ssize_t nvme_sysfs_show_hostid(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%pU\n", &ctrl->opts->host->id);
}
static DEVICE_ATTR(hostid, S_IRUGO, nvme_sysfs_show_hostid, NULL);

static ssize_t nvme_sysfs_show_address(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return ctrl->ops->get_address(ctrl, buf, PAGE_SIZE);
}
static DEVICE_ATTR(address, S_IRUGO, nvme_sysfs_show_address, NULL);

/*
 * "ctrl_loss_tmo": total time (seconds) to keep reconnecting before giving
 * up on a fabrics controller; "off" means retry forever.
 */
static ssize_t nvme_ctrl_loss_tmo_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	struct nvmf_ctrl_options *opts = ctrl->opts;

	if (ctrl->opts->max_reconnects == -1)
		return sysfs_emit(buf, "off\n");
	return sysfs_emit(buf, "%d\n",
			  opts->max_reconnects * opts->reconnect_delay);
}

static ssize_t nvme_ctrl_loss_tmo_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	struct nvmf_ctrl_options *opts = ctrl->opts;
	int ctrl_loss_tmo, err;

	err = kstrtoint(buf, 10, &ctrl_loss_tmo);
	if (err)
		return -EINVAL;

	/* Negative input means "never give up". */
	if (ctrl_loss_tmo < 0)
		opts->max_reconnects = -1;
	else
		opts->max_reconnects = DIV_ROUND_UP(ctrl_loss_tmo,
						    opts->reconnect_delay);
	return count;
}
static DEVICE_ATTR(ctrl_loss_tmo, S_IRUGO | S_IWUSR,
	nvme_ctrl_loss_tmo_show, nvme_ctrl_loss_tmo_store);

static ssize_t nvme_ctrl_reconnect_delay_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (ctrl->opts->reconnect_delay == -1)
		return sysfs_emit(buf, "off\n");
	return sysfs_emit(buf, "%d\n", ctrl->opts->reconnect_delay);
}

static ssize_t nvme_ctrl_reconnect_delay_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	unsigned int v;
	int err;

	err = kstrtou32(buf, 10, &v);
	if (err)
		return err;

	ctrl->opts->reconnect_delay = v;
	return count;
}
static DEVICE_ATTR(reconnect_delay, S_IRUGO | S_IWUSR,
	nvme_ctrl_reconnect_delay_show, nvme_ctrl_reconnect_delay_store);

static ssize_t nvme_ctrl_fast_io_fail_tmo_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (ctrl->opts->fast_io_fail_tmo == -1)
		return sysfs_emit(buf, "off\n");
	return sysfs_emit(buf, "%d\n", ctrl->opts->fast_io_fail_tmo);
}

static ssize_t nvme_ctrl_fast_io_fail_tmo_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	struct nvmf_ctrl_options *opts = ctrl->opts;
	int fast_io_fail_tmo, err;

	err = kstrtoint(buf, 10, &fast_io_fail_tmo);
	if (err)
		return -EINVAL;

	/* Negative input disables the fast-fail timeout ("off"). */
	if (fast_io_fail_tmo < 0)
		opts->fast_io_fail_tmo = -1;
	else
		opts->fast_io_fail_tmo = fast_io_fail_tmo;
	return count;
}
static DEVICE_ATTR(fast_io_fail_tmo, S_IRUGO | S_IWUSR,
	nvme_ctrl_fast_io_fail_tmo_show, nvme_ctrl_fast_io_fail_tmo_store);

static ssize_t cntrltype_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	static const char * const type[] = {
		[NVME_CTRL_IO]		= "io\n",
		[NVME_CTRL_DISC]	= "discovery\n",
		[NVME_CTRL_ADMIN]	= "admin\n",
	};
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (ctrl->cntrltype > NVME_CTRL_ADMIN || !type[ctrl->cntrltype])
		return sysfs_emit(buf, "reserved\n");

	return sysfs_emit(buf, type[ctrl->cntrltype]);
}
static DEVICE_ATTR_RO(cntrltype);

/* Discovery controller type ("dctype"): none / ddc / cdc / reserved. */
static ssize_t dctype_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	static const char * const type[] = {
		[NVME_DCTYPE_NOT_REPORTED]	= "none\n",
		[NVME_DCTYPE_DDC]		= "ddc\n",
		[NVME_DCTYPE_CDC]		= "cdc\n",
	};
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (ctrl->dctype > NVME_DCTYPE_CDC || !type[ctrl->dctype])
		return sysfs_emit(buf, "reserved\n");

	return sysfs_emit(buf, type[ctrl->dctype]);
}
static DEVICE_ATTR_RO(dctype);

/* "quirks": one name per line for every quirk bit set on this controller. */
static ssize_t quirks_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	int count = 0, i;
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	unsigned long quirks = ctrl->quirks;

	if (!quirks)
		return sysfs_emit(buf, "none\n");

	/* Walk the set bits; shifting right terminates once all are consumed. */
	for (i = 0; quirks; ++i) {
		if (quirks & 1) {
			count += sysfs_emit_at(buf, count, "%s\n",
					       nvme_quirk_name(BIT(i)));
		}
		quirks >>= 1;
	}

	return count;
}
static DEVICE_ATTR_RO(quirks);

#ifdef CONFIG_NVME_HOST_AUTH
/*
 * NOTE(review): showing the DH-HMAC-CHAP secret in cleartext is the
 * established ABI here; access is limited by the S_IRUGO mode on the
 * attribute and the controller device's ownership.
 */
static ssize_t nvme_ctrl_dhchap_secret_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	struct nvmf_ctrl_options *opts = ctrl->opts;

	if (!opts->dhchap_secret)
		return sysfs_emit(buf, "none\n");
	return sysfs_emit(buf, "%s\n", opts->dhchap_secret);
}

/*
 * Replace the host DH-HMAC-CHAP secret ("DHHC-1:..." format) and kick off
 * re-authentication.  The key pointer swap is done under dhchap_auth_mutex;
 * the old key is freed only after it is unpublished.
 */
static ssize_t nvme_ctrl_dhchap_secret_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	struct nvmf_ctrl_options *opts = ctrl->opts;
	char *dhchap_secret;

	/* Only allow updating an already-configured secret. */
	if (!ctrl->opts->dhchap_secret)
		return -EINVAL;
	if (count < 7)
		return -EINVAL;
	if (memcmp(buf, "DHHC-1:", 7))
		return -EINVAL;

	dhchap_secret = kzalloc(count + 1, GFP_KERNEL);
	if (!dhchap_secret)
		return -ENOMEM;
	memcpy(dhchap_secret, buf, count);
	nvme_auth_stop(ctrl);
	if (strcmp(dhchap_secret, opts->dhchap_secret)) {
		struct nvme_dhchap_key *key, *host_key;
		int ret;

		ret = nvme_auth_generate_key(dhchap_secret, &key);
		if (ret) {
			kfree(dhchap_secret);
			return ret;
		}
		kfree(opts->dhchap_secret);
		opts->dhchap_secret = dhchap_secret;
		host_key = ctrl->host_key;
		mutex_lock(&ctrl->dhchap_auth_mutex);
		ctrl->host_key = key;
		mutex_unlock(&ctrl->dhchap_auth_mutex);
		nvme_auth_free_key(host_key);
	} else
		kfree(dhchap_secret);
	/* Start re-authentication */
	dev_info(ctrl->device, "re-authenticating controller\n");
	queue_work(nvme_wq, &ctrl->dhchap_auth_work);

	return count;
}

static DEVICE_ATTR(dhchap_secret, S_IRUGO | S_IWUSR,
	nvme_ctrl_dhchap_secret_show, nvme_ctrl_dhchap_secret_store);

static ssize_t nvme_ctrl_dhchap_ctrl_secret_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	struct nvmf_ctrl_options *opts = ctrl->opts;

	if (!opts->dhchap_ctrl_secret)
		return sysfs_emit(buf, "none\n");
	return sysfs_emit(buf, "%s\n", opts->dhchap_ctrl_secret);
}

/*
 * Controller-side secret update for bidirectional authentication; mirrors
 * nvme_ctrl_dhchap_secret_store() but swaps ctrl->ctrl_key instead.
 */
static ssize_t nvme_ctrl_dhchap_ctrl_secret_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	struct nvmf_ctrl_options *opts = ctrl->opts;
	char *dhchap_secret;

	/* Only allow updating an already-configured secret. */
	if (!ctrl->opts->dhchap_ctrl_secret)
		return -EINVAL;
	if (count < 7)
		return -EINVAL;
	if (memcmp(buf, "DHHC-1:", 7))
		return -EINVAL;

	dhchap_secret = kzalloc(count + 1, GFP_KERNEL);
	if (!dhchap_secret)
		return -ENOMEM;
	memcpy(dhchap_secret, buf, count);
	nvme_auth_stop(ctrl);
	if (strcmp(dhchap_secret, opts->dhchap_ctrl_secret)) {
		struct nvme_dhchap_key *key, *ctrl_key;
		int ret;

		ret = nvme_auth_generate_key(dhchap_secret, &key);
		if (ret) {
			kfree(dhchap_secret);
			return ret;
		}
		kfree(opts->dhchap_ctrl_secret);
		opts->dhchap_ctrl_secret = dhchap_secret;
		ctrl_key = ctrl->ctrl_key;
		mutex_lock(&ctrl->dhchap_auth_mutex);
		ctrl->ctrl_key = key;
		mutex_unlock(&ctrl->dhchap_auth_mutex);
		nvme_auth_free_key(ctrl_key);
	} else
		kfree(dhchap_secret);
	/* Start re-authentication */
	dev_info(ctrl->device, "re-authenticating controller\n");
	queue_work(nvme_wq, &ctrl->dhchap_auth_work);

	return count;
}

static DEVICE_ATTR(dhchap_ctrl_secret, S_IRUGO | S_IWUSR,
	nvme_ctrl_dhchap_ctrl_secret_show, nvme_ctrl_dhchap_ctrl_secret_store);
#endif

static struct attribute *nvme_dev_attrs[] = {
	&dev_attr_reset_controller.attr,
	&dev_attr_rescan_controller.attr,
	&dev_attr_model.attr,
	&dev_attr_serial.attr,
	&dev_attr_firmware_rev.attr,
	&dev_attr_cntlid.attr,
	&dev_attr_delete_controller.attr,
	&dev_attr_transport.attr,
	&dev_attr_subsysnqn.attr,
	&dev_attr_address.attr,
	&dev_attr_state.attr,
	&dev_attr_numa_node.attr,
	&dev_attr_queue_count.attr,
	&dev_attr_sqsize.attr,
	&dev_attr_hostnqn.attr,
	&dev_attr_hostid.attr,
	&dev_attr_ctrl_loss_tmo.attr,
	&dev_attr_reconnect_delay.attr,
	&dev_attr_fast_io_fail_tmo.attr,
	&dev_attr_kato.attr,
	&dev_attr_cntrltype.attr,
	&dev_attr_dctype.attr,
	&dev_attr_quirks.attr,
#ifdef CONFIG_NVME_HOST_AUTH
	&dev_attr_dhchap_secret.attr,
	&dev_attr_dhchap_ctrl_secret.attr,
#endif
	&dev_attr_adm_passthru_err_log_enabled.attr,
	NULL
};

/*
 * Hide attributes whose backing capability is absent: transports without
 * delete/get_address callbacks, and fabrics-only options (ctrl->opts is
 * NULL for PCIe controllers).
 */
static umode_t nvme_dev_attrs_are_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (a == &dev_attr_delete_controller.attr && !ctrl->ops->delete_ctrl)
		return 0;
	if (a == &dev_attr_address.attr && !ctrl->ops->get_address)
		return 0;
	if (a == &dev_attr_hostnqn.attr && !ctrl->opts)
		return 0;
	if (a == &dev_attr_hostid.attr && !ctrl->opts)
		return 0;
	if (a == &dev_attr_ctrl_loss_tmo.attr && !ctrl->opts)
		return 0;
	if (a == &dev_attr_reconnect_delay.attr && !ctrl->opts)
		return 0;
	if (a == &dev_attr_fast_io_fail_tmo.attr && !ctrl->opts)
		return 0;
#ifdef CONFIG_NVME_HOST_AUTH
	if (a == &dev_attr_dhchap_secret.attr && !ctrl->opts)
		return 0;
	if (a == &dev_attr_dhchap_ctrl_secret.attr && !ctrl->opts)
		return 0;
#endif

	return a->mode;
}

const struct attribute_group nvme_dev_attrs_group = {
	.attrs		= nvme_dev_attrs,
	.is_visible	= nvme_dev_attrs_are_visible,
};
EXPORT_SYMBOL_GPL(nvme_dev_attrs_group);

#ifdef CONFIG_NVME_TCP_TLS
/* PSK identity of the TLS key currently in use, if any. */
static ssize_t tls_key_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (!ctrl->tls_pskid)
		return 0;
	return sysfs_emit(buf, "%08x\n", ctrl->tls_pskid);
}
static DEVICE_ATTR_RO(tls_key);

/* Serial of the TLS key configured via connect options. */
static ssize_t tls_configured_key_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	struct key *key = ctrl->opts->tls_key;

	return sysfs_emit(buf, "%08x\n", key_serial(key));
}
static DEVICE_ATTR_RO(tls_configured_key);

static ssize_t tls_keyring_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	struct key *keyring = ctrl->opts->keyring;

	return sysfs_emit(buf, "%s\n", keyring->description);
}
static DEVICE_ATTR_RO(tls_keyring);

static struct attribute *nvme_tls_attrs[] = {
	&dev_attr_tls_key.attr,
	&dev_attr_tls_configured_key.attr,
	&dev_attr_tls_keyring.attr,
	NULL,
};

/* TLS attributes only make sense on TCP transports with TLS configured. */
static umode_t nvme_tls_attrs_are_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (!ctrl->opts || strcmp(ctrl->opts->transport, "tcp"))
		return 0;

	if (a == &dev_attr_tls_key.attr &&
	    !ctrl->opts->tls && !ctrl->opts->concat)
		return 0;
	if (a == &dev_attr_tls_configured_key.attr &&
	    (!ctrl->opts->tls_key || ctrl->opts->concat))
		return 0;
	if (a == &dev_attr_tls_keyring.attr &&
	    !ctrl->opts->keyring)
		return 0;

	return a->mode;
}

static const struct attribute_group nvme_tls_attrs_group = {
	.attrs		= nvme_tls_attrs,
	.is_visible	= nvme_tls_attrs_are_visible,
};
#endif

const struct attribute_group *nvme_dev_attr_groups[] = {
	&nvme_dev_attrs_group,
#ifdef CONFIG_NVME_TCP_TLS
	&nvme_tls_attrs_group,
#endif
	NULL,
};

/* Helper for declaring read-only subsystem-level attributes. */
#define SUBSYS_ATTR_RO(_name, _mode, _show)			\
	struct device_attribute subsys_attr_##_name = \
		__ATTR(_name, _mode, _show, NULL)

static ssize_t nvme_subsys_show_nqn(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct nvme_subsystem *subsys =
		container_of(dev, struct nvme_subsystem, dev);

	return sysfs_emit(buf, "%s\n", subsys->subnqn);
}
static SUBSYS_ATTR_RO(subsysnqn, S_IRUGO, nvme_subsys_show_nqn);

static ssize_t nvme_subsys_show_type(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct nvme_subsystem *subsys =
		container_of(dev, struct nvme_subsystem, dev);

	switch (subsys->subtype) {
	case NVME_NQN_DISC:
		return sysfs_emit(buf, "discovery\n");
	case NVME_NQN_NVME:
		return sysfs_emit(buf, "nvm\n");
	default:
		return sysfs_emit(buf, "reserved\n");
	}
}
static SUBSYS_ATTR_RO(subsystype, S_IRUGO, nvme_subsys_show_type);

/*
 * Generate a read-only subsystem attribute printing a fixed-width,
 * space-padded string field (model, serial, firmware revision).
 */
#define nvme_subsys_show_str_function(field)				\
static ssize_t subsys_##field##_show(struct device *dev,		\
				     struct device_attribute *attr,	\
				     char *buf)				\
{									\
	struct nvme_subsystem *subsys =					\
		container_of(dev, struct nvme_subsystem, dev);		\
	return sysfs_emit(buf, "%.*s\n",				\
			   (int)sizeof(subsys->field), subsys->field);	\
}									\
static SUBSYS_ATTR_RO(field, S_IRUGO, subsys_##field##_show);

nvme_subsys_show_str_function(model);
nvme_subsys_show_str_function(serial);
nvme_subsys_show_str_function(firmware_rev);

static struct attribute *nvme_subsys_attrs[] = {
	&subsys_attr_model.attr,
	&subsys_attr_serial.attr,
	&subsys_attr_firmware_rev.attr,
	&subsys_attr_subsysnqn.attr,
	&subsys_attr_subsystype.attr,
#ifdef CONFIG_NVME_MULTIPATH
	&subsys_attr_iopolicy.attr,
#endif
	NULL,
};

static const struct attribute_group nvme_subsys_attrs_group = {
	.attrs = nvme_subsys_attrs,
};

const struct attribute_group *nvme_subsys_attrs_groups[] = {
	&nvme_subsys_attrs_group,
	NULL,
};