// SPDX-License-Identifier: GPL-2.0
/*
 * Sysfs interface for the NVMe core driver.
 *
 * Copyright (c) 2011-2014, Intel Corporation.
 */

#include <linux/nvme-auth.h>

#include "nvme.h"
#include "fabrics.h"

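/*
 * Writing to this attribute triggers a synchronous controller reset;
 * the write returns an error if the reset fails.
 */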
static ssize_t nvme_sysfs_reset(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	int ret;

	ret = nvme_reset_ctrl_sync(ctrl);
	if (ret < 0)
		return ret;
	return count;
}
static DEVICE_ATTR(reset_controller, S_IWUSR, NULL, nvme_sysfs_reset);

static ssize_t nvme_sysfs_rescan(struct device *dev,
				 struct device_attribute *attr, const char *buf,
				 size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	nvme_queue_scan(ctrl);
	return count;
}
static DEVICE_ATTR(rescan_controller, S_IWUSR, NULL, nvme_sysfs_rescan);

static ssize_t nvme_adm_passthru_err_log_enabled_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return sysfs_emit(buf,
			  ctrl->passthru_err_log_enabled ? "on\n" : "off\n");
}

static ssize_t nvme_adm_passthru_err_log_enabled_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	bool passthru_err_log_enabled;
	int err;

	err = kstrtobool(buf, &passthru_err_log_enabled);
	if (err)
		return -EINVAL;

	ctrl->passthru_err_log_enabled = passthru_err_log_enabled;

	return count;
}

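/*
 * Resolve the nvme_ns_head for both flavours of namespace device: the
 * multipath head node (where the gendisk private_data is the head itself)
 * and a per-path namespace device.
 */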
static inline struct nvme_ns_head *dev_to_ns_head(struct device *dev)
{
	struct gendisk *disk = dev_to_disk(dev);

	if (nvme_disk_is_ns_head(disk))
		return disk->private_data;
	return nvme_get_ns_from_dev(dev)->head;
}

static ssize_t nvme_io_passthru_err_log_enabled_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvme_ns_head *head = dev_to_ns_head(dev);

	return sysfs_emit(buf, head->passthru_err_log_enabled ? "on\n" : "off\n");
}

static ssize_t nvme_io_passthru_err_log_enabled_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct nvme_ns_head *head = dev_to_ns_head(dev);
	bool passthru_err_log_enabled;
	int err;

	err = kstrtobool(buf, &passthru_err_log_enabled);
	if (err)
		return -EINVAL;
	head->passthru_err_log_enabled = passthru_err_log_enabled;

	return count;
}

static struct device_attribute dev_attr_adm_passthru_err_log_enabled =
	__ATTR(passthru_err_log_enabled, S_IRUGO | S_IWUSR,
	       nvme_adm_passthru_err_log_enabled_show,
	       nvme_adm_passthru_err_log_enabled_store);

static struct device_attribute dev_attr_io_passthru_err_log_enabled =
	__ATTR(passthru_err_log_enabled, S_IRUGO | S_IWUSR,
	       nvme_io_passthru_err_log_enabled_show,
	       nvme_io_passthru_err_log_enabled_store);

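/*
 * WWID precedence follows identifier quality: UUID first, then NGUID,
 * then EUI-64, and finally a fallback composed from the vendor ID,
 * serial, model (trailing padding stripped) and the namespace ID.
 */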
static ssize_t wwid_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct nvme_ns_head *head = dev_to_ns_head(dev);
	struct nvme_ns_ids *ids = &head->ids;
	struct nvme_subsystem *subsys = head->subsys;
	int serial_len = sizeof(subsys->serial);
	int model_len = sizeof(subsys->model);

	if (!uuid_is_null(&ids->uuid))
		return sysfs_emit(buf, "uuid.%pU\n", &ids->uuid);

	if (memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
		return sysfs_emit(buf, "eui.%16phN\n", ids->nguid);

	if (memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
		return sysfs_emit(buf, "eui.%8phN\n", ids->eui64);

	while (serial_len > 0 && (subsys->serial[serial_len - 1] == ' ' ||
				  subsys->serial[serial_len - 1] == '\0'))
		serial_len--;
	while (model_len > 0 && (subsys->model[model_len - 1] == ' ' ||
				 subsys->model[model_len - 1] == '\0'))
		model_len--;

	return sysfs_emit(buf, "nvme.%04x-%*phN-%*phN-%08x\n", subsys->vendor_id,
		serial_len, subsys->serial, model_len, subsys->model,
		head->ns_id);
}
static DEVICE_ATTR_RO(wwid);

static ssize_t nguid_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	return sysfs_emit(buf, "%pU\n", dev_to_ns_head(dev)->ids.nguid);
}
static DEVICE_ATTR_RO(nguid);

static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct nvme_ns_ids *ids = &dev_to_ns_head(dev)->ids;

	/* For backward compatibility expose the NGUID to userspace if
	 * we have no UUID set
	 */
	if (uuid_is_null(&ids->uuid)) {
		dev_warn_once(dev,
			"No UUID available providing old NGUID\n");
		return sysfs_emit(buf, "%pU\n", ids->nguid);
	}
	return sysfs_emit(buf, "%pU\n", &ids->uuid);
}
static DEVICE_ATTR_RO(uuid);

static ssize_t eui_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	return sysfs_emit(buf, "%8ph\n", dev_to_ns_head(dev)->ids.eui64);
}
static DEVICE_ATTR_RO(eui);

static ssize_t nsid_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	return sysfs_emit(buf, "%d\n", dev_to_ns_head(dev)->ns_id);
}
static DEVICE_ATTR_RO(nsid);

static ssize_t csi_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	return sysfs_emit(buf, "%u\n", dev_to_ns_head(dev)->ids.csi);
}
static DEVICE_ATTR_RO(csi);

static ssize_t metadata_bytes_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%u\n", dev_to_ns_head(dev)->ms);
}
static DEVICE_ATTR_RO(metadata_bytes);

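/*
 * The namespace utilization (nuse) is cached on the ns_head and refreshed
 * via an Identify Namespace command, rate limited so repeated sysfs reads
 * do not flood the controller. The head variant looks up a usable path
 * under SRCU before issuing the command.
 */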
static int ns_head_update_nuse(struct nvme_ns_head *head)
{
	struct nvme_id_ns *id;
	struct nvme_ns *ns;
	int srcu_idx, ret = -EWOULDBLOCK;

	/* Avoid issuing commands too often by rate limiting the update */
	if (!__ratelimit(&head->rs_nuse))
		return 0;

	srcu_idx = srcu_read_lock(&head->srcu);
	ns = nvme_find_path(head);
	if (!ns)
		goto out_unlock;

	ret = nvme_identify_ns(ns->ctrl, head->ns_id, &id);
	if (ret)
		goto out_unlock;

	head->nuse = le64_to_cpu(id->nuse);
	kfree(id);

out_unlock:
	srcu_read_unlock(&head->srcu, srcu_idx);
	return ret;
}

static int ns_update_nuse(struct nvme_ns *ns)
{
	struct nvme_id_ns *id;
	int ret;

	/* Avoid issuing commands too often by rate limiting the update. */
	if (!__ratelimit(&ns->head->rs_nuse))
		return 0;

	ret = nvme_identify_ns(ns->ctrl, ns->head->ns_id, &id);
	if (ret)
		return ret;

	ns->head->nuse = le64_to_cpu(id->nuse);
	kfree(id);
	return 0;
}

static ssize_t nuse_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct nvme_ns_head *head = dev_to_ns_head(dev);
	struct gendisk *disk = dev_to_disk(dev);
	int ret;

	if (nvme_disk_is_ns_head(disk))
		ret = ns_head_update_nuse(head);
	else
		ret = ns_update_nuse(disk->private_data);
	if (ret)
		return ret;

	return sysfs_emit(buf, "%llu\n", head->nuse);
}
static DEVICE_ATTR_RO(nuse);

static struct attribute *nvme_ns_attrs[] = {
	&dev_attr_wwid.attr,
	&dev_attr_uuid.attr,
	&dev_attr_nguid.attr,
	&dev_attr_eui.attr,
	&dev_attr_csi.attr,
	&dev_attr_nsid.attr,
	&dev_attr_metadata_bytes.attr,
	&dev_attr_nuse.attr,
#ifdef CONFIG_NVME_MULTIPATH
	&dev_attr_ana_grpid.attr,
	&dev_attr_ana_state.attr,
	&dev_attr_queue_depth.attr,
	&dev_attr_numa_nodes.attr,
#endif
	&dev_attr_io_passthru_err_log_enabled.attr,
	NULL,
};

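/*
 * Hide identifier attributes the namespace does not actually report, and
 * hide per-path attributes (ANA state, queue depth, NUMA nodes) on the
 * multipath head node where they have no meaning.
 */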
static umode_t nvme_ns_attrs_are_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nvme_ns_ids *ids = &dev_to_ns_head(dev)->ids;

	if (a == &dev_attr_uuid.attr) {
		if (uuid_is_null(&ids->uuid) &&
		    !memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
			return 0;
	}
	if (a == &dev_attr_nguid.attr) {
		if (!memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
			return 0;
	}
	if (a == &dev_attr_eui.attr) {
		if (!memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
			return 0;
	}
#ifdef CONFIG_NVME_MULTIPATH
	if (a == &dev_attr_ana_grpid.attr || a == &dev_attr_ana_state.attr) {
		/* per-path attr */
		if (nvme_disk_is_ns_head(dev_to_disk(dev)))
			return 0;
		if (!nvme_ctrl_use_ana(nvme_get_ns_from_dev(dev)->ctrl))
			return 0;
	}
	if (a == &dev_attr_queue_depth.attr || a == &dev_attr_numa_nodes.attr) {
		if (nvme_disk_is_ns_head(dev_to_disk(dev)))
			return 0;
	}
#endif
	return a->mode;
}

static const struct attribute_group nvme_ns_attr_group = {
	.attrs		= nvme_ns_attrs,
	.is_visible	= nvme_ns_attrs_are_visible,
};

#ifdef CONFIG_NVME_MULTIPATH
static struct attribute *nvme_ns_mpath_attrs[] = {
	NULL,
};

const struct attribute_group nvme_ns_mpath_attr_group = {
	.name		= "multipath",
	.attrs		= nvme_ns_mpath_attrs,
};
#endif

const struct attribute_group *nvme_ns_attr_groups[] = {
	&nvme_ns_attr_group,
#ifdef CONFIG_NVME_MULTIPATH
	&nvme_ns_mpath_attr_group,
#endif
	NULL,
};

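/*
 * Generate read-only controller attributes: one macro for fixed-width
 * string fields taken from the subsystem (printed with "%.*s" because
 * they are space padded and not NUL terminated) and one for plain
 * integer fields on the controller itself.
 */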
#define nvme_show_str_function(field)					\
static ssize_t field##_show(struct device *dev,				\
		struct device_attribute *attr, char *buf)		\
{									\
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);			\
	return sysfs_emit(buf, "%.*s\n",				\
		(int)sizeof(ctrl->subsys->field), ctrl->subsys->field);	\
}									\
static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);

nvme_show_str_function(model);
nvme_show_str_function(serial);
nvme_show_str_function(firmware_rev);

#define nvme_show_int_function(field)					\
static ssize_t field##_show(struct device *dev,				\
		struct device_attribute *attr, char *buf)		\
{									\
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);			\
	return sysfs_emit(buf, "%d\n", ctrl->field);			\
}									\
static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);

nvme_show_int_function(cntlid);
nvme_show_int_function(numa_node);
nvme_show_int_function(queue_count);
nvme_show_int_function(sqsize);
nvme_show_int_function(kato);

static ssize_t nvme_sysfs_delete(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (!test_bit(NVME_CTRL_STARTED_ONCE, &ctrl->flags))
		return -EBUSY;

	if (device_remove_file_self(dev, attr))
		nvme_delete_ctrl_sync(ctrl);
	return count;
}
static DEVICE_ATTR(delete_controller, S_IWUSR, NULL, nvme_sysfs_delete);

static ssize_t nvme_sysfs_show_transport(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%s\n", ctrl->ops->name);
}
static DEVICE_ATTR(transport, S_IRUGO, nvme_sysfs_show_transport, NULL);

static ssize_t nvme_sysfs_show_state(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	unsigned state = (unsigned)nvme_ctrl_state(ctrl);
	static const char *const state_name[] = {
		[NVME_CTRL_NEW]		= "new",
		[NVME_CTRL_LIVE]	= "live",
		[NVME_CTRL_RESETTING]	= "resetting",
		[NVME_CTRL_CONNECTING]	= "connecting",
		[NVME_CTRL_DELETING]	= "deleting",
		[NVME_CTRL_DELETING_NOIO] = "deleting (no IO)",
		[NVME_CTRL_DEAD]	= "dead",
	};

	if (state < ARRAY_SIZE(state_name) && state_name[state])
		return sysfs_emit(buf, "%s\n", state_name[state]);

	return sysfs_emit(buf, "unknown state\n");
}

static DEVICE_ATTR(state, S_IRUGO, nvme_sysfs_show_state, NULL);

static ssize_t nvme_sysfs_show_subsysnqn(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%s\n", ctrl->subsys->subnqn);
}
static DEVICE_ATTR(subsysnqn, S_IRUGO, nvme_sysfs_show_subsysnqn, NULL);

static ssize_t nvme_sysfs_show_hostnqn(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%s\n", ctrl->opts->host->nqn);
}
static DEVICE_ATTR(hostnqn, S_IRUGO, nvme_sysfs_show_hostnqn, NULL);

static ssize_t nvme_sysfs_show_hostid(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%pU\n", &ctrl->opts->host->id);
}
static DEVICE_ATTR(hostid, S_IRUGO, nvme_sysfs_show_hostid, NULL);

static ssize_t nvme_sysfs_show_address(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return ctrl->ops->get_address(ctrl, buf, PAGE_SIZE);
}
static DEVICE_ATTR(address, S_IRUGO, nvme_sysfs_show_address, NULL);

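/*
 * ctrl_loss_tmo is not stored directly: it is kept as the number of
 * reconnect attempts (max_reconnects) at reconnect_delay second
 * intervals, with -1 meaning "never give up". Writing a negative value
 * disables the timeout.
 */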
static ssize_t nvme_ctrl_loss_tmo_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	struct nvmf_ctrl_options *opts = ctrl->opts;

	if (ctrl->opts->max_reconnects == -1)
		return sysfs_emit(buf, "off\n");
	return sysfs_emit(buf, "%d\n",
			  opts->max_reconnects * opts->reconnect_delay);
}

static ssize_t nvme_ctrl_loss_tmo_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	struct nvmf_ctrl_options *opts = ctrl->opts;
	int ctrl_loss_tmo, err;

	err = kstrtoint(buf, 10, &ctrl_loss_tmo);
	if (err)
		return -EINVAL;

	if (ctrl_loss_tmo < 0)
		opts->max_reconnects = -1;
	else
		opts->max_reconnects = DIV_ROUND_UP(ctrl_loss_tmo,
					opts->reconnect_delay);
	return count;
}
static DEVICE_ATTR(ctrl_loss_tmo, S_IRUGO | S_IWUSR,
	nvme_ctrl_loss_tmo_show, nvme_ctrl_loss_tmo_store);

static ssize_t nvme_ctrl_reconnect_delay_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (ctrl->opts->reconnect_delay == -1)
		return sysfs_emit(buf, "off\n");
	return sysfs_emit(buf, "%d\n", ctrl->opts->reconnect_delay);
}

static ssize_t nvme_ctrl_reconnect_delay_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	unsigned int v;
	int err;

	err = kstrtou32(buf, 10, &v);
	if (err)
		return err;

	ctrl->opts->reconnect_delay = v;
	return count;
}
static DEVICE_ATTR(reconnect_delay, S_IRUGO | S_IWUSR,
	nvme_ctrl_reconnect_delay_show, nvme_ctrl_reconnect_delay_store);

static ssize_t nvme_ctrl_fast_io_fail_tmo_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (ctrl->opts->fast_io_fail_tmo == -1)
		return sysfs_emit(buf, "off\n");
	return sysfs_emit(buf, "%d\n", ctrl->opts->fast_io_fail_tmo);
}

static ssize_t nvme_ctrl_fast_io_fail_tmo_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	struct nvmf_ctrl_options *opts = ctrl->opts;
	int fast_io_fail_tmo, err;

	err = kstrtoint(buf, 10, &fast_io_fail_tmo);
	if (err)
		return -EINVAL;

	if (fast_io_fail_tmo < 0)
		opts->fast_io_fail_tmo = -1;
	else
		opts->fast_io_fail_tmo = fast_io_fail_tmo;
	return count;
}
static DEVICE_ATTR(fast_io_fail_tmo, S_IRUGO | S_IWUSR,
	nvme_ctrl_fast_io_fail_tmo_show, nvme_ctrl_fast_io_fail_tmo_store);

static ssize_t cntrltype_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	static const char * const type[] = {
		[NVME_CTRL_IO]		= "io\n",
		[NVME_CTRL_DISC]	= "discovery\n",
		[NVME_CTRL_ADMIN]	= "admin\n",
	};
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (ctrl->cntrltype > NVME_CTRL_ADMIN || !type[ctrl->cntrltype])
		return sysfs_emit(buf, "reserved\n");

	return sysfs_emit(buf, type[ctrl->cntrltype]);
}
static DEVICE_ATTR_RO(cntrltype);

static ssize_t dctype_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	static const char * const type[] = {
		[NVME_DCTYPE_NOT_REPORTED]	= "none\n",
		[NVME_DCTYPE_DDC]		= "ddc\n",
		[NVME_DCTYPE_CDC]		= "cdc\n",
	};
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (ctrl->dctype > NVME_DCTYPE_CDC || !type[ctrl->dctype])
		return sysfs_emit(buf, "reserved\n");

	return sysfs_emit(buf, type[ctrl->dctype]);
}
static DEVICE_ATTR_RO(dctype);

#ifdef CONFIG_NVME_HOST_AUTH
static ssize_t nvme_ctrl_dhchap_secret_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	struct nvmf_ctrl_options *opts = ctrl->opts;

	if (!opts->dhchap_secret)
		return sysfs_emit(buf, "none\n");
	return sysfs_emit(buf, "%s\n", opts->dhchap_secret);
}

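/*
 * Accept a new host DH-HMAC-CHAP secret (must start with "DHHC-1:").
 * If the secret actually changed, derive the new host key, swap it in
 * under dhchap_auth_mutex, free the old one, and kick off
 * re-authentication.
 */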
static ssize_t nvme_ctrl_dhchap_secret_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	struct nvmf_ctrl_options *opts = ctrl->opts;
	char *dhchap_secret;

	if (!ctrl->opts->dhchap_secret)
		return -EINVAL;
	if (count < 7)
		return -EINVAL;
	if (memcmp(buf, "DHHC-1:", 7))
		return -EINVAL;

	dhchap_secret = kzalloc(count + 1, GFP_KERNEL);
	if (!dhchap_secret)
		return -ENOMEM;
	memcpy(dhchap_secret, buf, count);
	nvme_auth_stop(ctrl);
	if (strcmp(dhchap_secret, opts->dhchap_secret)) {
		struct nvme_dhchap_key *key, *host_key;
		int ret;

		ret = nvme_auth_generate_key(dhchap_secret, &key);
		if (ret) {
			kfree(dhchap_secret);
			return ret;
		}
		kfree(opts->dhchap_secret);
		opts->dhchap_secret = dhchap_secret;
		host_key = ctrl->host_key;
		mutex_lock(&ctrl->dhchap_auth_mutex);
		ctrl->host_key = key;
		mutex_unlock(&ctrl->dhchap_auth_mutex);
		nvme_auth_free_key(host_key);
	} else
		kfree(dhchap_secret);
	/* Start re-authentication */
	dev_info(ctrl->device, "re-authenticating controller\n");
	queue_work(nvme_wq, &ctrl->dhchap_auth_work);

	return count;
}

static DEVICE_ATTR(dhchap_secret, S_IRUGO | S_IWUSR,
	nvme_ctrl_dhchap_secret_show, nvme_ctrl_dhchap_secret_store);

static ssize_t nvme_ctrl_dhchap_ctrl_secret_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	struct nvmf_ctrl_options *opts = ctrl->opts;

	if (!opts->dhchap_ctrl_secret)
		return sysfs_emit(buf, "none\n");
	return sysfs_emit(buf, "%s\n", opts->dhchap_ctrl_secret);
}

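/*
 * Controller-side secret for bidirectional authentication; handled
 * exactly like the host secret above, but replacing ctrl->ctrl_key.
 */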
static ssize_t nvme_ctrl_dhchap_ctrl_secret_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	struct nvmf_ctrl_options *opts = ctrl->opts;
	char *dhchap_secret;

	if (!ctrl->opts->dhchap_ctrl_secret)
		return -EINVAL;
	if (count < 7)
		return -EINVAL;
	if (memcmp(buf, "DHHC-1:", 7))
		return -EINVAL;

	dhchap_secret = kzalloc(count + 1, GFP_KERNEL);
	if (!dhchap_secret)
		return -ENOMEM;
	memcpy(dhchap_secret, buf, count);
	nvme_auth_stop(ctrl);
	if (strcmp(dhchap_secret, opts->dhchap_ctrl_secret)) {
		struct nvme_dhchap_key *key, *ctrl_key;
		int ret;

		ret = nvme_auth_generate_key(dhchap_secret, &key);
		if (ret) {
			kfree(dhchap_secret);
			return ret;
		}
		kfree(opts->dhchap_ctrl_secret);
		opts->dhchap_ctrl_secret = dhchap_secret;
		ctrl_key = ctrl->ctrl_key;
		mutex_lock(&ctrl->dhchap_auth_mutex);
		ctrl->ctrl_key = key;
		mutex_unlock(&ctrl->dhchap_auth_mutex);
		nvme_auth_free_key(ctrl_key);
	} else
		kfree(dhchap_secret);
	/* Start re-authentication */
	dev_info(ctrl->device, "re-authenticating controller\n");
	queue_work(nvme_wq, &ctrl->dhchap_auth_work);

	return count;
}

static DEVICE_ATTR(dhchap_ctrl_secret, S_IRUGO | S_IWUSR,
	nvme_ctrl_dhchap_ctrl_secret_show, nvme_ctrl_dhchap_ctrl_secret_store);
#endif

static struct attribute *nvme_dev_attrs[] = {
	&dev_attr_reset_controller.attr,
	&dev_attr_rescan_controller.attr,
	&dev_attr_model.attr,
	&dev_attr_serial.attr,
	&dev_attr_firmware_rev.attr,
	&dev_attr_cntlid.attr,
	&dev_attr_delete_controller.attr,
	&dev_attr_transport.attr,
	&dev_attr_subsysnqn.attr,
	&dev_attr_address.attr,
	&dev_attr_state.attr,
	&dev_attr_numa_node.attr,
	&dev_attr_queue_count.attr,
	&dev_attr_sqsize.attr,
	&dev_attr_hostnqn.attr,
	&dev_attr_hostid.attr,
	&dev_attr_ctrl_loss_tmo.attr,
	&dev_attr_reconnect_delay.attr,
	&dev_attr_fast_io_fail_tmo.attr,
	&dev_attr_kato.attr,
	&dev_attr_cntrltype.attr,
	&dev_attr_dctype.attr,
#ifdef CONFIG_NVME_HOST_AUTH
	&dev_attr_dhchap_secret.attr,
	&dev_attr_dhchap_ctrl_secret.attr,
#endif
	&dev_attr_adm_passthru_err_log_enabled.attr,
	NULL
};

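/*
 * Fabrics-specific attributes only make sense when the controller was
 * created with connection options (ctrl->opts); they are hidden for PCIe
 * controllers, as are callbacks a transport does not implement.
 */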
static umode_t nvme_dev_attrs_are_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (a == &dev_attr_delete_controller.attr && !ctrl->ops->delete_ctrl)
		return 0;
	if (a == &dev_attr_address.attr && !ctrl->ops->get_address)
		return 0;
	if (a == &dev_attr_hostnqn.attr && !ctrl->opts)
		return 0;
	if (a == &dev_attr_hostid.attr && !ctrl->opts)
		return 0;
	if (a == &dev_attr_ctrl_loss_tmo.attr && !ctrl->opts)
		return 0;
	if (a == &dev_attr_reconnect_delay.attr && !ctrl->opts)
		return 0;
	if (a == &dev_attr_fast_io_fail_tmo.attr && !ctrl->opts)
		return 0;
#ifdef CONFIG_NVME_HOST_AUTH
	if (a == &dev_attr_dhchap_secret.attr && !ctrl->opts)
		return 0;
	if (a == &dev_attr_dhchap_ctrl_secret.attr && !ctrl->opts)
		return 0;
#endif

	return a->mode;
}

const struct attribute_group nvme_dev_attrs_group = {
	.attrs		= nvme_dev_attrs,
	.is_visible	= nvme_dev_attrs_are_visible,
};
EXPORT_SYMBOL_GPL(nvme_dev_attrs_group);

#ifdef CONFIG_NVME_TCP_TLS
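/*
 * tls_key reports the ID of the TLS PSK currently in use (tls_pskid),
 * tls_configured_key the key configured via the connect options, and
 * tls_keyring the keyring those PSKs are looked up in.
 */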
static ssize_t tls_key_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (!ctrl->tls_pskid)
		return 0;
	return sysfs_emit(buf, "%08x\n", ctrl->tls_pskid);
}
static DEVICE_ATTR_RO(tls_key);

static ssize_t tls_configured_key_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	struct key *key = ctrl->opts->tls_key;

	return sysfs_emit(buf, "%08x\n", key_serial(key));
}
static DEVICE_ATTR_RO(tls_configured_key);

static ssize_t tls_keyring_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	struct key *keyring = ctrl->opts->keyring;

	return sysfs_emit(buf, "%s\n", keyring->description);
}
static DEVICE_ATTR_RO(tls_keyring);

static struct attribute *nvme_tls_attrs[] = {
	&dev_attr_tls_key.attr,
	&dev_attr_tls_configured_key.attr,
	&dev_attr_tls_keyring.attr,
	NULL,
};

static umode_t nvme_tls_attrs_are_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (!ctrl->opts || strcmp(ctrl->opts->transport, "tcp"))
		return 0;

	if (a == &dev_attr_tls_key.attr &&
	    !ctrl->opts->tls && !ctrl->opts->concat)
		return 0;
	if (a == &dev_attr_tls_configured_key.attr &&
	    (!ctrl->opts->tls_key || ctrl->opts->concat))
		return 0;
	if (a == &dev_attr_tls_keyring.attr &&
	    !ctrl->opts->keyring)
		return 0;

	return a->mode;
}

static const struct attribute_group nvme_tls_attrs_group = {
	.attrs		= nvme_tls_attrs,
	.is_visible	= nvme_tls_attrs_are_visible,
};
#endif

const struct attribute_group *nvme_dev_attr_groups[] = {
	&nvme_dev_attrs_group,
#ifdef CONFIG_NVME_TCP_TLS
	&nvme_tls_attrs_group,
#endif
	NULL,
};

#define SUBSYS_ATTR_RO(_name, _mode, _show)			\
	struct device_attribute subsys_attr_##_name =		\
		__ATTR(_name, _mode, _show, NULL)

static ssize_t nvme_subsys_show_nqn(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct nvme_subsystem *subsys =
		container_of(dev, struct nvme_subsystem, dev);

	return sysfs_emit(buf, "%s\n", subsys->subnqn);
}
static SUBSYS_ATTR_RO(subsysnqn, S_IRUGO, nvme_subsys_show_nqn);

static ssize_t nvme_subsys_show_type(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct nvme_subsystem *subsys =
		container_of(dev, struct nvme_subsystem, dev);

	switch (subsys->subtype) {
	case NVME_NQN_DISC:
		return sysfs_emit(buf, "discovery\n");
	case NVME_NQN_NVME:
		return sysfs_emit(buf, "nvm\n");
	default:
		return sysfs_emit(buf, "reserved\n");
	}
}
static SUBSYS_ATTR_RO(subsystype, S_IRUGO, nvme_subsys_show_type);

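/*
 * Like nvme_show_str_function() above, but for the subsystem device:
 * emits the fixed-width, space-padded identify strings with an explicit
 * length bound.
 */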
#define nvme_subsys_show_str_function(field)				\
static ssize_t subsys_##field##_show(struct device *dev,		\
		struct device_attribute *attr, char *buf)		\
{									\
	struct nvme_subsystem *subsys =					\
		container_of(dev, struct nvme_subsystem, dev);		\
	return sysfs_emit(buf, "%.*s\n",				\
		(int)sizeof(subsys->field), subsys->field);		\
}									\
static SUBSYS_ATTR_RO(field, S_IRUGO, subsys_##field##_show);

nvme_subsys_show_str_function(model);
nvme_subsys_show_str_function(serial);
nvme_subsys_show_str_function(firmware_rev);

static struct attribute *nvme_subsys_attrs[] = {
	&subsys_attr_model.attr,
	&subsys_attr_serial.attr,
	&subsys_attr_firmware_rev.attr,
	&subsys_attr_subsysnqn.attr,
	&subsys_attr_subsystype.attr,
#ifdef CONFIG_NVME_MULTIPATH
	&subsys_attr_iopolicy.attr,
#endif
	NULL,
};

static const struct attribute_group nvme_subsys_attrs_group = {
	.attrs = nvme_subsys_attrs,
};

const struct attribute_group *nvme_subsys_attrs_groups[] = {
	&nvme_subsys_attrs_group,
	NULL,
};