xref: /linux/drivers/nvme/host/sysfs.c (revision 6ab1f766a80a6f46c7196f588e867cef51f4f26a)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Sysfs interface for the NVMe core driver.
4  *
5  * Copyright (c) 2011-2014, Intel Corporation.
6  */
7 
8 #include <linux/nvme-auth.h>
9 
10 #include "nvme.h"
11 #include "fabrics.h"
12 
13 static ssize_t nvme_sysfs_reset(struct device *dev,
14 				struct device_attribute *attr, const char *buf,
15 				size_t count)
16 {
17 	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
18 	int ret;
19 
20 	ret = nvme_reset_ctrl_sync(ctrl);
21 	if (ret < 0)
22 		return ret;
23 	return count;
24 }
25 static DEVICE_ATTR(reset_controller, S_IWUSR, NULL, nvme_sysfs_reset);
26 
27 static ssize_t nvme_sysfs_rescan(struct device *dev,
28 				struct device_attribute *attr, const char *buf,
29 				size_t count)
30 {
31 	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
32 
33 	nvme_queue_scan(ctrl);
34 	return count;
35 }
36 static DEVICE_ATTR(rescan_controller, S_IWUSR, NULL, nvme_sysfs_rescan);
37 
38 static inline struct nvme_ns_head *dev_to_ns_head(struct device *dev)
39 {
40 	struct gendisk *disk = dev_to_disk(dev);
41 
42 	if (nvme_disk_is_ns_head(disk))
43 		return disk->private_data;
44 	return nvme_get_ns_from_dev(dev)->head;
45 }
46 
/*
 * Emit a unique world-wide identifier for the namespace.  Preference
 * order: namespace UUID, then NGUID, then EUI-64.  As a last resort
 * synthesize an identifier from the subsystem vendor id, serial, model
 * and the namespace id.
 */
static ssize_t wwid_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct nvme_ns_head *head = dev_to_ns_head(dev);
	struct nvme_ns_ids *ids = &head->ids;
	struct nvme_subsystem *subsys = head->subsys;
	int serial_len = sizeof(subsys->serial);
	int model_len = sizeof(subsys->model);

	if (!uuid_is_null(&ids->uuid))
		return sysfs_emit(buf, "uuid.%pU\n", &ids->uuid);

	/* A non-zero NGUID or EUI-64 counts as present. */
	if (memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
		return sysfs_emit(buf, "eui.%16phN\n", ids->nguid);

	if (memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
		return sysfs_emit(buf, "eui.%8phN\n", ids->eui64);

	/*
	 * serial and model are fixed-width, space/NUL padded fields; trim
	 * the trailing padding before hex-dumping them with "%*phN".
	 */
	while (serial_len > 0 && (subsys->serial[serial_len - 1] == ' ' ||
				  subsys->serial[serial_len - 1] == '\0'))
		serial_len--;
	while (model_len > 0 && (subsys->model[model_len - 1] == ' ' ||
				 subsys->model[model_len - 1] == '\0'))
		model_len--;

	return sysfs_emit(buf, "nvme.%04x-%*phN-%*phN-%08x\n", subsys->vendor_id,
		serial_len, subsys->serial, model_len, subsys->model,
		head->ns_id);
}
static DEVICE_ATTR_RO(wwid);
77 
78 static ssize_t nguid_show(struct device *dev, struct device_attribute *attr,
79 		char *buf)
80 {
81 	return sysfs_emit(buf, "%pU\n", dev_to_ns_head(dev)->ids.nguid);
82 }
83 static DEVICE_ATTR_RO(nguid);
84 
85 static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
86 		char *buf)
87 {
88 	struct nvme_ns_ids *ids = &dev_to_ns_head(dev)->ids;
89 
90 	/* For backward compatibility expose the NGUID to userspace if
91 	 * we have no UUID set
92 	 */
93 	if (uuid_is_null(&ids->uuid)) {
94 		dev_warn_once(dev,
95 			"No UUID available providing old NGUID\n");
96 		return sysfs_emit(buf, "%pU\n", ids->nguid);
97 	}
98 	return sysfs_emit(buf, "%pU\n", &ids->uuid);
99 }
100 static DEVICE_ATTR_RO(uuid);
101 
102 static ssize_t eui_show(struct device *dev, struct device_attribute *attr,
103 		char *buf)
104 {
105 	return sysfs_emit(buf, "%8ph\n", dev_to_ns_head(dev)->ids.eui64);
106 }
107 static DEVICE_ATTR_RO(eui);
108 
109 static ssize_t nsid_show(struct device *dev, struct device_attribute *attr,
110 		char *buf)
111 {
112 	return sysfs_emit(buf, "%d\n", dev_to_ns_head(dev)->ns_id);
113 }
114 static DEVICE_ATTR_RO(nsid);
115 
116 static ssize_t csi_show(struct device *dev, struct device_attribute *attr,
117 		char *buf)
118 {
119 	return sysfs_emit(buf, "%u\n", dev_to_ns_head(dev)->ids.csi);
120 }
121 static DEVICE_ATTR_RO(csi);
122 
123 static ssize_t metadata_bytes_show(struct device *dev,
124 		struct device_attribute *attr, char *buf)
125 {
126 	return sysfs_emit(buf, "%u\n", dev_to_ns_head(dev)->ms);
127 }
128 static DEVICE_ATTR_RO(metadata_bytes);
129 
/*
 * Refresh the cached namespace utilization (nuse) for a multipath head
 * by issuing Identify Namespace over a currently usable path.
 *
 * Returns 0 on success or when rate limited, -EWOULDBLOCK when no path
 * is available, or a non-zero status from nvme_identify_ns().
 */
static int ns_head_update_nuse(struct nvme_ns_head *head)
{
	struct nvme_id_ns *id;
	struct nvme_ns *ns;
	int srcu_idx, ret = -EWOULDBLOCK;

	/* Avoid issuing commands too often by rate limiting the update */
	if (!__ratelimit(&head->rs_nuse))
		return 0;

	/* Hold the SRCU read lock while selecting and using a path. */
	srcu_idx = srcu_read_lock(&head->srcu);
	ns = nvme_find_path(head);
	if (!ns)
		goto out_unlock;

	/*
	 * NOTE(review): no kfree() on this error path — assumes
	 * nvme_identify_ns() releases the id buffer itself on failure;
	 * verify against its definition in core.c.
	 */
	ret = nvme_identify_ns(ns->ctrl, head->ns_id, &id);
	if (ret)
		goto out_unlock;

	head->nuse = le64_to_cpu(id->nuse);
	kfree(id);

out_unlock:
	srcu_read_unlock(&head->srcu, srcu_idx);
	return ret;
}
156 
157 static int ns_update_nuse(struct nvme_ns *ns)
158 {
159 	struct nvme_id_ns *id;
160 	int ret;
161 
162 	/* Avoid issuing commands too often by rate limiting the update. */
163 	if (!__ratelimit(&ns->head->rs_nuse))
164 		return 0;
165 
166 	ret = nvme_identify_ns(ns->ctrl, ns->head->ns_id, &id);
167 	if (ret)
168 		goto out_free_id;
169 
170 	ns->head->nuse = le64_to_cpu(id->nuse);
171 
172 out_free_id:
173 	kfree(id);
174 
175 	return ret;
176 }
177 
/*
 * Report the cached Namespace Utilization (nuse), refreshing it on
 * demand (rate limited) from the device first.
 */
static ssize_t nuse_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct nvme_ns_head *head = dev_to_ns_head(dev);
	struct gendisk *disk = dev_to_disk(dev);
	struct block_device *bdev = disk->part0;
	int ret;

	/* Multipath head nodes must pick a usable path; plain namespaces
	 * can issue the identify directly on their own controller. */
	if (IS_ENABLED(CONFIG_NVME_MULTIPATH) &&
	    bdev->bd_disk->fops == &nvme_ns_head_ops)
		ret = ns_head_update_nuse(head);
	else
		ret = ns_update_nuse(bdev->bd_disk->private_data);
	if (ret)
		return ret;

	return sysfs_emit(buf, "%llu\n", head->nuse);
}
static DEVICE_ATTR_RO(nuse);
197 
/*
 * Per-namespace sysfs attributes; visibility of optional entries is
 * decided by nvme_ns_attrs_are_visible().
 */
static struct attribute *nvme_ns_attrs[] = {
	&dev_attr_wwid.attr,
	&dev_attr_uuid.attr,
	&dev_attr_nguid.attr,
	&dev_attr_eui.attr,
	&dev_attr_csi.attr,
	&dev_attr_nsid.attr,
	&dev_attr_metadata_bytes.attr,
	&dev_attr_nuse.attr,
#ifdef CONFIG_NVME_MULTIPATH
	&dev_attr_ana_grpid.attr,
	&dev_attr_ana_state.attr,
#endif
	NULL,
};
213 
/*
 * Hide namespace identifier attributes the device did not report, and
 * the ANA attributes on the head node itself or when the controller
 * does not use ANA.
 */
static umode_t nvme_ns_attrs_are_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nvme_ns_ids *ids = &dev_to_ns_head(dev)->ids;

	if (a == &dev_attr_uuid.attr) {
		/* uuid_show falls back to the NGUID, so keep the file
		 * visible if either identifier is present. */
		if (uuid_is_null(&ids->uuid) &&
		    !memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
			return 0;
	}
	if (a == &dev_attr_nguid.attr) {
		if (!memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
			return 0;
	}
	if (a == &dev_attr_eui.attr) {
		if (!memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
			return 0;
	}
#ifdef CONFIG_NVME_MULTIPATH
	if (a == &dev_attr_ana_grpid.attr || a == &dev_attr_ana_state.attr) {
		/* per-path attr */
		if (nvme_disk_is_ns_head(dev_to_disk(dev)))
			return 0;
		if (!nvme_ctrl_use_ana(nvme_get_ns_from_dev(dev)->ctrl))
			return 0;
	}
#endif
	return a->mode;
}
244 
/* Namespace attribute group, filtered by nvme_ns_attrs_are_visible(). */
static const struct attribute_group nvme_ns_attr_group = {
	.attrs		= nvme_ns_attrs,
	.is_visible	= nvme_ns_attrs_are_visible,
};

/* NULL-terminated group list registered with the namespace device. */
const struct attribute_group *nvme_ns_attr_groups[] = {
	&nvme_ns_attr_group,
	NULL,
};
254 
/*
 * Generate a read-only show function for a fixed-width, space-padded
 * string field of the controller's subsystem; "%.*s" bounds the print
 * because the field is not guaranteed to be NUL terminated.
 */
#define nvme_show_str_function(field)						\
static ssize_t  field##_show(struct device *dev,				\
			    struct device_attribute *attr, char *buf)		\
{										\
        struct nvme_ctrl *ctrl = dev_get_drvdata(dev);				\
        return sysfs_emit(buf, "%.*s\n",					\
		(int)sizeof(ctrl->subsys->field), ctrl->subsys->field);		\
}										\
static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);

nvme_show_str_function(model);
nvme_show_str_function(serial);
nvme_show_str_function(firmware_rev);
268 
/*
 * Generate a read-only show function for an integer field of the
 * controller structure.
 */
#define nvme_show_int_function(field)						\
static ssize_t  field##_show(struct device *dev,				\
			    struct device_attribute *attr, char *buf)		\
{										\
        struct nvme_ctrl *ctrl = dev_get_drvdata(dev);				\
        return sysfs_emit(buf, "%d\n", ctrl->field);				\
}										\
static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);

nvme_show_int_function(cntlid);
nvme_show_int_function(numa_node);
nvme_show_int_function(queue_count);
nvme_show_int_function(sqsize);
nvme_show_int_function(kato);
283 
/*
 * Writing to this attribute tears the controller down.  Refused with
 * -EBUSY until the controller has completed its initial start
 * (NVME_CTRL_STARTED_ONCE).
 */
static ssize_t nvme_sysfs_delete(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (!test_bit(NVME_CTRL_STARTED_ONCE, &ctrl->flags))
		return -EBUSY;

	/*
	 * Remove this attribute file itself first (device_remove_file_self
	 * is the self-removal helper for exactly this pattern), then
	 * delete the controller synchronously.
	 */
	if (device_remove_file_self(dev, attr))
		nvme_delete_ctrl_sync(ctrl);
	return count;
}
static DEVICE_ATTR(delete_controller, S_IWUSR, NULL, nvme_sysfs_delete);
298 
299 static ssize_t nvme_sysfs_show_transport(struct device *dev,
300 					 struct device_attribute *attr,
301 					 char *buf)
302 {
303 	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
304 
305 	return sysfs_emit(buf, "%s\n", ctrl->ops->name);
306 }
307 static DEVICE_ATTR(transport, S_IRUGO, nvme_sysfs_show_transport, NULL);
308 
/*
 * Emit a human-readable name for the current controller state.
 * NOTE(review): ctrl->state is read without the state lock here —
 * presumably a racy snapshot is acceptable for sysfs; confirm.
 */
static ssize_t nvme_sysfs_show_state(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	/* Names indexed by enum nvme_ctrl_state. */
	static const char *const state_name[] = {
		[NVME_CTRL_NEW]		= "new",
		[NVME_CTRL_LIVE]	= "live",
		[NVME_CTRL_RESETTING]	= "resetting",
		[NVME_CTRL_CONNECTING]	= "connecting",
		[NVME_CTRL_DELETING]	= "deleting",
		[NVME_CTRL_DELETING_NOIO]= "deleting (no IO)",
		[NVME_CTRL_DEAD]	= "dead",
	};

	/* Bounds + NULL check so unknown states degrade gracefully. */
	if ((unsigned)ctrl->state < ARRAY_SIZE(state_name) &&
	    state_name[ctrl->state])
		return sysfs_emit(buf, "%s\n", state_name[ctrl->state]);

	return sysfs_emit(buf, "unknown state\n");
}

static DEVICE_ATTR(state, S_IRUGO, nvme_sysfs_show_state, NULL);
332 
333 static ssize_t nvme_sysfs_show_subsysnqn(struct device *dev,
334 					 struct device_attribute *attr,
335 					 char *buf)
336 {
337 	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
338 
339 	return sysfs_emit(buf, "%s\n", ctrl->subsys->subnqn);
340 }
341 static DEVICE_ATTR(subsysnqn, S_IRUGO, nvme_sysfs_show_subsysnqn, NULL);
342 
343 static ssize_t nvme_sysfs_show_hostnqn(struct device *dev,
344 					struct device_attribute *attr,
345 					char *buf)
346 {
347 	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
348 
349 	return sysfs_emit(buf, "%s\n", ctrl->opts->host->nqn);
350 }
351 static DEVICE_ATTR(hostnqn, S_IRUGO, nvme_sysfs_show_hostnqn, NULL);
352 
353 static ssize_t nvme_sysfs_show_hostid(struct device *dev,
354 					struct device_attribute *attr,
355 					char *buf)
356 {
357 	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
358 
359 	return sysfs_emit(buf, "%pU\n", &ctrl->opts->host->id);
360 }
361 static DEVICE_ATTR(hostid, S_IRUGO, nvme_sysfs_show_hostid, NULL);
362 
/*
 * Emit the transport address, formatted by the transport's get_address
 * callback (hidden by nvme_dev_attrs_are_visible() when absent).
 */
static ssize_t nvme_sysfs_show_address(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return ctrl->ops->get_address(ctrl, buf, PAGE_SIZE);
}
static DEVICE_ATTR(address, S_IRUGO, nvme_sysfs_show_address, NULL);
372 
373 static ssize_t nvme_ctrl_loss_tmo_show(struct device *dev,
374 		struct device_attribute *attr, char *buf)
375 {
376 	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
377 	struct nvmf_ctrl_options *opts = ctrl->opts;
378 
379 	if (ctrl->opts->max_reconnects == -1)
380 		return sysfs_emit(buf, "off\n");
381 	return sysfs_emit(buf, "%d\n",
382 			  opts->max_reconnects * opts->reconnect_delay);
383 }
384 
385 static ssize_t nvme_ctrl_loss_tmo_store(struct device *dev,
386 		struct device_attribute *attr, const char *buf, size_t count)
387 {
388 	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
389 	struct nvmf_ctrl_options *opts = ctrl->opts;
390 	int ctrl_loss_tmo, err;
391 
392 	err = kstrtoint(buf, 10, &ctrl_loss_tmo);
393 	if (err)
394 		return -EINVAL;
395 
396 	if (ctrl_loss_tmo < 0)
397 		opts->max_reconnects = -1;
398 	else
399 		opts->max_reconnects = DIV_ROUND_UP(ctrl_loss_tmo,
400 						opts->reconnect_delay);
401 	return count;
402 }
403 static DEVICE_ATTR(ctrl_loss_tmo, S_IRUGO | S_IWUSR,
404 	nvme_ctrl_loss_tmo_show, nvme_ctrl_loss_tmo_store);
405 
/*
 * reconnect_delay: seconds between reconnect attempts.  "off" (-1) can
 * only originate from connect-time options — the store below parses an
 * unsigned value, so it cannot be written back.
 */
static ssize_t nvme_ctrl_reconnect_delay_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (ctrl->opts->reconnect_delay == -1)
		return sysfs_emit(buf, "off\n");
	return sysfs_emit(buf, "%d\n", ctrl->opts->reconnect_delay);
}

static ssize_t nvme_ctrl_reconnect_delay_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	unsigned int v;
	int err;

	/* Unsigned parse rejects negative input outright. */
	err = kstrtou32(buf, 10, &v);
	if (err)
		return err;

	ctrl->opts->reconnect_delay = v;
	return count;
}
static DEVICE_ATTR(reconnect_delay, S_IRUGO | S_IWUSR,
	nvme_ctrl_reconnect_delay_show, nvme_ctrl_reconnect_delay_store);
432 
433 static ssize_t nvme_ctrl_fast_io_fail_tmo_show(struct device *dev,
434 		struct device_attribute *attr, char *buf)
435 {
436 	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
437 
438 	if (ctrl->opts->fast_io_fail_tmo == -1)
439 		return sysfs_emit(buf, "off\n");
440 	return sysfs_emit(buf, "%d\n", ctrl->opts->fast_io_fail_tmo);
441 }
442 
443 static ssize_t nvme_ctrl_fast_io_fail_tmo_store(struct device *dev,
444 		struct device_attribute *attr, const char *buf, size_t count)
445 {
446 	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
447 	struct nvmf_ctrl_options *opts = ctrl->opts;
448 	int fast_io_fail_tmo, err;
449 
450 	err = kstrtoint(buf, 10, &fast_io_fail_tmo);
451 	if (err)
452 		return -EINVAL;
453 
454 	if (fast_io_fail_tmo < 0)
455 		opts->fast_io_fail_tmo = -1;
456 	else
457 		opts->fast_io_fail_tmo = fast_io_fail_tmo;
458 	return count;
459 }
460 static DEVICE_ATTR(fast_io_fail_tmo, S_IRUGO | S_IWUSR,
461 	nvme_ctrl_fast_io_fail_tmo_show, nvme_ctrl_fast_io_fail_tmo_store);
462 
/*
 * Report the controller type (CNTRLTYPE): io, discovery, admin, or
 * "reserved" for values outside the table.
 */
static ssize_t cntrltype_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	static const char * const type[] = {
		[NVME_CTRL_IO] = "io\n",
		[NVME_CTRL_DISC] = "discovery\n",
		[NVME_CTRL_ADMIN] = "admin\n",
	};
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	/* Indices not named above (e.g. 0) are NULL => "reserved". */
	if (ctrl->cntrltype > NVME_CTRL_ADMIN || !type[ctrl->cntrltype])
		return sysfs_emit(buf, "reserved\n");

	return sysfs_emit(buf, type[ctrl->cntrltype]);
}
static DEVICE_ATTR_RO(cntrltype);
479 
/*
 * Report the discovery controller type (DCTYPE): none, ddc (direct),
 * cdc (central), or "reserved" for values outside the table.
 */
static ssize_t dctype_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	static const char * const type[] = {
		[NVME_DCTYPE_NOT_REPORTED] = "none\n",
		[NVME_DCTYPE_DDC] = "ddc\n",
		[NVME_DCTYPE_CDC] = "cdc\n",
	};
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	/* Indices not named above are NULL => "reserved". */
	if (ctrl->dctype > NVME_DCTYPE_CDC || !type[ctrl->dctype])
		return sysfs_emit(buf, "reserved\n");

	return sysfs_emit(buf, type[ctrl->dctype]);
}
static DEVICE_ATTR_RO(dctype);
496 
497 #ifdef CONFIG_NVME_HOST_AUTH
498 static ssize_t nvme_ctrl_dhchap_secret_show(struct device *dev,
499 		struct device_attribute *attr, char *buf)
500 {
501 	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
502 	struct nvmf_ctrl_options *opts = ctrl->opts;
503 
504 	if (!opts->dhchap_secret)
505 		return sysfs_emit(buf, "none\n");
506 	return sysfs_emit(buf, "%s\n", opts->dhchap_secret);
507 }
508 
509 static ssize_t nvme_ctrl_dhchap_secret_store(struct device *dev,
510 		struct device_attribute *attr, const char *buf, size_t count)
511 {
512 	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
513 	struct nvmf_ctrl_options *opts = ctrl->opts;
514 	char *dhchap_secret;
515 
516 	if (!ctrl->opts->dhchap_secret)
517 		return -EINVAL;
518 	if (count < 7)
519 		return -EINVAL;
520 	if (memcmp(buf, "DHHC-1:", 7))
521 		return -EINVAL;
522 
523 	dhchap_secret = kzalloc(count + 1, GFP_KERNEL);
524 	if (!dhchap_secret)
525 		return -ENOMEM;
526 	memcpy(dhchap_secret, buf, count);
527 	nvme_auth_stop(ctrl);
528 	if (strcmp(dhchap_secret, opts->dhchap_secret)) {
529 		struct nvme_dhchap_key *key, *host_key;
530 		int ret;
531 
532 		ret = nvme_auth_generate_key(dhchap_secret, &key);
533 		if (ret) {
534 			kfree(dhchap_secret);
535 			return ret;
536 		}
537 		kfree(opts->dhchap_secret);
538 		opts->dhchap_secret = dhchap_secret;
539 		host_key = ctrl->host_key;
540 		mutex_lock(&ctrl->dhchap_auth_mutex);
541 		ctrl->host_key = key;
542 		mutex_unlock(&ctrl->dhchap_auth_mutex);
543 		nvme_auth_free_key(host_key);
544 	} else
545 		kfree(dhchap_secret);
546 	/* Start re-authentication */
547 	dev_info(ctrl->device, "re-authenticating controller\n");
548 	queue_work(nvme_wq, &ctrl->dhchap_auth_work);
549 
550 	return count;
551 }
552 
553 static DEVICE_ATTR(dhchap_secret, S_IRUGO | S_IWUSR,
554 	nvme_ctrl_dhchap_secret_show, nvme_ctrl_dhchap_secret_store);
555 
556 static ssize_t nvme_ctrl_dhchap_ctrl_secret_show(struct device *dev,
557 		struct device_attribute *attr, char *buf)
558 {
559 	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
560 	struct nvmf_ctrl_options *opts = ctrl->opts;
561 
562 	if (!opts->dhchap_ctrl_secret)
563 		return sysfs_emit(buf, "none\n");
564 	return sysfs_emit(buf, "%s\n", opts->dhchap_ctrl_secret);
565 }
566 
567 static ssize_t nvme_ctrl_dhchap_ctrl_secret_store(struct device *dev,
568 		struct device_attribute *attr, const char *buf, size_t count)
569 {
570 	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
571 	struct nvmf_ctrl_options *opts = ctrl->opts;
572 	char *dhchap_secret;
573 
574 	if (!ctrl->opts->dhchap_ctrl_secret)
575 		return -EINVAL;
576 	if (count < 7)
577 		return -EINVAL;
578 	if (memcmp(buf, "DHHC-1:", 7))
579 		return -EINVAL;
580 
581 	dhchap_secret = kzalloc(count + 1, GFP_KERNEL);
582 	if (!dhchap_secret)
583 		return -ENOMEM;
584 	memcpy(dhchap_secret, buf, count);
585 	nvme_auth_stop(ctrl);
586 	if (strcmp(dhchap_secret, opts->dhchap_ctrl_secret)) {
587 		struct nvme_dhchap_key *key, *ctrl_key;
588 		int ret;
589 
590 		ret = nvme_auth_generate_key(dhchap_secret, &key);
591 		if (ret) {
592 			kfree(dhchap_secret);
593 			return ret;
594 		}
595 		kfree(opts->dhchap_ctrl_secret);
596 		opts->dhchap_ctrl_secret = dhchap_secret;
597 		ctrl_key = ctrl->ctrl_key;
598 		mutex_lock(&ctrl->dhchap_auth_mutex);
599 		ctrl->ctrl_key = key;
600 		mutex_unlock(&ctrl->dhchap_auth_mutex);
601 		nvme_auth_free_key(ctrl_key);
602 	} else
603 		kfree(dhchap_secret);
604 	/* Start re-authentication */
605 	dev_info(ctrl->device, "re-authenticating controller\n");
606 	queue_work(nvme_wq, &ctrl->dhchap_auth_work);
607 
608 	return count;
609 }
610 
611 static DEVICE_ATTR(dhchap_ctrl_secret, S_IRUGO | S_IWUSR,
612 	nvme_ctrl_dhchap_ctrl_secret_show, nvme_ctrl_dhchap_ctrl_secret_store);
613 #endif
614 
615 #ifdef CONFIG_NVME_TCP_TLS
/*
 * Emit the serial number of the TLS key in use, or nothing when no TLS
 * key is attached to the controller.
 */
static ssize_t tls_key_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (!ctrl->tls_key)
		return 0;
	return sysfs_emit(buf, "%08x", key_serial(ctrl->tls_key));
}
static DEVICE_ATTR_RO(tls_key);
626 #endif
627 
/*
 * Controller-level sysfs attributes; fabrics-only and transport-specific
 * entries are filtered by nvme_dev_attrs_are_visible().
 */
static struct attribute *nvme_dev_attrs[] = {
	&dev_attr_reset_controller.attr,
	&dev_attr_rescan_controller.attr,
	&dev_attr_model.attr,
	&dev_attr_serial.attr,
	&dev_attr_firmware_rev.attr,
	&dev_attr_cntlid.attr,
	&dev_attr_delete_controller.attr,
	&dev_attr_transport.attr,
	&dev_attr_subsysnqn.attr,
	&dev_attr_address.attr,
	&dev_attr_state.attr,
	&dev_attr_numa_node.attr,
	&dev_attr_queue_count.attr,
	&dev_attr_sqsize.attr,
	&dev_attr_hostnqn.attr,
	&dev_attr_hostid.attr,
	&dev_attr_ctrl_loss_tmo.attr,
	&dev_attr_reconnect_delay.attr,
	&dev_attr_fast_io_fail_tmo.attr,
	&dev_attr_kato.attr,
	&dev_attr_cntrltype.attr,
	&dev_attr_dctype.attr,
#ifdef CONFIG_NVME_HOST_AUTH
	&dev_attr_dhchap_secret.attr,
	&dev_attr_dhchap_ctrl_secret.attr,
#endif
#ifdef CONFIG_NVME_TCP_TLS
	&dev_attr_tls_key.attr,
#endif
	NULL
};
660 
/*
 * Hide attributes that do not apply to this controller: optional
 * transport callbacks, and fabrics options (ctrl->opts is NULL for
 * PCIe controllers).  tls_key is additionally limited to TCP.
 */
static umode_t nvme_dev_attrs_are_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (a == &dev_attr_delete_controller.attr && !ctrl->ops->delete_ctrl)
		return 0;
	if (a == &dev_attr_address.attr && !ctrl->ops->get_address)
		return 0;
	if (a == &dev_attr_hostnqn.attr && !ctrl->opts)
		return 0;
	if (a == &dev_attr_hostid.attr && !ctrl->opts)
		return 0;
	if (a == &dev_attr_ctrl_loss_tmo.attr && !ctrl->opts)
		return 0;
	if (a == &dev_attr_reconnect_delay.attr && !ctrl->opts)
		return 0;
	if (a == &dev_attr_fast_io_fail_tmo.attr && !ctrl->opts)
		return 0;
#ifdef CONFIG_NVME_HOST_AUTH
	if (a == &dev_attr_dhchap_secret.attr && !ctrl->opts)
		return 0;
	if (a == &dev_attr_dhchap_ctrl_secret.attr && !ctrl->opts)
		return 0;
#endif
#ifdef CONFIG_NVME_TCP_TLS
	if (a == &dev_attr_tls_key.attr &&
	    (!ctrl->opts || strcmp(ctrl->opts->transport, "tcp")))
		return 0;
#endif

	return a->mode;
}
695 
/* Controller attribute group, filtered by nvme_dev_attrs_are_visible(). */
const struct attribute_group nvme_dev_attrs_group = {
	.attrs		= nvme_dev_attrs,
	.is_visible	= nvme_dev_attrs_are_visible,
};
EXPORT_SYMBOL_GPL(nvme_dev_attrs_group);

/* NULL-terminated group list registered with the controller device. */
const struct attribute_group *nvme_dev_attr_groups[] = {
	&nvme_dev_attrs_group,
	NULL,
};
706 
/* Declare a read-only device attribute for the NVMe subsystem device. */
#define SUBSYS_ATTR_RO(_name, _mode, _show)			\
	struct device_attribute subsys_attr_##_name = \
		__ATTR(_name, _mode, _show, NULL)
710 
711 static ssize_t nvme_subsys_show_nqn(struct device *dev,
712 				    struct device_attribute *attr,
713 				    char *buf)
714 {
715 	struct nvme_subsystem *subsys =
716 		container_of(dev, struct nvme_subsystem, dev);
717 
718 	return sysfs_emit(buf, "%s\n", subsys->subnqn);
719 }
720 static SUBSYS_ATTR_RO(subsysnqn, S_IRUGO, nvme_subsys_show_nqn);
721 
722 static ssize_t nvme_subsys_show_type(struct device *dev,
723 				    struct device_attribute *attr,
724 				    char *buf)
725 {
726 	struct nvme_subsystem *subsys =
727 		container_of(dev, struct nvme_subsystem, dev);
728 
729 	switch (subsys->subtype) {
730 	case NVME_NQN_DISC:
731 		return sysfs_emit(buf, "discovery\n");
732 	case NVME_NQN_NVME:
733 		return sysfs_emit(buf, "nvm\n");
734 	default:
735 		return sysfs_emit(buf, "reserved\n");
736 	}
737 }
738 static SUBSYS_ATTR_RO(subsystype, S_IRUGO, nvme_subsys_show_type);
739 
/*
 * Generate a read-only show function for a fixed-width, space-padded
 * string field of the subsystem; "%.*s" bounds the print because the
 * field is not guaranteed to be NUL terminated.
 */
#define nvme_subsys_show_str_function(field)				\
static ssize_t subsys_##field##_show(struct device *dev,		\
			    struct device_attribute *attr, char *buf)	\
{									\
	struct nvme_subsystem *subsys =					\
		container_of(dev, struct nvme_subsystem, dev);		\
	return sysfs_emit(buf, "%.*s\n",				\
			   (int)sizeof(subsys->field), subsys->field);	\
}									\
static SUBSYS_ATTR_RO(field, S_IRUGO, subsys_##field##_show);

nvme_subsys_show_str_function(model);
nvme_subsys_show_str_function(serial);
nvme_subsys_show_str_function(firmware_rev);
754 
/* Subsystem-level sysfs attributes (iopolicy only with multipath). */
static struct attribute *nvme_subsys_attrs[] = {
	&subsys_attr_model.attr,
	&subsys_attr_serial.attr,
	&subsys_attr_firmware_rev.attr,
	&subsys_attr_subsysnqn.attr,
	&subsys_attr_subsystype.attr,
#ifdef CONFIG_NVME_MULTIPATH
	&subsys_attr_iopolicy.attr,
#endif
	NULL,
};
766 
/* Subsystem attribute group (no visibility filtering needed). */
static const struct attribute_group nvme_subsys_attrs_group = {
	.attrs = nvme_subsys_attrs,
};

/* NULL-terminated group list registered with the subsystem device. */
const struct attribute_group *nvme_subsys_attrs_groups[] = {
	&nvme_subsys_attrs_group,
	NULL,
};
775