// SPDX-License-Identifier: GPL-2.0
/*
 * Sysfs interface for the NVMe core driver.
 *
 * Copyright (c) 2011-2014, Intel Corporation.
 */
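
/*
 * Illustrative usage from userspace (the device names below are examples
 * only and depend on the system):
 *
 *	echo 1 > /sys/class/nvme/nvme0/reset_controller
 *	cat /sys/class/nvme/nvme0/state
 *	cat /sys/block/nvme0n1/wwid
 */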

#include <linux/nvme-auth.h>

#include "nvme.h"
#include "fabrics.h"

static ssize_t nvme_sysfs_reset(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	int ret;

	ret = nvme_reset_ctrl_sync(ctrl);
	if (ret < 0)
		return ret;
	return count;
}
static DEVICE_ATTR(reset_controller, S_IWUSR, NULL, nvme_sysfs_reset);

static ssize_t nvme_sysfs_rescan(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	nvme_queue_scan(ctrl);
	return count;
}
static DEVICE_ATTR(rescan_controller, S_IWUSR, NULL, nvme_sysfs_rescan);

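/*
 * The block device may either be a regular per-controller namespace
 * (disk->fops == &nvme_bdev_ops) or, with CONFIG_NVME_MULTIPATH, the
 * multipath head node whose private_data points directly at the ns_head.
 */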
static inline struct nvme_ns_head *dev_to_ns_head(struct device *dev)
{
	struct gendisk *disk = dev_to_disk(dev);

	if (disk->fops == &nvme_bdev_ops)
		return nvme_get_ns_from_dev(dev)->head;
	else
		return disk->private_data;
}

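/*
 * Report the namespace WWID, preferring the identifiers in this order:
 * namespace UUID, NGUID, EUI-64, and finally a string synthesized from the
 * vendor ID, serial number, model and NSID (with trailing spaces and NUL
 * padding trimmed from the serial and model fields).
 */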
static ssize_t wwid_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct nvme_ns_head *head = dev_to_ns_head(dev);
	struct nvme_ns_ids *ids = &head->ids;
	struct nvme_subsystem *subsys = head->subsys;
	int serial_len = sizeof(subsys->serial);
	int model_len = sizeof(subsys->model);

	if (!uuid_is_null(&ids->uuid))
		return sysfs_emit(buf, "uuid.%pU\n", &ids->uuid);

	if (memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
		return sysfs_emit(buf, "eui.%16phN\n", ids->nguid);

	if (memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
		return sysfs_emit(buf, "eui.%8phN\n", ids->eui64);

	while (serial_len > 0 && (subsys->serial[serial_len - 1] == ' ' ||
				  subsys->serial[serial_len - 1] == '\0'))
		serial_len--;
	while (model_len > 0 && (subsys->model[model_len - 1] == ' ' ||
				 subsys->model[model_len - 1] == '\0'))
		model_len--;

	return sysfs_emit(buf, "nvme.%04x-%*phN-%*phN-%08x\n", subsys->vendor_id,
		serial_len, subsys->serial, model_len, subsys->model,
		head->ns_id);
}
static DEVICE_ATTR_RO(wwid);

static ssize_t nguid_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	return sysfs_emit(buf, "%pU\n", dev_to_ns_head(dev)->ids.nguid);
}
static DEVICE_ATTR_RO(nguid);

static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct nvme_ns_ids *ids = &dev_to_ns_head(dev)->ids;

	/* For backward compatibility expose the NGUID to userspace if
	 * we have no UUID set
	 */
	if (uuid_is_null(&ids->uuid)) {
		dev_warn_once(dev,
			"No UUID available providing old NGUID\n");
		return sysfs_emit(buf, "%pU\n", ids->nguid);
	}
	return sysfs_emit(buf, "%pU\n", &ids->uuid);
}
static DEVICE_ATTR_RO(uuid);

static ssize_t eui_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	return sysfs_emit(buf, "%8ph\n", dev_to_ns_head(dev)->ids.eui64);
}
static DEVICE_ATTR_RO(eui);

static ssize_t nsid_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	return sysfs_emit(buf, "%d\n", dev_to_ns_head(dev)->ns_id);
}
static DEVICE_ATTR_RO(nsid);

static struct attribute *nvme_ns_id_attrs[] = {
	&dev_attr_wwid.attr,
	&dev_attr_uuid.attr,
	&dev_attr_nguid.attr,
	&dev_attr_eui.attr,
	&dev_attr_nsid.attr,
#ifdef CONFIG_NVME_MULTIPATH
	&dev_attr_ana_grpid.attr,
	&dev_attr_ana_state.attr,
#endif
	NULL,
};

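/*
 * Hide identifier attributes the namespace did not actually report: an
 * all-zero NGUID or EUI-64 is not exposed, and "uuid" is only shown when
 * either a UUID or (for the compatibility fallback) an NGUID is present.
 * The ANA attributes are per-path: they are only shown on the per-controller
 * namespace devices of controllers that actually use ANA, not on the
 * multipath head node.
 */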
static umode_t nvme_ns_id_attrs_are_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nvme_ns_ids *ids = &dev_to_ns_head(dev)->ids;

	if (a == &dev_attr_uuid.attr) {
		if (uuid_is_null(&ids->uuid) &&
		    !memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
			return 0;
	}
	if (a == &dev_attr_nguid.attr) {
		if (!memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
			return 0;
	}
	if (a == &dev_attr_eui.attr) {
		if (!memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
			return 0;
	}
#ifdef CONFIG_NVME_MULTIPATH
	if (a == &dev_attr_ana_grpid.attr || a == &dev_attr_ana_state.attr) {
		if (dev_to_disk(dev)->fops != &nvme_bdev_ops) /* per-path attr */
			return 0;
		if (!nvme_ctrl_use_ana(nvme_get_ns_from_dev(dev)->ctrl))
			return 0;
	}
#endif
	return a->mode;
}

static const struct attribute_group nvme_ns_id_attr_group = {
	.attrs		= nvme_ns_id_attrs,
	.is_visible	= nvme_ns_id_attrs_are_visible,
};

const struct attribute_group *nvme_ns_id_attr_groups[] = {
	&nvme_ns_id_attr_group,
	NULL,
};

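/*
 * Helpers that generate read-only controller attributes: the string variant
 * prints a fixed-width, space-padded field from the subsystem (Identify)
 * data, the integer variant prints a plain decimal field of struct nvme_ctrl.
 */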
#define nvme_show_str_function(field)						\
static ssize_t  field##_show(struct device *dev,				\
			    struct device_attribute *attr, char *buf)		\
{										\
        struct nvme_ctrl *ctrl = dev_get_drvdata(dev);				\
        return sysfs_emit(buf, "%.*s\n",					\
		(int)sizeof(ctrl->subsys->field), ctrl->subsys->field);		\
}										\
static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);

nvme_show_str_function(model);
nvme_show_str_function(serial);
nvme_show_str_function(firmware_rev);

#define nvme_show_int_function(field)						\
static ssize_t  field##_show(struct device *dev,				\
			    struct device_attribute *attr, char *buf)		\
{										\
        struct nvme_ctrl *ctrl = dev_get_drvdata(dev);				\
        return sysfs_emit(buf, "%d\n", ctrl->field);				\
}										\
static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);

nvme_show_int_function(cntlid);
nvme_show_int_function(numa_node);
nvme_show_int_function(queue_count);
nvme_show_int_function(sqsize);
nvme_show_int_function(kato);

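/*
 * Writing to delete_controller tears the controller down synchronously.
 * The attribute removes itself first (device_remove_file_self()) so the
 * deletion cannot deadlock against the sysfs file being written, and
 * controllers that never completed their initial start are rejected.
 */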
static ssize_t nvme_sysfs_delete(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (!test_bit(NVME_CTRL_STARTED_ONCE, &ctrl->flags))
		return -EBUSY;

	if (device_remove_file_self(dev, attr))
		nvme_delete_ctrl_sync(ctrl);
	return count;
}
static DEVICE_ATTR(delete_controller, S_IWUSR, NULL, nvme_sysfs_delete);

static ssize_t nvme_sysfs_show_transport(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%s\n", ctrl->ops->name);
}
static DEVICE_ATTR(transport, S_IRUGO, nvme_sysfs_show_transport, NULL);

static ssize_t nvme_sysfs_show_state(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	static const char *const state_name[] = {
		[NVME_CTRL_NEW]		= "new",
		[NVME_CTRL_LIVE]	= "live",
		[NVME_CTRL_RESETTING]	= "resetting",
		[NVME_CTRL_CONNECTING]	= "connecting",
		[NVME_CTRL_DELETING]	= "deleting",
		[NVME_CTRL_DELETING_NOIO]= "deleting (no IO)",
		[NVME_CTRL_DEAD]	= "dead",
	};

	if ((unsigned)ctrl->state < ARRAY_SIZE(state_name) &&
	    state_name[ctrl->state])
		return sysfs_emit(buf, "%s\n", state_name[ctrl->state]);

	return sysfs_emit(buf, "unknown state\n");
}

static DEVICE_ATTR(state, S_IRUGO, nvme_sysfs_show_state, NULL);

static ssize_t nvme_sysfs_show_subsysnqn(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%s\n", ctrl->subsys->subnqn);
}
static DEVICE_ATTR(subsysnqn, S_IRUGO, nvme_sysfs_show_subsysnqn, NULL);

static ssize_t nvme_sysfs_show_hostnqn(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%s\n", ctrl->opts->host->nqn);
}
static DEVICE_ATTR(hostnqn, S_IRUGO, nvme_sysfs_show_hostnqn, NULL);

static ssize_t nvme_sysfs_show_hostid(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%pU\n", &ctrl->opts->host->id);
}
static DEVICE_ATTR(hostid, S_IRUGO, nvme_sysfs_show_hostid, NULL);

static ssize_t nvme_sysfs_show_address(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return ctrl->ops->get_address(ctrl, buf, PAGE_SIZE);
}
static DEVICE_ATTR(address, S_IRUGO, nvme_sysfs_show_address, NULL);

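/*
 * ctrl_loss_tmo is not stored directly: it is kept as the number of
 * reconnect attempts (max_reconnects = DIV_ROUND_UP(tmo, reconnect_delay)),
 * so the value read back is max_reconnects * reconnect_delay.  A negative
 * write disables the timeout entirely, reported as "off".
 */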
static ssize_t nvme_ctrl_loss_tmo_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	struct nvmf_ctrl_options *opts = ctrl->opts;

	if (ctrl->opts->max_reconnects == -1)
		return sysfs_emit(buf, "off\n");
	return sysfs_emit(buf, "%d\n",
			  opts->max_reconnects * opts->reconnect_delay);
}

static ssize_t nvme_ctrl_loss_tmo_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	struct nvmf_ctrl_options *opts = ctrl->opts;
	int ctrl_loss_tmo, err;

	err = kstrtoint(buf, 10, &ctrl_loss_tmo);
	if (err)
		return -EINVAL;

	if (ctrl_loss_tmo < 0)
		opts->max_reconnects = -1;
	else
		opts->max_reconnects = DIV_ROUND_UP(ctrl_loss_tmo,
						opts->reconnect_delay);
	return count;
}
static DEVICE_ATTR(ctrl_loss_tmo, S_IRUGO | S_IWUSR,
	nvme_ctrl_loss_tmo_show, nvme_ctrl_loss_tmo_store);

static ssize_t nvme_ctrl_reconnect_delay_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (ctrl->opts->reconnect_delay == -1)
		return sysfs_emit(buf, "off\n");
	return sysfs_emit(buf, "%d\n", ctrl->opts->reconnect_delay);
}

static ssize_t nvme_ctrl_reconnect_delay_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	unsigned int v;
	int err;

	err = kstrtou32(buf, 10, &v);
	if (err)
		return err;

	ctrl->opts->reconnect_delay = v;
	return count;
}
static DEVICE_ATTR(reconnect_delay, S_IRUGO | S_IWUSR,
	nvme_ctrl_reconnect_delay_show, nvme_ctrl_reconnect_delay_store);

static ssize_t nvme_ctrl_fast_io_fail_tmo_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (ctrl->opts->fast_io_fail_tmo == -1)
		return sysfs_emit(buf, "off\n");
	return sysfs_emit(buf, "%d\n", ctrl->opts->fast_io_fail_tmo);
}

static ssize_t nvme_ctrl_fast_io_fail_tmo_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	struct nvmf_ctrl_options *opts = ctrl->opts;
	int fast_io_fail_tmo, err;

	err = kstrtoint(buf, 10, &fast_io_fail_tmo);
	if (err)
		return -EINVAL;

	if (fast_io_fail_tmo < 0)
		opts->fast_io_fail_tmo = -1;
	else
		opts->fast_io_fail_tmo = fast_io_fail_tmo;
	return count;
}
static DEVICE_ATTR(fast_io_fail_tmo, S_IRUGO | S_IWUSR,
	nvme_ctrl_fast_io_fail_tmo_show, nvme_ctrl_fast_io_fail_tmo_store);

static ssize_t cntrltype_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	static const char * const type[] = {
		[NVME_CTRL_IO] = "io\n",
		[NVME_CTRL_DISC] = "discovery\n",
		[NVME_CTRL_ADMIN] = "admin\n",
	};
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (ctrl->cntrltype > NVME_CTRL_ADMIN || !type[ctrl->cntrltype])
		return sysfs_emit(buf, "reserved\n");

	return sysfs_emit(buf, type[ctrl->cntrltype]);
}
static DEVICE_ATTR_RO(cntrltype);

static ssize_t dctype_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	static const char * const type[] = {
		[NVME_DCTYPE_NOT_REPORTED] = "none\n",
		[NVME_DCTYPE_DDC] = "ddc\n",
		[NVME_DCTYPE_CDC] = "cdc\n",
	};
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (ctrl->dctype > NVME_DCTYPE_CDC || !type[ctrl->dctype])
		return sysfs_emit(buf, "reserved\n");

	return sysfs_emit(buf, type[ctrl->dctype]);
}
static DEVICE_ATTR_RO(dctype);

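/*
 * DH-HMAC-CHAP secrets: writes must carry the "DHHC-1:" prefix and can only
 * replace a secret that was already configured at connect time.  When the
 * secret actually changes, the corresponding key is regenerated and swapped
 * in under dhchap_auth_mutex before the old key is freed; either way the
 * controller is re-authenticated afterwards.
 */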
#ifdef CONFIG_NVME_AUTH
static ssize_t nvme_ctrl_dhchap_secret_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	struct nvmf_ctrl_options *opts = ctrl->opts;

	if (!opts->dhchap_secret)
		return sysfs_emit(buf, "none\n");
	return sysfs_emit(buf, "%s\n", opts->dhchap_secret);
}

static ssize_t nvme_ctrl_dhchap_secret_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	struct nvmf_ctrl_options *opts = ctrl->opts;
	char *dhchap_secret;

	if (!ctrl->opts->dhchap_secret)
		return -EINVAL;
	if (count < 7)
		return -EINVAL;
	if (memcmp(buf, "DHHC-1:", 7))
		return -EINVAL;

	dhchap_secret = kzalloc(count + 1, GFP_KERNEL);
	if (!dhchap_secret)
		return -ENOMEM;
	memcpy(dhchap_secret, buf, count);
	nvme_auth_stop(ctrl);
	if (strcmp(dhchap_secret, opts->dhchap_secret)) {
		struct nvme_dhchap_key *key, *host_key;
		int ret;

		ret = nvme_auth_generate_key(dhchap_secret, &key);
		if (ret) {
			kfree(dhchap_secret);
			return ret;
		}
		kfree(opts->dhchap_secret);
		opts->dhchap_secret = dhchap_secret;
		host_key = ctrl->host_key;
		mutex_lock(&ctrl->dhchap_auth_mutex);
		ctrl->host_key = key;
		mutex_unlock(&ctrl->dhchap_auth_mutex);
		nvme_auth_free_key(host_key);
	} else
		kfree(dhchap_secret);
	/* Start re-authentication */
	dev_info(ctrl->device, "re-authenticating controller\n");
	queue_work(nvme_wq, &ctrl->dhchap_auth_work);

	return count;
}

static DEVICE_ATTR(dhchap_secret, S_IRUGO | S_IWUSR,
	nvme_ctrl_dhchap_secret_show, nvme_ctrl_dhchap_secret_store);

static ssize_t nvme_ctrl_dhchap_ctrl_secret_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	struct nvmf_ctrl_options *opts = ctrl->opts;

	if (!opts->dhchap_ctrl_secret)
		return sysfs_emit(buf, "none\n");
	return sysfs_emit(buf, "%s\n", opts->dhchap_ctrl_secret);
}

static ssize_t nvme_ctrl_dhchap_ctrl_secret_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	struct nvmf_ctrl_options *opts = ctrl->opts;
	char *dhchap_secret;

	if (!ctrl->opts->dhchap_ctrl_secret)
		return -EINVAL;
	if (count < 7)
		return -EINVAL;
	if (memcmp(buf, "DHHC-1:", 7))
		return -EINVAL;

	dhchap_secret = kzalloc(count + 1, GFP_KERNEL);
	if (!dhchap_secret)
		return -ENOMEM;
	memcpy(dhchap_secret, buf, count);
	nvme_auth_stop(ctrl);
	if (strcmp(dhchap_secret, opts->dhchap_ctrl_secret)) {
		struct nvme_dhchap_key *key, *ctrl_key;
		int ret;

		ret = nvme_auth_generate_key(dhchap_secret, &key);
		if (ret) {
			kfree(dhchap_secret);
			return ret;
		}
		kfree(opts->dhchap_ctrl_secret);
		opts->dhchap_ctrl_secret = dhchap_secret;
		ctrl_key = ctrl->ctrl_key;
		mutex_lock(&ctrl->dhchap_auth_mutex);
		ctrl->ctrl_key = key;
		mutex_unlock(&ctrl->dhchap_auth_mutex);
		nvme_auth_free_key(ctrl_key);
	} else
		kfree(dhchap_secret);
	/* Start re-authentication */
	dev_info(ctrl->device, "re-authenticating controller\n");
	queue_work(nvme_wq, &ctrl->dhchap_auth_work);

	return count;
}

static DEVICE_ATTR(dhchap_ctrl_secret, S_IRUGO | S_IWUSR,
	nvme_ctrl_dhchap_ctrl_secret_show, nvme_ctrl_dhchap_ctrl_secret_store);
#endif

static struct attribute *nvme_dev_attrs[] = {
	&dev_attr_reset_controller.attr,
	&dev_attr_rescan_controller.attr,
	&dev_attr_model.attr,
	&dev_attr_serial.attr,
	&dev_attr_firmware_rev.attr,
	&dev_attr_cntlid.attr,
	&dev_attr_delete_controller.attr,
	&dev_attr_transport.attr,
	&dev_attr_subsysnqn.attr,
	&dev_attr_address.attr,
	&dev_attr_state.attr,
	&dev_attr_numa_node.attr,
	&dev_attr_queue_count.attr,
	&dev_attr_sqsize.attr,
	&dev_attr_hostnqn.attr,
	&dev_attr_hostid.attr,
	&dev_attr_ctrl_loss_tmo.attr,
	&dev_attr_reconnect_delay.attr,
	&dev_attr_fast_io_fail_tmo.attr,
	&dev_attr_kato.attr,
	&dev_attr_cntrltype.attr,
	&dev_attr_dctype.attr,
#ifdef CONFIG_NVME_AUTH
	&dev_attr_dhchap_secret.attr,
	&dev_attr_dhchap_ctrl_secret.attr,
#endif
	NULL
};

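/*
 * Fabrics-only attributes (hostnqn, hostid, the reconnect tunables and the
 * DH-HMAC-CHAP secrets) are hidden when the controller has no connect
 * options, i.e. on PCIe controllers; delete_controller and address are
 * hidden when the transport does not implement the respective callback.
 */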
static umode_t nvme_dev_attrs_are_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (a == &dev_attr_delete_controller.attr && !ctrl->ops->delete_ctrl)
		return 0;
	if (a == &dev_attr_address.attr && !ctrl->ops->get_address)
		return 0;
	if (a == &dev_attr_hostnqn.attr && !ctrl->opts)
		return 0;
	if (a == &dev_attr_hostid.attr && !ctrl->opts)
		return 0;
	if (a == &dev_attr_ctrl_loss_tmo.attr && !ctrl->opts)
		return 0;
	if (a == &dev_attr_reconnect_delay.attr && !ctrl->opts)
		return 0;
	if (a == &dev_attr_fast_io_fail_tmo.attr && !ctrl->opts)
		return 0;
#ifdef CONFIG_NVME_AUTH
	if (a == &dev_attr_dhchap_secret.attr && !ctrl->opts)
		return 0;
	if (a == &dev_attr_dhchap_ctrl_secret.attr && !ctrl->opts)
		return 0;
#endif

	return a->mode;
}

const struct attribute_group nvme_dev_attrs_group = {
	.attrs		= nvme_dev_attrs,
	.is_visible	= nvme_dev_attrs_are_visible,
};
EXPORT_SYMBOL_GPL(nvme_dev_attrs_group);

const struct attribute_group *nvme_dev_attr_groups[] = {
	&nvme_dev_attrs_group,
	NULL,
};

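/*
 * Subsystem-level attributes: these hang off the nvme-subsystem device
 * (/sys/class/nvme-subsystem/) and describe properties shared by all
 * controllers in the subsystem rather than any individual controller.
 */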
#define SUBSYS_ATTR_RO(_name, _mode, _show)			\
	struct device_attribute subsys_attr_##_name = \
		__ATTR(_name, _mode, _show, NULL)

static ssize_t nvme_subsys_show_nqn(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct nvme_subsystem *subsys =
		container_of(dev, struct nvme_subsystem, dev);

	return sysfs_emit(buf, "%s\n", subsys->subnqn);
}
static SUBSYS_ATTR_RO(subsysnqn, S_IRUGO, nvme_subsys_show_nqn);

static ssize_t nvme_subsys_show_type(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct nvme_subsystem *subsys =
		container_of(dev, struct nvme_subsystem, dev);

	switch (subsys->subtype) {
	case NVME_NQN_DISC:
		return sysfs_emit(buf, "discovery\n");
	case NVME_NQN_NVME:
		return sysfs_emit(buf, "nvm\n");
	default:
		return sysfs_emit(buf, "reserved\n");
	}
}
static SUBSYS_ATTR_RO(subsystype, S_IRUGO, nvme_subsys_show_type);

#define nvme_subsys_show_str_function(field)				\
static ssize_t subsys_##field##_show(struct device *dev,		\
			    struct device_attribute *attr, char *buf)	\
{									\
	struct nvme_subsystem *subsys =					\
		container_of(dev, struct nvme_subsystem, dev);		\
	return sysfs_emit(buf, "%.*s\n",				\
			   (int)sizeof(subsys->field), subsys->field);	\
}									\
static SUBSYS_ATTR_RO(field, S_IRUGO, subsys_##field##_show);

nvme_subsys_show_str_function(model);
nvme_subsys_show_str_function(serial);
nvme_subsys_show_str_function(firmware_rev);

static struct attribute *nvme_subsys_attrs[] = {
	&subsys_attr_model.attr,
	&subsys_attr_serial.attr,
	&subsys_attr_firmware_rev.attr,
	&subsys_attr_subsysnqn.attr,
	&subsys_attr_subsystype.attr,
#ifdef CONFIG_NVME_MULTIPATH
	&subsys_attr_iopolicy.attr,
#endif
	NULL,
};

static const struct attribute_group nvme_subsys_attrs_group = {
	.attrs = nvme_subsys_attrs,
};

const struct attribute_group *nvme_subsys_attrs_groups[] = {
	&nvme_subsys_attrs_group,
	NULL,
};