xref: /linux/drivers/nvme/target/configfs.c (revision 8e07e0e3964ca4e23ce7b68e2096fe660a888942)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Configfs interface for the NVMe target.
4  * Copyright (c) 2015-2016 HGST, a Western Digital Company.
5  */
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7 #include <linux/kstrtox.h>
8 #include <linux/kernel.h>
9 #include <linux/module.h>
10 #include <linux/slab.h>
11 #include <linux/stat.h>
12 #include <linux/ctype.h>
13 #include <linux/pci.h>
14 #include <linux/pci-p2pdma.h>
15 #ifdef CONFIG_NVME_TARGET_AUTH
16 #include <linux/nvme-auth.h>
17 #endif
18 #include <linux/nvme-keyring.h>
19 #include <crypto/hash.h>
20 #include <crypto/kpp.h>
21 #include <linux/nospec.h>
22 
23 #include "nvmet.h"
24 
25 static const struct config_item_type nvmet_host_type;
26 static const struct config_item_type nvmet_subsys_type;
27 
28 static LIST_HEAD(nvmet_ports_list);
29 struct list_head *nvmet_ports = &nvmet_ports_list;
30 
31 struct nvmet_type_name_map {
32 	u8		type;
33 	const char	*name;
34 };
35 
36 static struct nvmet_type_name_map nvmet_transport[] = {
37 	{ NVMF_TRTYPE_RDMA,	"rdma" },
38 	{ NVMF_TRTYPE_FC,	"fc" },
39 	{ NVMF_TRTYPE_TCP,	"tcp" },
40 	{ NVMF_TRTYPE_LOOP,	"loop" },
41 };
42 
43 static const struct nvmet_type_name_map nvmet_addr_family[] = {
44 	{ NVMF_ADDR_FAMILY_PCI,		"pcie" },
45 	{ NVMF_ADDR_FAMILY_IP4,		"ipv4" },
46 	{ NVMF_ADDR_FAMILY_IP6,		"ipv6" },
47 	{ NVMF_ADDR_FAMILY_IB,		"ib" },
48 	{ NVMF_ADDR_FAMILY_FC,		"fc" },
49 	{ NVMF_ADDR_FAMILY_LOOP,	"loop" },
50 };
51 
52 static bool nvmet_is_port_enabled(struct nvmet_port *p, const char *caller)
53 {
54 	if (p->enabled)
55 		pr_err("Disable port '%u' before changing attribute in %s\n",
56 		       le16_to_cpu(p->disc_addr.portid), caller);
57 	return p->enabled;
58 }
59 
60 /*
61  * nvmet_port Generic ConfigFS definitions.
62  * Used anywhere in the ConfigFS tree that refers to an address.
63  */
64 static ssize_t nvmet_addr_adrfam_show(struct config_item *item, char *page)
65 {
66 	u8 adrfam = to_nvmet_port(item)->disc_addr.adrfam;
67 	int i;
68 
69 	for (i = 1; i < ARRAY_SIZE(nvmet_addr_family); i++) {
70 		if (nvmet_addr_family[i].type == adrfam)
71 			return snprintf(page, PAGE_SIZE, "%s\n",
72 					nvmet_addr_family[i].name);
73 	}
74 
75 	return snprintf(page, PAGE_SIZE, "\n");
76 }
77 
78 static ssize_t nvmet_addr_adrfam_store(struct config_item *item,
79 		const char *page, size_t count)
80 {
81 	struct nvmet_port *port = to_nvmet_port(item);
82 	int i;
83 
84 	if (nvmet_is_port_enabled(port, __func__))
85 		return -EACCES;
86 
87 	for (i = 1; i < ARRAY_SIZE(nvmet_addr_family); i++) {
88 		if (sysfs_streq(page, nvmet_addr_family[i].name))
89 			goto found;
90 	}
91 
92 	pr_err("Invalid value '%s' for adrfam\n", page);
93 	return -EINVAL;
94 
95 found:
96 	port->disc_addr.adrfam = nvmet_addr_family[i].type;
97 	return count;
98 }
99 
100 CONFIGFS_ATTR(nvmet_, addr_adrfam);
101 
102 static ssize_t nvmet_addr_portid_show(struct config_item *item,
103 		char *page)
104 {
105 	__le16 portid = to_nvmet_port(item)->disc_addr.portid;
106 
107 	return snprintf(page, PAGE_SIZE, "%d\n", le16_to_cpu(portid));
108 }
109 
110 static ssize_t nvmet_addr_portid_store(struct config_item *item,
111 		const char *page, size_t count)
112 {
113 	struct nvmet_port *port = to_nvmet_port(item);
114 	u16 portid = 0;
115 
116 	if (kstrtou16(page, 0, &portid)) {
117 		pr_err("Invalid value '%s' for portid\n", page);
118 		return -EINVAL;
119 	}
120 
121 	if (nvmet_is_port_enabled(port, __func__))
122 		return -EACCES;
123 
124 	port->disc_addr.portid = cpu_to_le16(portid);
125 	return count;
126 }
127 
128 CONFIGFS_ATTR(nvmet_, addr_portid);
129 
130 static ssize_t nvmet_addr_traddr_show(struct config_item *item,
131 		char *page)
132 {
133 	struct nvmet_port *port = to_nvmet_port(item);
134 
135 	return snprintf(page, PAGE_SIZE, "%s\n", port->disc_addr.traddr);
136 }
137 
138 static ssize_t nvmet_addr_traddr_store(struct config_item *item,
139 		const char *page, size_t count)
140 {
141 	struct nvmet_port *port = to_nvmet_port(item);
142 
143 	if (count > NVMF_TRADDR_SIZE) {
144 		pr_err("Invalid value '%s' for traddr\n", page);
145 		return -EINVAL;
146 	}
147 
148 	if (nvmet_is_port_enabled(port, __func__))
149 		return -EACCES;
150 
151 	if (sscanf(page, "%s\n", port->disc_addr.traddr) != 1)
152 		return -EINVAL;
153 	return count;
154 }
155 
156 CONFIGFS_ATTR(nvmet_, addr_traddr);
157 
158 static const struct nvmet_type_name_map nvmet_addr_treq[] = {
159 	{ NVMF_TREQ_NOT_SPECIFIED,	"not specified" },
160 	{ NVMF_TREQ_REQUIRED,		"required" },
161 	{ NVMF_TREQ_NOT_REQUIRED,	"not required" },
162 };
163 
164 static inline u8 nvmet_port_disc_addr_treq_mask(struct nvmet_port *port)
165 {
166 	return (port->disc_addr.treq & ~NVME_TREQ_SECURE_CHANNEL_MASK);
167 }
168 
169 static ssize_t nvmet_addr_treq_show(struct config_item *item, char *page)
170 {
171 	u8 treq = nvmet_port_disc_addr_treq_secure_channel(to_nvmet_port(item));
172 	int i;
173 
174 	for (i = 0; i < ARRAY_SIZE(nvmet_addr_treq); i++) {
175 		if (treq == nvmet_addr_treq[i].type)
176 			return snprintf(page, PAGE_SIZE, "%s\n",
177 					nvmet_addr_treq[i].name);
178 	}
179 
180 	return snprintf(page, PAGE_SIZE, "\n");
181 }
182 
183 static ssize_t nvmet_addr_treq_store(struct config_item *item,
184 		const char *page, size_t count)
185 {
186 	struct nvmet_port *port = to_nvmet_port(item);
187 	u8 treq = nvmet_port_disc_addr_treq_mask(port);
188 	int i;
189 
190 	if (nvmet_is_port_enabled(port, __func__))
191 		return -EACCES;
192 
193 	for (i = 0; i < ARRAY_SIZE(nvmet_addr_treq); i++) {
194 		if (sysfs_streq(page, nvmet_addr_treq[i].name))
195 			goto found;
196 	}
197 
198 	pr_err("Invalid value '%s' for treq\n", page);
199 	return -EINVAL;
200 
201 found:
202 	if (port->disc_addr.trtype == NVMF_TRTYPE_TCP &&
203 	    port->disc_addr.tsas.tcp.sectype == NVMF_TCP_SECTYPE_TLS13) {
204 		switch (nvmet_addr_treq[i].type) {
205 		case NVMF_TREQ_NOT_SPECIFIED:
206 			pr_debug("treq '%s' not allowed for TLS1.3\n",
207 				 nvmet_addr_treq[i].name);
208 			return -EINVAL;
209 		case NVMF_TREQ_NOT_REQUIRED:
210 			pr_warn("Allowing non-TLS connections while TLS1.3 is enabled\n");
211 			break;
212 		default:
213 			break;
214 		}
215 	}
216 	treq |= nvmet_addr_treq[i].type;
217 	port->disc_addr.treq = treq;
218 	return count;
219 }
220 
221 CONFIGFS_ATTR(nvmet_, addr_treq);
222 
223 static ssize_t nvmet_addr_trsvcid_show(struct config_item *item,
224 		char *page)
225 {
226 	struct nvmet_port *port = to_nvmet_port(item);
227 
228 	return snprintf(page, PAGE_SIZE, "%s\n", port->disc_addr.trsvcid);
229 }
230 
231 static ssize_t nvmet_addr_trsvcid_store(struct config_item *item,
232 		const char *page, size_t count)
233 {
234 	struct nvmet_port *port = to_nvmet_port(item);
235 
236 	if (count > NVMF_TRSVCID_SIZE) {
237 		pr_err("Invalid value '%s' for trsvcid\n", page);
238 		return -EINVAL;
239 	}
240 	if (nvmet_is_port_enabled(port, __func__))
241 		return -EACCES;
242 
243 	if (sscanf(page, "%s\n", port->disc_addr.trsvcid) != 1)
244 		return -EINVAL;
245 	return count;
246 }
247 
248 CONFIGFS_ATTR(nvmet_, addr_trsvcid);
249 
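/*
 * Editorial example (not part of the original source): the addr_*
 * attributes above are plain files under a port directory. A minimal
 * sketch, assuming configfs is mounted at /sys/kernel/config and using
 * an arbitrary port ID and address:
 *
 *   mkdir /sys/kernel/config/nvmet/ports/1
 *   echo ipv4     > /sys/kernel/config/nvmet/ports/1/addr_adrfam
 *   echo 10.0.0.1 > /sys/kernel/config/nvmet/ports/1/addr_traddr
 *   echo 4420     > /sys/kernel/config/nvmet/ports/1/addr_trsvcid
 *
 * Each of these stores fails with -EACCES once the port is enabled,
 * i.e. once at least one subsystem is linked to it (see
 * nvmet_is_port_enabled() above).
 */
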
250 static ssize_t nvmet_param_inline_data_size_show(struct config_item *item,
251 		char *page)
252 {
253 	struct nvmet_port *port = to_nvmet_port(item);
254 
255 	return snprintf(page, PAGE_SIZE, "%d\n", port->inline_data_size);
256 }
257 
258 static ssize_t nvmet_param_inline_data_size_store(struct config_item *item,
259 		const char *page, size_t count)
260 {
261 	struct nvmet_port *port = to_nvmet_port(item);
262 	int ret;
263 
264 	if (nvmet_is_port_enabled(port, __func__))
265 		return -EACCES;
266 	ret = kstrtoint(page, 0, &port->inline_data_size);
267 	if (ret) {
268 		pr_err("Invalid value '%s' for inline_data_size\n", page);
269 		return -EINVAL;
270 	}
271 	return count;
272 }
273 
274 CONFIGFS_ATTR(nvmet_, param_inline_data_size);
275 
276 #ifdef CONFIG_BLK_DEV_INTEGRITY
277 static ssize_t nvmet_param_pi_enable_show(struct config_item *item,
278 		char *page)
279 {
280 	struct nvmet_port *port = to_nvmet_port(item);
281 
282 	return snprintf(page, PAGE_SIZE, "%d\n", port->pi_enable);
283 }
284 
285 static ssize_t nvmet_param_pi_enable_store(struct config_item *item,
286 		const char *page, size_t count)
287 {
288 	struct nvmet_port *port = to_nvmet_port(item);
289 	bool val;
290 
291 	if (kstrtobool(page, &val))
292 		return -EINVAL;
293 
294 	if (nvmet_is_port_enabled(port, __func__))
295 		return -EACCES;
296 
297 	port->pi_enable = val;
298 	return count;
299 }
300 
301 CONFIGFS_ATTR(nvmet_, param_pi_enable);
302 #endif
303 
304 static ssize_t nvmet_addr_trtype_show(struct config_item *item,
305 		char *page)
306 {
307 	struct nvmet_port *port = to_nvmet_port(item);
308 	int i;
309 
310 	for (i = 0; i < ARRAY_SIZE(nvmet_transport); i++) {
311 		if (port->disc_addr.trtype == nvmet_transport[i].type)
312 			return snprintf(page, PAGE_SIZE,
313 					"%s\n", nvmet_transport[i].name);
314 	}
315 
316 	return sprintf(page, "\n");
317 }
318 
319 static void nvmet_port_init_tsas_rdma(struct nvmet_port *port)
320 {
321 	port->disc_addr.tsas.rdma.qptype = NVMF_RDMA_QPTYPE_CONNECTED;
322 	port->disc_addr.tsas.rdma.prtype = NVMF_RDMA_PRTYPE_NOT_SPECIFIED;
323 	port->disc_addr.tsas.rdma.cms = NVMF_RDMA_CMS_RDMA_CM;
324 }
325 
326 static void nvmet_port_init_tsas_tcp(struct nvmet_port *port, int sectype)
327 {
328 	port->disc_addr.tsas.tcp.sectype = sectype;
329 }
330 
331 static ssize_t nvmet_addr_trtype_store(struct config_item *item,
332 		const char *page, size_t count)
333 {
334 	struct nvmet_port *port = to_nvmet_port(item);
335 	int i;
336 
337 	if (nvmet_is_port_enabled(port, __func__))
338 		return -EACCES;
339 
340 	for (i = 0; i < ARRAY_SIZE(nvmet_transport); i++) {
341 		if (sysfs_streq(page, nvmet_transport[i].name))
342 			goto found;
343 	}
344 
345 	pr_err("Invalid value '%s' for trtype\n", page);
346 	return -EINVAL;
347 
348 found:
349 	memset(&port->disc_addr.tsas, 0, NVMF_TSAS_SIZE);
350 	port->disc_addr.trtype = nvmet_transport[i].type;
351 	if (port->disc_addr.trtype == NVMF_TRTYPE_RDMA)
352 		nvmet_port_init_tsas_rdma(port);
353 	else if (port->disc_addr.trtype == NVMF_TRTYPE_TCP)
354 		nvmet_port_init_tsas_tcp(port, NVMF_TCP_SECTYPE_NONE);
355 	return count;
356 }
357 
358 CONFIGFS_ATTR(nvmet_, addr_trtype);
359 
360 static const struct nvmet_type_name_map nvmet_addr_tsas_tcp[] = {
361 	{ NVMF_TCP_SECTYPE_NONE,	"none" },
362 	{ NVMF_TCP_SECTYPE_TLS13,	"tls1.3" },
363 };
364 
365 static const struct nvmet_type_name_map nvmet_addr_tsas_rdma[] = {
366 	{ NVMF_RDMA_QPTYPE_CONNECTED,	"connected" },
367 	{ NVMF_RDMA_QPTYPE_DATAGRAM,	"datagram"  },
368 };
369 
370 static ssize_t nvmet_addr_tsas_show(struct config_item *item,
371 		char *page)
372 {
373 	struct nvmet_port *port = to_nvmet_port(item);
374 	int i;
375 
376 	if (port->disc_addr.trtype == NVMF_TRTYPE_TCP) {
377 		for (i = 0; i < ARRAY_SIZE(nvmet_addr_tsas_tcp); i++) {
378 			if (port->disc_addr.tsas.tcp.sectype == nvmet_addr_tsas_tcp[i].type)
379 				return sprintf(page, "%s\n", nvmet_addr_tsas_tcp[i].name);
380 		}
381 	} else if (port->disc_addr.trtype == NVMF_TRTYPE_RDMA) {
382 		for (i = 0; i < ARRAY_SIZE(nvmet_addr_tsas_rdma); i++) {
383 			if (port->disc_addr.tsas.rdma.qptype == nvmet_addr_tsas_rdma[i].type)
384 				return sprintf(page, "%s\n", nvmet_addr_tsas_rdma[i].name);
385 		}
386 	}
387 	return sprintf(page, "reserved\n");
388 }
389 
390 static ssize_t nvmet_addr_tsas_store(struct config_item *item,
391 		const char *page, size_t count)
392 {
393 	struct nvmet_port *port = to_nvmet_port(item);
394 	u8 treq = nvmet_port_disc_addr_treq_mask(port);
395 	u8 sectype;
396 	int i;
397 
398 	if (nvmet_is_port_enabled(port, __func__))
399 		return -EACCES;
400 
401 	if (port->disc_addr.trtype != NVMF_TRTYPE_TCP)
402 		return -EINVAL;
403 
404 	for (i = 0; i < ARRAY_SIZE(nvmet_addr_tsas_tcp); i++) {
405 		if (sysfs_streq(page, nvmet_addr_tsas_tcp[i].name)) {
406 			sectype = nvmet_addr_tsas_tcp[i].type;
407 			goto found;
408 		}
409 	}
410 
411 	pr_err("Invalid value '%s' for tsas\n", page);
412 	return -EINVAL;
413 
414 found:
415 	if (sectype == NVMF_TCP_SECTYPE_TLS13) {
416 		if (!IS_ENABLED(CONFIG_NVME_TARGET_TCP_TLS)) {
417 			pr_err("TLS is not supported\n");
418 			return -EINVAL;
419 		}
420 		if (!port->keyring) {
421 			pr_err("TLS keyring not configured\n");
422 			return -EINVAL;
423 		}
424 	}
425 
426 	nvmet_port_init_tsas_tcp(port, sectype);
427 	/*
428 	 * If TLS is enabled, TREQ should default to 'required'.
429 	 */
430 	if (sectype == NVMF_TCP_SECTYPE_TLS13) {
431 		u8 sc = nvmet_port_disc_addr_treq_secure_channel(port);
432 
433 		if (sc == NVMF_TREQ_NOT_SPECIFIED)
434 			treq |= NVMF_TREQ_REQUIRED;
435 		else
436 			treq |= sc;
437 	} else {
438 		treq |= NVMF_TREQ_NOT_SPECIFIED;
439 	}
440 	port->disc_addr.treq = treq;
441 	return count;
442 }
443 
444 CONFIGFS_ATTR(nvmet_, addr_tsas);
445 
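/*
 * Editorial example (not part of the original source): selecting the
 * transport and, for TCP, a secure channel. A sketch assuming the port
 * above and a kernel built with CONFIG_NVME_TARGET_TCP_TLS:
 *
 *   echo tcp    > /sys/kernel/config/nvmet/ports/1/addr_trtype
 *   echo tls1.3 > /sys/kernel/config/nvmet/ports/1/addr_tsas
 *   cat /sys/kernel/config/nvmet/ports/1/addr_treq    # now "required"
 *
 * addr_tsas is only writable for TCP ports, and "tls1.3" is rejected
 * unless the port found the NVMe keyring at creation time.
 */
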
446 /*
447  * Namespace structures & file operation functions below
448  */
449 static ssize_t nvmet_ns_device_path_show(struct config_item *item, char *page)
450 {
451 	return sprintf(page, "%s\n", to_nvmet_ns(item)->device_path);
452 }
453 
454 static ssize_t nvmet_ns_device_path_store(struct config_item *item,
455 		const char *page, size_t count)
456 {
457 	struct nvmet_ns *ns = to_nvmet_ns(item);
458 	struct nvmet_subsys *subsys = ns->subsys;
459 	size_t len;
460 	int ret;
461 
462 	mutex_lock(&subsys->lock);
463 	ret = -EBUSY;
464 	if (ns->enabled)
465 		goto out_unlock;
466 
467 	ret = -EINVAL;
468 	len = strcspn(page, "\n");
469 	if (!len)
470 		goto out_unlock;
471 
472 	kfree(ns->device_path);
473 	ret = -ENOMEM;
474 	ns->device_path = kmemdup_nul(page, len, GFP_KERNEL);
475 	if (!ns->device_path)
476 		goto out_unlock;
477 
478 	mutex_unlock(&subsys->lock);
479 	return count;
480 
481 out_unlock:
482 	mutex_unlock(&subsys->lock);
483 	return ret;
484 }
485 
486 CONFIGFS_ATTR(nvmet_ns_, device_path);
487 
488 #ifdef CONFIG_PCI_P2PDMA
489 static ssize_t nvmet_ns_p2pmem_show(struct config_item *item, char *page)
490 {
491 	struct nvmet_ns *ns = to_nvmet_ns(item);
492 
493 	return pci_p2pdma_enable_show(page, ns->p2p_dev, ns->use_p2pmem);
494 }
495 
496 static ssize_t nvmet_ns_p2pmem_store(struct config_item *item,
497 		const char *page, size_t count)
498 {
499 	struct nvmet_ns *ns = to_nvmet_ns(item);
500 	struct pci_dev *p2p_dev = NULL;
501 	bool use_p2pmem;
502 	int ret = count;
503 	int error;
504 
505 	mutex_lock(&ns->subsys->lock);
506 	if (ns->enabled) {
507 		ret = -EBUSY;
508 		goto out_unlock;
509 	}
510 
511 	error = pci_p2pdma_enable_store(page, &p2p_dev, &use_p2pmem);
512 	if (error) {
513 		ret = error;
514 		goto out_unlock;
515 	}
516 
517 	ns->use_p2pmem = use_p2pmem;
518 	pci_dev_put(ns->p2p_dev);
519 	ns->p2p_dev = p2p_dev;
520 
521 out_unlock:
522 	mutex_unlock(&ns->subsys->lock);
523 
524 	return ret;
525 }
526 
527 CONFIGFS_ATTR(nvmet_ns_, p2pmem);
528 #endif /* CONFIG_PCI_P2PDMA */
529 
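/*
 * Editorial example (not part of the original source): going by the
 * pci_p2pdma_enable_store() helper used above, p2pmem on a disabled
 * namespace accepts a boolean (pick a provider automatically) or a PCI
 * device name to pin a specific provider; the address below is
 * hypothetical:
 *
 *   echo 1            > .../namespaces/1/p2pmem
 *   echo 0000:01:00.0 > .../namespaces/1/p2pmem
 */
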
530 static ssize_t nvmet_ns_device_uuid_show(struct config_item *item, char *page)
531 {
532 	return sprintf(page, "%pUb\n", &to_nvmet_ns(item)->uuid);
533 }
534 
535 static ssize_t nvmet_ns_device_uuid_store(struct config_item *item,
536 					  const char *page, size_t count)
537 {
538 	struct nvmet_ns *ns = to_nvmet_ns(item);
539 	struct nvmet_subsys *subsys = ns->subsys;
540 	int ret = 0;
541 
542 	mutex_lock(&subsys->lock);
543 	if (ns->enabled) {
544 		ret = -EBUSY;
545 		goto out_unlock;
546 	}
547 
548 	if (uuid_parse(page, &ns->uuid))
549 		ret = -EINVAL;
550 
551 out_unlock:
552 	mutex_unlock(&subsys->lock);
553 	return ret ? ret : count;
554 }
555 
556 CONFIGFS_ATTR(nvmet_ns_, device_uuid);
557 
558 static ssize_t nvmet_ns_device_nguid_show(struct config_item *item, char *page)
559 {
560 	return sprintf(page, "%pUb\n", &to_nvmet_ns(item)->nguid);
561 }
562 
563 static ssize_t nvmet_ns_device_nguid_store(struct config_item *item,
564 		const char *page, size_t count)
565 {
566 	struct nvmet_ns *ns = to_nvmet_ns(item);
567 	struct nvmet_subsys *subsys = ns->subsys;
568 	u8 nguid[16];
569 	const char *p = page;
570 	int i;
571 	int ret = 0;
572 
573 	mutex_lock(&subsys->lock);
574 	if (ns->enabled) {
575 		ret = -EBUSY;
576 		goto out_unlock;
577 	}
578 
579 	for (i = 0; i < 16; i++) {
580 		if (p + 2 > page + count) {
581 			ret = -EINVAL;
582 			goto out_unlock;
583 		}
584 		if (!isxdigit(p[0]) || !isxdigit(p[1])) {
585 			ret = -EINVAL;
586 			goto out_unlock;
587 		}
588 
589 		nguid[i] = (hex_to_bin(p[0]) << 4) | hex_to_bin(p[1]);
590 		p += 2;
591 
592 		if (*p == '-' || *p == ':')
593 			p++;
594 	}
595 
596 	memcpy(&ns->nguid, nguid, sizeof(nguid));
597 out_unlock:
598 	mutex_unlock(&subsys->lock);
599 	return ret ? ret : count;
600 }
601 
602 CONFIGFS_ATTR(nvmet_ns_, device_nguid);
603 
604 static ssize_t nvmet_ns_ana_grpid_show(struct config_item *item, char *page)
605 {
606 	return sprintf(page, "%u\n", to_nvmet_ns(item)->anagrpid);
607 }
608 
609 static ssize_t nvmet_ns_ana_grpid_store(struct config_item *item,
610 		const char *page, size_t count)
611 {
612 	struct nvmet_ns *ns = to_nvmet_ns(item);
613 	u32 oldgrpid, newgrpid;
614 	int ret;
615 
616 	ret = kstrtou32(page, 0, &newgrpid);
617 	if (ret)
618 		return ret;
619 
620 	if (newgrpid < 1 || newgrpid > NVMET_MAX_ANAGRPS)
621 		return -EINVAL;
622 
623 	down_write(&nvmet_ana_sem);
624 	oldgrpid = ns->anagrpid;
625 	newgrpid = array_index_nospec(newgrpid, NVMET_MAX_ANAGRPS);
626 	nvmet_ana_group_enabled[newgrpid]++;
627 	ns->anagrpid = newgrpid;
628 	nvmet_ana_group_enabled[oldgrpid]--;
629 	nvmet_ana_chgcnt++;
630 	up_write(&nvmet_ana_sem);
631 
632 	nvmet_send_ana_event(ns->subsys, NULL);
633 	return count;
634 }
635 
636 CONFIGFS_ATTR(nvmet_ns_, ana_grpid);
637 
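/*
 * Editorial example (not part of the original source): moving a
 * namespace into ANA group 2:
 *
 *   echo 2 > /sys/kernel/config/nvmet/subsystems/<nqn>/namespaces/1/ana_grpid
 *
 * Group 1 is every port's default; higher group IDs are normally
 * created first under a port's ana_groups directory (see further
 * below). The store sends an ANA change AEN so connected hosts re-read
 * the ANA log page.
 */
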
638 static ssize_t nvmet_ns_enable_show(struct config_item *item, char *page)
639 {
640 	return sprintf(page, "%d\n", to_nvmet_ns(item)->enabled);
641 }
642 
643 static ssize_t nvmet_ns_enable_store(struct config_item *item,
644 		const char *page, size_t count)
645 {
646 	struct nvmet_ns *ns = to_nvmet_ns(item);
647 	bool enable;
648 	int ret = 0;
649 
650 	if (kstrtobool(page, &enable))
651 		return -EINVAL;
652 
653 	if (enable)
654 		ret = nvmet_ns_enable(ns);
655 	else
656 		nvmet_ns_disable(ns);
657 
658 	return ret ? ret : count;
659 }
660 
661 CONFIGFS_ATTR(nvmet_ns_, enable);
662 
663 static ssize_t nvmet_ns_buffered_io_show(struct config_item *item, char *page)
664 {
665 	return sprintf(page, "%d\n", to_nvmet_ns(item)->buffered_io);
666 }
667 
668 static ssize_t nvmet_ns_buffered_io_store(struct config_item *item,
669 		const char *page, size_t count)
670 {
671 	struct nvmet_ns *ns = to_nvmet_ns(item);
672 	bool val;
673 
674 	if (kstrtobool(page, &val))
675 		return -EINVAL;
676 
677 	mutex_lock(&ns->subsys->lock);
678 	if (ns->enabled) {
679 		pr_err("Disable namespace before setting buffered_io value.\n");
680 		mutex_unlock(&ns->subsys->lock);
681 		return -EINVAL;
682 	}
683 
684 	ns->buffered_io = val;
685 	mutex_unlock(&ns->subsys->lock);
686 	return count;
687 }
688 
689 CONFIGFS_ATTR(nvmet_ns_, buffered_io);
690 
691 static ssize_t nvmet_ns_revalidate_size_store(struct config_item *item,
692 		const char *page, size_t count)
693 {
694 	struct nvmet_ns *ns = to_nvmet_ns(item);
695 	bool val;
696 
697 	if (kstrtobool(page, &val))
698 		return -EINVAL;
699 
700 	if (!val)
701 		return -EINVAL;
702 
703 	mutex_lock(&ns->subsys->lock);
704 	if (!ns->enabled) {
705 		pr_err("Enable namespace before revalidating size.\n");
706 		mutex_unlock(&ns->subsys->lock);
707 		return -EINVAL;
708 	}
709 	if (nvmet_ns_revalidate(ns))
710 		nvmet_ns_changed(ns->subsys, ns->nsid);
711 	mutex_unlock(&ns->subsys->lock);
712 	return count;
713 }
714 
715 CONFIGFS_ATTR_WO(nvmet_ns_, revalidate_size);
716 
717 static struct configfs_attribute *nvmet_ns_attrs[] = {
718 	&nvmet_ns_attr_device_path,
719 	&nvmet_ns_attr_device_nguid,
720 	&nvmet_ns_attr_device_uuid,
721 	&nvmet_ns_attr_ana_grpid,
722 	&nvmet_ns_attr_enable,
723 	&nvmet_ns_attr_buffered_io,
724 	&nvmet_ns_attr_revalidate_size,
725 #ifdef CONFIG_PCI_P2PDMA
726 	&nvmet_ns_attr_p2pmem,
727 #endif
728 	NULL,
729 };
730 
731 static void nvmet_ns_release(struct config_item *item)
732 {
733 	struct nvmet_ns *ns = to_nvmet_ns(item);
734 
735 	nvmet_ns_free(ns);
736 }
737 
738 static struct configfs_item_operations nvmet_ns_item_ops = {
739 	.release		= nvmet_ns_release,
740 };
741 
742 static const struct config_item_type nvmet_ns_type = {
743 	.ct_item_ops		= &nvmet_ns_item_ops,
744 	.ct_attrs		= nvmet_ns_attrs,
745 	.ct_owner		= THIS_MODULE,
746 };
747 
748 static struct config_group *nvmet_ns_make(struct config_group *group,
749 		const char *name)
750 {
751 	struct nvmet_subsys *subsys = namespaces_to_subsys(&group->cg_item);
752 	struct nvmet_ns *ns;
753 	int ret;
754 	u32 nsid;
755 
756 	ret = kstrtou32(name, 0, &nsid);
757 	if (ret)
758 		goto out;
759 
760 	ret = -EINVAL;
761 	if (nsid == 0 || nsid == NVME_NSID_ALL) {
762 		pr_err("invalid nsid %#x\n", nsid);
763 		goto out;
764 	}
765 
766 	ret = -ENOMEM;
767 	ns = nvmet_ns_alloc(subsys, nsid);
768 	if (!ns)
769 		goto out;
770 	config_group_init_type_name(&ns->group, name, &nvmet_ns_type);
771 
772 	pr_info("adding nsid %d to subsystem %s\n", nsid, subsys->subsysnqn);
773 
774 	return &ns->group;
775 out:
776 	return ERR_PTR(ret);
777 }
778 
779 static struct configfs_group_operations nvmet_namespaces_group_ops = {
780 	.make_group		= nvmet_ns_make,
781 };
782 
783 static const struct config_item_type nvmet_namespaces_type = {
784 	.ct_group_ops		= &nvmet_namespaces_group_ops,
785 	.ct_owner		= THIS_MODULE,
786 };
787 
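/*
 * Editorial example (not part of the original source): the typical
 * namespace lifecycle driven by the ops above, with an arbitrary NQN
 * and a hypothetical backing device:
 *
 *   mkdir /sys/kernel/config/nvmet/subsystems/<nqn>/namespaces/1
 *   echo -n /dev/nvme0n1 > .../namespaces/1/device_path
 *   echo 1 > .../namespaces/1/enable
 *
 * device_path, device_uuid and device_nguid are only writable while
 * the namespace is disabled; their stores return -EBUSY otherwise.
 */
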
788 #ifdef CONFIG_NVME_TARGET_PASSTHRU
789 
790 static ssize_t nvmet_passthru_device_path_show(struct config_item *item,
791 		char *page)
792 {
793 	struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
794 
795 	return snprintf(page, PAGE_SIZE, "%s\n", subsys->passthru_ctrl_path);
796 }
797 
798 static ssize_t nvmet_passthru_device_path_store(struct config_item *item,
799 		const char *page, size_t count)
800 {
801 	struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
802 	size_t len;
803 	int ret;
804 
805 	mutex_lock(&subsys->lock);
806 
807 	ret = -EBUSY;
808 	if (subsys->passthru_ctrl)
809 		goto out_unlock;
810 
811 	ret = -EINVAL;
812 	len = strcspn(page, "\n");
813 	if (!len)
814 		goto out_unlock;
815 
816 	kfree(subsys->passthru_ctrl_path);
817 	ret = -ENOMEM;
818 	subsys->passthru_ctrl_path = kstrndup(page, len, GFP_KERNEL);
819 	if (!subsys->passthru_ctrl_path)
820 		goto out_unlock;
821 
822 	mutex_unlock(&subsys->lock);
823 
824 	return count;
825 out_unlock:
826 	mutex_unlock(&subsys->lock);
827 	return ret;
828 }
829 CONFIGFS_ATTR(nvmet_passthru_, device_path);
830 
831 static ssize_t nvmet_passthru_enable_show(struct config_item *item,
832 		char *page)
833 {
834 	struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
835 
836 	return sprintf(page, "%d\n", subsys->passthru_ctrl ? 1 : 0);
837 }
838 
839 static ssize_t nvmet_passthru_enable_store(struct config_item *item,
840 		const char *page, size_t count)
841 {
842 	struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
843 	bool enable;
844 	int ret = 0;
845 
846 	if (kstrtobool(page, &enable))
847 		return -EINVAL;
848 
849 	if (enable)
850 		ret = nvmet_passthru_ctrl_enable(subsys);
851 	else
852 		nvmet_passthru_ctrl_disable(subsys);
853 
854 	return ret ? ret : count;
855 }
856 CONFIGFS_ATTR(nvmet_passthru_, enable);
857 
858 static ssize_t nvmet_passthru_admin_timeout_show(struct config_item *item,
859 		char *page)
860 {
861 	return sprintf(page, "%u\n", to_subsys(item->ci_parent)->admin_timeout);
862 }
863 
864 static ssize_t nvmet_passthru_admin_timeout_store(struct config_item *item,
865 		const char *page, size_t count)
866 {
867 	struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
868 	unsigned int timeout;
869 
870 	if (kstrtouint(page, 0, &timeout))
871 		return -EINVAL;
872 	subsys->admin_timeout = timeout;
873 	return count;
874 }
875 CONFIGFS_ATTR(nvmet_passthru_, admin_timeout);
876 
877 static ssize_t nvmet_passthru_io_timeout_show(struct config_item *item,
878 		char *page)
879 {
880 	return sprintf(page, "%u\n", to_subsys(item->ci_parent)->io_timeout);
881 }
882 
883 static ssize_t nvmet_passthru_io_timeout_store(struct config_item *item,
884 		const char *page, size_t count)
885 {
886 	struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
887 	unsigned int timeout;
888 
889 	if (kstrtouint(page, 0, &timeout))
890 		return -EINVAL;
891 	subsys->io_timeout = timeout;
892 	return count;
893 }
894 CONFIGFS_ATTR(nvmet_passthru_, io_timeout);
895 
896 static ssize_t nvmet_passthru_clear_ids_show(struct config_item *item,
897 		char *page)
898 {
899 	return sprintf(page, "%u\n", to_subsys(item->ci_parent)->clear_ids);
900 }
901 
902 static ssize_t nvmet_passthru_clear_ids_store(struct config_item *item,
903 		const char *page, size_t count)
904 {
905 	struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
906 	unsigned int clear_ids;
907 
908 	if (kstrtouint(page, 0, &clear_ids))
909 		return -EINVAL;
910 	subsys->clear_ids = clear_ids;
911 	return count;
912 }
913 CONFIGFS_ATTR(nvmet_passthru_, clear_ids);
914 
915 static struct configfs_attribute *nvmet_passthru_attrs[] = {
916 	&nvmet_passthru_attr_device_path,
917 	&nvmet_passthru_attr_enable,
918 	&nvmet_passthru_attr_admin_timeout,
919 	&nvmet_passthru_attr_io_timeout,
920 	&nvmet_passthru_attr_clear_ids,
921 	NULL,
922 };
923 
924 static const struct config_item_type nvmet_passthru_type = {
925 	.ct_attrs		= nvmet_passthru_attrs,
926 	.ct_owner		= THIS_MODULE,
927 };
928 
929 static void nvmet_add_passthru_group(struct nvmet_subsys *subsys)
930 {
931 	config_group_init_type_name(&subsys->passthru_group,
932 				    "passthru", &nvmet_passthru_type);
933 	configfs_add_default_group(&subsys->passthru_group,
934 				   &subsys->group);
935 }
936 
937 #else /* CONFIG_NVME_TARGET_PASSTHRU */
938 
939 static void nvmet_add_passthru_group(struct nvmet_subsys *subsys)
940 {
941 }
942 
943 #endif /* CONFIG_NVME_TARGET_PASSTHRU */
944 
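/*
 * Editorial example (not part of the original source): with
 * CONFIG_NVME_TARGET_PASSTHRU, every subsystem carries a "passthru"
 * default group. A sketch assuming a local controller at /dev/nvme0:
 *
 *   echo -n /dev/nvme0 > /sys/kernel/config/nvmet/subsystems/<nqn>/passthru/device_path
 *   echo 1 > /sys/kernel/config/nvmet/subsystems/<nqn>/passthru/enable
 *
 * admin_timeout, io_timeout and clear_ids are tunable through the same
 * group.
 */
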
945 static int nvmet_port_subsys_allow_link(struct config_item *parent,
946 		struct config_item *target)
947 {
948 	struct nvmet_port *port = to_nvmet_port(parent->ci_parent);
949 	struct nvmet_subsys *subsys;
950 	struct nvmet_subsys_link *link, *p;
951 	int ret;
952 
953 	if (target->ci_type != &nvmet_subsys_type) {
954 		pr_err("can only link subsystems into the subsystems dir!\n");
955 		return -EINVAL;
956 	}
957 	subsys = to_subsys(target);
958 	link = kmalloc(sizeof(*link), GFP_KERNEL);
959 	if (!link)
960 		return -ENOMEM;
961 	link->subsys = subsys;
962 
963 	down_write(&nvmet_config_sem);
964 	ret = -EEXIST;
965 	list_for_each_entry(p, &port->subsystems, entry) {
966 		if (p->subsys == subsys)
967 			goto out_free_link;
968 	}
969 
970 	if (list_empty(&port->subsystems)) {
971 		ret = nvmet_enable_port(port);
972 		if (ret)
973 			goto out_free_link;
974 	}
975 
976 	list_add_tail(&link->entry, &port->subsystems);
977 	nvmet_port_disc_changed(port, subsys);
978 
979 	up_write(&nvmet_config_sem);
980 	return 0;
981 
982 out_free_link:
983 	up_write(&nvmet_config_sem);
984 	kfree(link);
985 	return ret;
986 }
987 
988 static void nvmet_port_subsys_drop_link(struct config_item *parent,
989 		struct config_item *target)
990 {
991 	struct nvmet_port *port = to_nvmet_port(parent->ci_parent);
992 	struct nvmet_subsys *subsys = to_subsys(target);
993 	struct nvmet_subsys_link *p;
994 
995 	down_write(&nvmet_config_sem);
996 	list_for_each_entry(p, &port->subsystems, entry) {
997 		if (p->subsys == subsys)
998 			goto found;
999 	}
1000 	up_write(&nvmet_config_sem);
1001 	return;
1002 
1003 found:
1004 	list_del(&p->entry);
1005 	nvmet_port_del_ctrls(port, subsys);
1006 	nvmet_port_disc_changed(port, subsys);
1007 
1008 	if (list_empty(&port->subsystems))
1009 		nvmet_disable_port(port);
1010 	up_write(&nvmet_config_sem);
1011 	kfree(p);
1012 }
1013 
1014 static struct configfs_item_operations nvmet_port_subsys_item_ops = {
1015 	.allow_link		= nvmet_port_subsys_allow_link,
1016 	.drop_link		= nvmet_port_subsys_drop_link,
1017 };
1018 
1019 static const struct config_item_type nvmet_port_subsys_type = {
1020 	.ct_item_ops		= &nvmet_port_subsys_item_ops,
1021 	.ct_owner		= THIS_MODULE,
1022 };
1023 
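/*
 * Editorial example (not part of the original source): exporting a
 * subsystem through a port is a symlink, and the first link is what
 * actually enables the port (nvmet_enable_port() above):
 *
 *   ln -s /sys/kernel/config/nvmet/subsystems/<nqn> \
 *         /sys/kernel/config/nvmet/ports/1/subsystems/<nqn>
 *
 * Removing the last such link disables the port again.
 */
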
1024 static int nvmet_allowed_hosts_allow_link(struct config_item *parent,
1025 		struct config_item *target)
1026 {
1027 	struct nvmet_subsys *subsys = to_subsys(parent->ci_parent);
1028 	struct nvmet_host *host;
1029 	struct nvmet_host_link *link, *p;
1030 	int ret;
1031 
1032 	if (target->ci_type != &nvmet_host_type) {
1033 		pr_err("can only link hosts into the allowed_hosts directory!\n");
1034 		return -EINVAL;
1035 	}
1036 
1037 	host = to_host(target);
1038 	link = kmalloc(sizeof(*link), GFP_KERNEL);
1039 	if (!link)
1040 		return -ENOMEM;
1041 	link->host = host;
1042 
1043 	down_write(&nvmet_config_sem);
1044 	ret = -EINVAL;
1045 	if (subsys->allow_any_host) {
1046 		pr_err("can't add hosts when allow_any_host is set!\n");
1047 		goto out_free_link;
1048 	}
1049 
1050 	ret = -EEXIST;
1051 	list_for_each_entry(p, &subsys->hosts, entry) {
1052 		if (!strcmp(nvmet_host_name(p->host), nvmet_host_name(host)))
1053 			goto out_free_link;
1054 	}
1055 	list_add_tail(&link->entry, &subsys->hosts);
1056 	nvmet_subsys_disc_changed(subsys, host);
1057 
1058 	up_write(&nvmet_config_sem);
1059 	return 0;
1060 out_free_link:
1061 	up_write(&nvmet_config_sem);
1062 	kfree(link);
1063 	return ret;
1064 }
1065 
1066 static void nvmet_allowed_hosts_drop_link(struct config_item *parent,
1067 		struct config_item *target)
1068 {
1069 	struct nvmet_subsys *subsys = to_subsys(parent->ci_parent);
1070 	struct nvmet_host *host = to_host(target);
1071 	struct nvmet_host_link *p;
1072 
1073 	down_write(&nvmet_config_sem);
1074 	list_for_each_entry(p, &subsys->hosts, entry) {
1075 		if (!strcmp(nvmet_host_name(p->host), nvmet_host_name(host)))
1076 			goto found;
1077 	}
1078 	up_write(&nvmet_config_sem);
1079 	return;
1080 
1081 found:
1082 	list_del(&p->entry);
1083 	nvmet_subsys_disc_changed(subsys, host);
1084 
1085 	up_write(&nvmet_config_sem);
1086 	kfree(p);
1087 }
1088 
1089 static struct configfs_item_operations nvmet_allowed_hosts_item_ops = {
1090 	.allow_link		= nvmet_allowed_hosts_allow_link,
1091 	.drop_link		= nvmet_allowed_hosts_drop_link,
1092 };
1093 
1094 static const struct config_item_type nvmet_allowed_hosts_type = {
1095 	.ct_item_ops		= &nvmet_allowed_hosts_item_ops,
1096 	.ct_owner		= THIS_MODULE,
1097 };
1098 
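/*
 * Editorial example (not part of the original source): restricting a
 * subsystem to explicit hosts. attr_allow_any_host must be 0, since
 * the link op above rejects new entries while it is set:
 *
 *   mkdir /sys/kernel/config/nvmet/hosts/<hostnqn>
 *   ln -s /sys/kernel/config/nvmet/hosts/<hostnqn> \
 *         /sys/kernel/config/nvmet/subsystems/<nqn>/allowed_hosts/<hostnqn>
 */
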
1099 static ssize_t nvmet_subsys_attr_allow_any_host_show(struct config_item *item,
1100 		char *page)
1101 {
1102 	return snprintf(page, PAGE_SIZE, "%d\n",
1103 		to_subsys(item)->allow_any_host);
1104 }
1105 
1106 static ssize_t nvmet_subsys_attr_allow_any_host_store(struct config_item *item,
1107 		const char *page, size_t count)
1108 {
1109 	struct nvmet_subsys *subsys = to_subsys(item);
1110 	bool allow_any_host;
1111 	int ret = 0;
1112 
1113 	if (kstrtobool(page, &allow_any_host))
1114 		return -EINVAL;
1115 
1116 	down_write(&nvmet_config_sem);
1117 	if (allow_any_host && !list_empty(&subsys->hosts)) {
1118 		pr_err("Can't set allow_any_host when explicit hosts are set!\n");
1119 		ret = -EINVAL;
1120 		goto out_unlock;
1121 	}
1122 
1123 	if (subsys->allow_any_host != allow_any_host) {
1124 		subsys->allow_any_host = allow_any_host;
1125 		nvmet_subsys_disc_changed(subsys, NULL);
1126 	}
1127 
1128 out_unlock:
1129 	up_write(&nvmet_config_sem);
1130 	return ret ? ret : count;
1131 }
1132 
1133 CONFIGFS_ATTR(nvmet_subsys_, attr_allow_any_host);
1134 
1135 static ssize_t nvmet_subsys_attr_version_show(struct config_item *item,
1136 					      char *page)
1137 {
1138 	struct nvmet_subsys *subsys = to_subsys(item);
1139 
1140 	if (NVME_TERTIARY(subsys->ver))
1141 		return snprintf(page, PAGE_SIZE, "%llu.%llu.%llu\n",
1142 				NVME_MAJOR(subsys->ver),
1143 				NVME_MINOR(subsys->ver),
1144 				NVME_TERTIARY(subsys->ver));
1145 
1146 	return snprintf(page, PAGE_SIZE, "%llu.%llu\n",
1147 			NVME_MAJOR(subsys->ver),
1148 			NVME_MINOR(subsys->ver));
1149 }
1150 
1151 static ssize_t
1152 nvmet_subsys_attr_version_store_locked(struct nvmet_subsys *subsys,
1153 		const char *page, size_t count)
1154 {
1155 	int major, minor, tertiary = 0;
1156 	int ret;
1157 
1158 	if (subsys->subsys_discovered) {
1159 		if (NVME_TERTIARY(subsys->ver))
1160 			pr_err("Can't set version number. %llu.%llu.%llu is already assigned\n",
1161 			       NVME_MAJOR(subsys->ver),
1162 			       NVME_MINOR(subsys->ver),
1163 			       NVME_TERTIARY(subsys->ver));
1164 		else
1165 			pr_err("Can't set version number. %llu.%llu is already assigned\n",
1166 			       NVME_MAJOR(subsys->ver),
1167 			       NVME_MINOR(subsys->ver));
1168 		return -EINVAL;
1169 	}
1170 
1171 	/* passthru subsystems use the underlying controller's version */
1172 	if (nvmet_is_passthru_subsys(subsys))
1173 		return -EINVAL;
1174 
1175 	ret = sscanf(page, "%d.%d.%d\n", &major, &minor, &tertiary);
1176 	if (ret != 2 && ret != 3)
1177 		return -EINVAL;
1178 
1179 	subsys->ver = NVME_VS(major, minor, tertiary);
1180 
1181 	return count;
1182 }
1183 
1184 static ssize_t nvmet_subsys_attr_version_store(struct config_item *item,
1185 					       const char *page, size_t count)
1186 {
1187 	struct nvmet_subsys *subsys = to_subsys(item);
1188 	ssize_t ret;
1189 
1190 	down_write(&nvmet_config_sem);
1191 	mutex_lock(&subsys->lock);
1192 	ret = nvmet_subsys_attr_version_store_locked(subsys, page, count);
1193 	mutex_unlock(&subsys->lock);
1194 	up_write(&nvmet_config_sem);
1195 
1196 	return ret;
1197 }
1198 CONFIGFS_ATTR(nvmet_subsys_, attr_version);
1199 
1200 /* See Section 1.5 of NVMe 1.4 */
1201 static bool nvmet_is_ascii(const char c)
1202 {
1203 	return c >= 0x20 && c <= 0x7e;
1204 }
1205 
1206 static ssize_t nvmet_subsys_attr_serial_show(struct config_item *item,
1207 					     char *page)
1208 {
1209 	struct nvmet_subsys *subsys = to_subsys(item);
1210 
1211 	return snprintf(page, PAGE_SIZE, "%.*s\n",
1212 			NVMET_SN_MAX_SIZE, subsys->serial);
1213 }
1214 
1215 static ssize_t
1216 nvmet_subsys_attr_serial_store_locked(struct nvmet_subsys *subsys,
1217 		const char *page, size_t count)
1218 {
1219 	int pos, len = strcspn(page, "\n");
1220 
1221 	if (subsys->subsys_discovered) {
1222 		pr_err("Can't set serial number. %s is already assigned\n",
1223 		       subsys->serial);
1224 		return -EINVAL;
1225 	}
1226 
1227 	if (!len || len > NVMET_SN_MAX_SIZE) {
1228 		pr_err("Serial Number cannot be empty or exceed %d bytes\n",
1229 		       NVMET_SN_MAX_SIZE);
1230 		return -EINVAL;
1231 	}
1232 
1233 	for (pos = 0; pos < len; pos++) {
1234 		if (!nvmet_is_ascii(page[pos])) {
1235 			pr_err("Serial Number must contain only ASCII characters\n");
1236 			return -EINVAL;
1237 		}
1238 	}
1239 
1240 	memcpy_and_pad(subsys->serial, NVMET_SN_MAX_SIZE, page, len, ' ');
1241 
1242 	return count;
1243 }
1244 
1245 static ssize_t nvmet_subsys_attr_serial_store(struct config_item *item,
1246 					      const char *page, size_t count)
1247 {
1248 	struct nvmet_subsys *subsys = to_subsys(item);
1249 	ssize_t ret;
1250 
1251 	down_write(&nvmet_config_sem);
1252 	mutex_lock(&subsys->lock);
1253 	ret = nvmet_subsys_attr_serial_store_locked(subsys, page, count);
1254 	mutex_unlock(&subsys->lock);
1255 	up_write(&nvmet_config_sem);
1256 
1257 	return ret;
1258 }
1259 CONFIGFS_ATTR(nvmet_subsys_, attr_serial);
1260 
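/*
 * Editorial example (not part of the original source): identity
 * attributes such as attr_serial, attr_version, attr_model,
 * attr_firmware and attr_ieee_oui can only be set before the first
 * controller is created for the subsystem; afterwards the
 * *_store_locked() helpers return -EINVAL. Values below are arbitrary:
 *
 *   echo 1234567890 > /sys/kernel/config/nvmet/subsystems/<nqn>/attr_serial
 *   echo 1.3        > /sys/kernel/config/nvmet/subsystems/<nqn>/attr_version
 */
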
1261 static ssize_t nvmet_subsys_attr_cntlid_min_show(struct config_item *item,
1262 						 char *page)
1263 {
1264 	return snprintf(page, PAGE_SIZE, "%u\n", to_subsys(item)->cntlid_min);
1265 }
1266 
1267 static ssize_t nvmet_subsys_attr_cntlid_min_store(struct config_item *item,
1268 						  const char *page, size_t cnt)
1269 {
1270 	u16 cntlid_min;
1271 
1272 	if (sscanf(page, "%hu\n", &cntlid_min) != 1)
1273 		return -EINVAL;
1274 
1275 	if (cntlid_min == 0)
1276 		return -EINVAL;
1277 
1278 	down_write(&nvmet_config_sem);
1279 	if (cntlid_min >= to_subsys(item)->cntlid_max)
1280 		goto out_unlock;
1281 	to_subsys(item)->cntlid_min = cntlid_min;
1282 	up_write(&nvmet_config_sem);
1283 	return cnt;
1284 
1285 out_unlock:
1286 	up_write(&nvmet_config_sem);
1287 	return -EINVAL;
1288 }
1289 CONFIGFS_ATTR(nvmet_subsys_, attr_cntlid_min);
1290 
1291 static ssize_t nvmet_subsys_attr_cntlid_max_show(struct config_item *item,
1292 						 char *page)
1293 {
1294 	return snprintf(page, PAGE_SIZE, "%u\n", to_subsys(item)->cntlid_max);
1295 }
1296 
1297 static ssize_t nvmet_subsys_attr_cntlid_max_store(struct config_item *item,
1298 						  const char *page, size_t cnt)
1299 {
1300 	u16 cntlid_max;
1301 
1302 	if (sscanf(page, "%hu\n", &cntlid_max) != 1)
1303 		return -EINVAL;
1304 
1305 	if (cntlid_max == 0)
1306 		return -EINVAL;
1307 
1308 	down_write(&nvmet_config_sem);
1309 	if (cntlid_max <= to_subsys(item)->cntlid_min)
1310 		goto out_unlock;
1311 	to_subsys(item)->cntlid_max = cntlid_max;
1312 	up_write(&nvmet_config_sem);
1313 	return cnt;
1314 
1315 out_unlock:
1316 	up_write(&nvmet_config_sem);
1317 	return -EINVAL;
1318 }
1319 CONFIGFS_ATTR(nvmet_subsys_, attr_cntlid_max);
1320 
1321 static ssize_t nvmet_subsys_attr_model_show(struct config_item *item,
1322 					    char *page)
1323 {
1324 	struct nvmet_subsys *subsys = to_subsys(item);
1325 
1326 	return snprintf(page, PAGE_SIZE, "%s\n", subsys->model_number);
1327 }
1328 
1329 static ssize_t nvmet_subsys_attr_model_store_locked(struct nvmet_subsys *subsys,
1330 		const char *page, size_t count)
1331 {
1332 	int pos = 0, len;
1333 	char *val;
1334 
1335 	if (subsys->subsys_discovered) {
1336 		pr_err("Can't set model number. %s is already assigned\n",
1337 		       subsys->model_number);
1338 		return -EINVAL;
1339 	}
1340 
1341 	len = strcspn(page, "\n");
1342 	if (!len)
1343 		return -EINVAL;
1344 
1345 	if (len > NVMET_MN_MAX_SIZE) {
1346 		pr_err("Model number size cannot exceed %d bytes\n",
1347 		       NVMET_MN_MAX_SIZE);
1348 		return -EINVAL;
1349 	}
1350 
1351 	for (pos = 0; pos < len; pos++) {
1352 		if (!nvmet_is_ascii(page[pos]))
1353 			return -EINVAL;
1354 	}
1355 
1356 	val = kmemdup_nul(page, len, GFP_KERNEL);
1357 	if (!val)
1358 		return -ENOMEM;
1359 	kfree(subsys->model_number);
1360 	subsys->model_number = val;
1361 	return count;
1362 }
1363 
1364 static ssize_t nvmet_subsys_attr_model_store(struct config_item *item,
1365 					     const char *page, size_t count)
1366 {
1367 	struct nvmet_subsys *subsys = to_subsys(item);
1368 	ssize_t ret;
1369 
1370 	down_write(&nvmet_config_sem);
1371 	mutex_lock(&subsys->lock);
1372 	ret = nvmet_subsys_attr_model_store_locked(subsys, page, count);
1373 	mutex_unlock(&subsys->lock);
1374 	up_write(&nvmet_config_sem);
1375 
1376 	return ret;
1377 }
1378 CONFIGFS_ATTR(nvmet_subsys_, attr_model);
1379 
1380 static ssize_t nvmet_subsys_attr_ieee_oui_show(struct config_item *item,
1381 					    char *page)
1382 {
1383 	struct nvmet_subsys *subsys = to_subsys(item);
1384 
1385 	return sysfs_emit(page, "0x%06x\n", subsys->ieee_oui);
1386 }
1387 
1388 static ssize_t nvmet_subsys_attr_ieee_oui_store_locked(struct nvmet_subsys *subsys,
1389 		const char *page, size_t count)
1390 {
1391 	uint32_t val = 0;
1392 	int ret;
1393 
1394 	if (subsys->subsys_discovered) {
1395 		pr_err("Can't set IEEE OUI. 0x%06x is already assigned\n",
1396 		      subsys->ieee_oui);
1397 		return -EINVAL;
1398 	}
1399 
1400 	ret = kstrtou32(page, 0, &val);
1401 	if (ret < 0)
1402 		return ret;
1403 
1404 	if (val >= 0x1000000)
1405 		return -EINVAL;
1406 
1407 	subsys->ieee_oui = val;
1408 
1409 	return count;
1410 }
1411 
1412 static ssize_t nvmet_subsys_attr_ieee_oui_store(struct config_item *item,
1413 					     const char *page, size_t count)
1414 {
1415 	struct nvmet_subsys *subsys = to_subsys(item);
1416 	ssize_t ret;
1417 
1418 	down_write(&nvmet_config_sem);
1419 	mutex_lock(&subsys->lock);
1420 	ret = nvmet_subsys_attr_ieee_oui_store_locked(subsys, page, count);
1421 	mutex_unlock(&subsys->lock);
1422 	up_write(&nvmet_config_sem);
1423 
1424 	return ret;
1425 }
1426 CONFIGFS_ATTR(nvmet_subsys_, attr_ieee_oui);
1427 
1428 static ssize_t nvmet_subsys_attr_firmware_show(struct config_item *item,
1429 					    char *page)
1430 {
1431 	struct nvmet_subsys *subsys = to_subsys(item);
1432 
1433 	return sysfs_emit(page, "%s\n", subsys->firmware_rev);
1434 }
1435 
1436 static ssize_t nvmet_subsys_attr_firmware_store_locked(struct nvmet_subsys *subsys,
1437 		const char *page, size_t count)
1438 {
1439 	int pos = 0, len;
1440 	char *val;
1441 
1442 	if (subsys->subsys_discovered) {
1443 		pr_err("Can't set firmware revision. %s is already assigned\n",
1444 		       subsys->firmware_rev);
1445 		return -EINVAL;
1446 	}
1447 
1448 	len = strcspn(page, "\n");
1449 	if (!len)
1450 		return -EINVAL;
1451 
1452 	if (len > NVMET_FR_MAX_SIZE) {
1453 		pr_err("Firmware revision size cannot exceed %d bytes\n",
1454 		       NVMET_FR_MAX_SIZE);
1455 		return -EINVAL;
1456 	}
1457 
1458 	for (pos = 0; pos < len; pos++) {
1459 		if (!nvmet_is_ascii(page[pos]))
1460 			return -EINVAL;
1461 	}
1462 
1463 	val = kmemdup_nul(page, len, GFP_KERNEL);
1464 	if (!val)
1465 		return -ENOMEM;
1466 
1467 	kfree(subsys->firmware_rev);
1468 
1469 	subsys->firmware_rev = val;
1470 
1471 	return count;
1472 }
1473 
1474 static ssize_t nvmet_subsys_attr_firmware_store(struct config_item *item,
1475 					     const char *page, size_t count)
1476 {
1477 	struct nvmet_subsys *subsys = to_subsys(item);
1478 	ssize_t ret;
1479 
1480 	down_write(&nvmet_config_sem);
1481 	mutex_lock(&subsys->lock);
1482 	ret = nvmet_subsys_attr_firmware_store_locked(subsys, page, count);
1483 	mutex_unlock(&subsys->lock);
1484 	up_write(&nvmet_config_sem);
1485 
1486 	return ret;
1487 }
1488 CONFIGFS_ATTR(nvmet_subsys_, attr_firmware);
1489 
1490 #ifdef CONFIG_BLK_DEV_INTEGRITY
1491 static ssize_t nvmet_subsys_attr_pi_enable_show(struct config_item *item,
1492 						char *page)
1493 {
1494 	return snprintf(page, PAGE_SIZE, "%d\n", to_subsys(item)->pi_support);
1495 }
1496 
1497 static ssize_t nvmet_subsys_attr_pi_enable_store(struct config_item *item,
1498 						 const char *page, size_t count)
1499 {
1500 	struct nvmet_subsys *subsys = to_subsys(item);
1501 	bool pi_enable;
1502 
1503 	if (kstrtobool(page, &pi_enable))
1504 		return -EINVAL;
1505 
1506 	subsys->pi_support = pi_enable;
1507 	return count;
1508 }
1509 CONFIGFS_ATTR(nvmet_subsys_, attr_pi_enable);
1510 #endif
1511 
1512 static ssize_t nvmet_subsys_attr_qid_max_show(struct config_item *item,
1513 					      char *page)
1514 {
1515 	return snprintf(page, PAGE_SIZE, "%u\n", to_subsys(item)->max_qid);
1516 }
1517 
1518 static ssize_t nvmet_subsys_attr_qid_max_store(struct config_item *item,
1519 					       const char *page, size_t cnt)
1520 {
1521 	struct nvmet_subsys *subsys = to_subsys(item);
1522 	struct nvmet_ctrl *ctrl;
1523 	u16 qid_max;
1524 
1525 	if (sscanf(page, "%hu\n", &qid_max) != 1)
1526 		return -EINVAL;
1527 
1528 	if (qid_max < 1 || qid_max > NVMET_NR_QUEUES)
1529 		return -EINVAL;
1530 
1531 	down_write(&nvmet_config_sem);
1532 	subsys->max_qid = qid_max;
1533 
1534 	/* Force reconnect */
1535 	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
1536 		ctrl->ops->delete_ctrl(ctrl);
1537 	up_write(&nvmet_config_sem);
1538 
1539 	return cnt;
1540 }
1541 CONFIGFS_ATTR(nvmet_subsys_, attr_qid_max);
1542 
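/*
 * Editorial example (not part of the original source): adjusting the
 * queue count. Note that the store above deletes all live controllers
 * so that hosts reconnect with the new limit:
 *
 *   echo 8 > /sys/kernel/config/nvmet/subsystems/<nqn>/attr_qid_max
 */
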
1543 static struct configfs_attribute *nvmet_subsys_attrs[] = {
1544 	&nvmet_subsys_attr_attr_allow_any_host,
1545 	&nvmet_subsys_attr_attr_version,
1546 	&nvmet_subsys_attr_attr_serial,
1547 	&nvmet_subsys_attr_attr_cntlid_min,
1548 	&nvmet_subsys_attr_attr_cntlid_max,
1549 	&nvmet_subsys_attr_attr_model,
1550 	&nvmet_subsys_attr_attr_qid_max,
1551 	&nvmet_subsys_attr_attr_ieee_oui,
1552 	&nvmet_subsys_attr_attr_firmware,
1553 #ifdef CONFIG_BLK_DEV_INTEGRITY
1554 	&nvmet_subsys_attr_attr_pi_enable,
1555 #endif
1556 	NULL,
1557 };
1558 
1559 /*
1560  * Subsystem structures & directory operation functions below
1561  */
1562 static void nvmet_subsys_release(struct config_item *item)
1563 {
1564 	struct nvmet_subsys *subsys = to_subsys(item);
1565 
1566 	nvmet_subsys_del_ctrls(subsys);
1567 	nvmet_subsys_put(subsys);
1568 }
1569 
1570 static struct configfs_item_operations nvmet_subsys_item_ops = {
1571 	.release		= nvmet_subsys_release,
1572 };
1573 
1574 static const struct config_item_type nvmet_subsys_type = {
1575 	.ct_item_ops		= &nvmet_subsys_item_ops,
1576 	.ct_attrs		= nvmet_subsys_attrs,
1577 	.ct_owner		= THIS_MODULE,
1578 };
1579 
1580 static struct config_group *nvmet_subsys_make(struct config_group *group,
1581 		const char *name)
1582 {
1583 	struct nvmet_subsys *subsys;
1584 
1585 	if (sysfs_streq(name, NVME_DISC_SUBSYS_NAME)) {
1586 		pr_err("can't create discovery subsystem through configfs\n");
1587 		return ERR_PTR(-EINVAL);
1588 	}
1589 
1590 	subsys = nvmet_subsys_alloc(name, NVME_NQN_NVME);
1591 	if (IS_ERR(subsys))
1592 		return ERR_CAST(subsys);
1593 
1594 	config_group_init_type_name(&subsys->group, name, &nvmet_subsys_type);
1595 
1596 	config_group_init_type_name(&subsys->namespaces_group,
1597 			"namespaces", &nvmet_namespaces_type);
1598 	configfs_add_default_group(&subsys->namespaces_group, &subsys->group);
1599 
1600 	config_group_init_type_name(&subsys->allowed_hosts_group,
1601 			"allowed_hosts", &nvmet_allowed_hosts_type);
1602 	configfs_add_default_group(&subsys->allowed_hosts_group,
1603 			&subsys->group);
1604 
1605 	nvmet_add_passthru_group(subsys);
1606 
1607 	return &subsys->group;
1608 }
1609 
1610 static struct configfs_group_operations nvmet_subsystems_group_ops = {
1611 	.make_group		= nvmet_subsys_make,
1612 };
1613 
1614 static const struct config_item_type nvmet_subsystems_type = {
1615 	.ct_group_ops		= &nvmet_subsystems_group_ops,
1616 	.ct_owner		= THIS_MODULE,
1617 };
1618 
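/*
 * Editorial example (not part of the original source): a subsystem is
 * created with mkdir, using its NQN as the directory name; the
 * namespaces/, allowed_hosts/ and (if configured) passthru/ default
 * groups appear automatically. The NQN below is arbitrary:
 *
 *   mkdir /sys/kernel/config/nvmet/subsystems/nqn.2016-06.io.example:testnqn
 *
 * The well-known discovery NQN is rejected by nvmet_subsys_make().
 */
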
1619 static ssize_t nvmet_referral_enable_show(struct config_item *item,
1620 		char *page)
1621 {
1622 	return snprintf(page, PAGE_SIZE, "%d\n", to_nvmet_port(item)->enabled);
1623 }
1624 
1625 static ssize_t nvmet_referral_enable_store(struct config_item *item,
1626 		const char *page, size_t count)
1627 {
1628 	struct nvmet_port *parent = to_nvmet_port(item->ci_parent->ci_parent);
1629 	struct nvmet_port *port = to_nvmet_port(item);
1630 	bool enable;
1631 
1632 	if (kstrtobool(page, &enable))
1633 		goto inval;
1634 
1635 	if (enable)
1636 		nvmet_referral_enable(parent, port);
1637 	else
1638 		nvmet_referral_disable(parent, port);
1639 
1640 	return count;
1641 inval:
1642 	pr_err("Invalid value '%s' for enable\n", page);
1643 	return -EINVAL;
1644 }
1645 
1646 CONFIGFS_ATTR(nvmet_referral_, enable);
1647 
1648 /*
1649  * Discovery Service subsystem definitions
1650  */
1651 static struct configfs_attribute *nvmet_referral_attrs[] = {
1652 	&nvmet_attr_addr_adrfam,
1653 	&nvmet_attr_addr_portid,
1654 	&nvmet_attr_addr_treq,
1655 	&nvmet_attr_addr_traddr,
1656 	&nvmet_attr_addr_trsvcid,
1657 	&nvmet_attr_addr_trtype,
1658 	&nvmet_referral_attr_enable,
1659 	NULL,
1660 };
1661 
1662 static void nvmet_referral_notify(struct config_group *group,
1663 		struct config_item *item)
1664 {
1665 	struct nvmet_port *parent = to_nvmet_port(item->ci_parent->ci_parent);
1666 	struct nvmet_port *port = to_nvmet_port(item);
1667 
1668 	nvmet_referral_disable(parent, port);
1669 }
1670 
1671 static void nvmet_referral_release(struct config_item *item)
1672 {
1673 	struct nvmet_port *port = to_nvmet_port(item);
1674 
1675 	kfree(port);
1676 }
1677 
1678 static struct configfs_item_operations nvmet_referral_item_ops = {
1679 	.release	= nvmet_referral_release,
1680 };
1681 
1682 static const struct config_item_type nvmet_referral_type = {
1683 	.ct_owner	= THIS_MODULE,
1684 	.ct_attrs	= nvmet_referral_attrs,
1685 	.ct_item_ops	= &nvmet_referral_item_ops,
1686 };
1687 
1688 static struct config_group *nvmet_referral_make(
1689 		struct config_group *group, const char *name)
1690 {
1691 	struct nvmet_port *port;
1692 
1693 	port = kzalloc(sizeof(*port), GFP_KERNEL);
1694 	if (!port)
1695 		return ERR_PTR(-ENOMEM);
1696 
1697 	INIT_LIST_HEAD(&port->entry);
1698 	config_group_init_type_name(&port->group, name, &nvmet_referral_type);
1699 
1700 	return &port->group;
1701 }
1702 
1703 static struct configfs_group_operations nvmet_referral_group_ops = {
1704 	.make_group		= nvmet_referral_make,
1705 	.disconnect_notify	= nvmet_referral_notify,
1706 };
1707 
1708 static const struct config_item_type nvmet_referrals_type = {
1709 	.ct_owner	= THIS_MODULE,
1710 	.ct_group_ops	= &nvmet_referral_group_ops,
1711 };
1712 
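/*
 * Editorial example (not part of the original source): a referral is a
 * sub-port created under a real port, filled in with the same addr_*
 * attributes and then switched on; the directory name is arbitrary:
 *
 *   mkdir /sys/kernel/config/nvmet/ports/1/referrals/peer1
 *   echo tcp      > .../referrals/peer1/addr_trtype
 *   echo ipv4     > .../referrals/peer1/addr_adrfam
 *   echo 10.0.0.2 > .../referrals/peer1/addr_traddr
 *   echo 4420     > .../referrals/peer1/addr_trsvcid
 *   echo 1        > .../referrals/peer1/enable
 */
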
1713 static struct nvmet_type_name_map nvmet_ana_state[] = {
1714 	{ NVME_ANA_OPTIMIZED,		"optimized" },
1715 	{ NVME_ANA_NONOPTIMIZED,	"non-optimized" },
1716 	{ NVME_ANA_INACCESSIBLE,	"inaccessible" },
1717 	{ NVME_ANA_PERSISTENT_LOSS,	"persistent-loss" },
1718 	{ NVME_ANA_CHANGE,		"change" },
1719 };
1720 
1721 static ssize_t nvmet_ana_group_ana_state_show(struct config_item *item,
1722 		char *page)
1723 {
1724 	struct nvmet_ana_group *grp = to_ana_group(item);
1725 	enum nvme_ana_state state = grp->port->ana_state[grp->grpid];
1726 	int i;
1727 
1728 	for (i = 0; i < ARRAY_SIZE(nvmet_ana_state); i++) {
1729 		if (state == nvmet_ana_state[i].type)
1730 			return sprintf(page, "%s\n", nvmet_ana_state[i].name);
1731 	}
1732 
1733 	return sprintf(page, "\n");
1734 }
1735 
1736 static ssize_t nvmet_ana_group_ana_state_store(struct config_item *item,
1737 		const char *page, size_t count)
1738 {
1739 	struct nvmet_ana_group *grp = to_ana_group(item);
1740 	enum nvme_ana_state *ana_state = grp->port->ana_state;
1741 	int i;
1742 
1743 	for (i = 0; i < ARRAY_SIZE(nvmet_ana_state); i++) {
1744 		if (sysfs_streq(page, nvmet_ana_state[i].name))
1745 			goto found;
1746 	}
1747 
1748 	pr_err("Invalid value '%s' for ana_state\n", page);
1749 	return -EINVAL;
1750 
1751 found:
1752 	down_write(&nvmet_ana_sem);
1753 	ana_state[grp->grpid] = (enum nvme_ana_state) nvmet_ana_state[i].type;
1754 	nvmet_ana_chgcnt++;
1755 	up_write(&nvmet_ana_sem);
1756 	nvmet_port_send_ana_event(grp->port);
1757 	return count;
1758 }
1759 
1760 CONFIGFS_ATTR(nvmet_ana_group_, ana_state);
1761 
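/*
 * Editorial example (not part of the original source): flipping the
 * state of an ANA group; valid values are the names in
 * nvmet_ana_state[] above. Each store bumps the change count and sends
 * an ANA change AEN:
 *
 *   echo inaccessible > /sys/kernel/config/nvmet/ports/1/ana_groups/2/ana_state
 */
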
1762 static struct configfs_attribute *nvmet_ana_group_attrs[] = {
1763 	&nvmet_ana_group_attr_ana_state,
1764 	NULL,
1765 };
1766 
1767 static void nvmet_ana_group_release(struct config_item *item)
1768 {
1769 	struct nvmet_ana_group *grp = to_ana_group(item);
1770 
1771 	if (grp == &grp->port->ana_default_group)
1772 		return;
1773 
1774 	down_write(&nvmet_ana_sem);
1775 	grp->port->ana_state[grp->grpid] = NVME_ANA_INACCESSIBLE;
1776 	nvmet_ana_group_enabled[grp->grpid]--;
1777 	up_write(&nvmet_ana_sem);
1778 
1779 	nvmet_port_send_ana_event(grp->port);
1780 	kfree(grp);
1781 }
1782 
1783 static struct configfs_item_operations nvmet_ana_group_item_ops = {
1784 	.release		= nvmet_ana_group_release,
1785 };
1786 
1787 static const struct config_item_type nvmet_ana_group_type = {
1788 	.ct_item_ops		= &nvmet_ana_group_item_ops,
1789 	.ct_attrs		= nvmet_ana_group_attrs,
1790 	.ct_owner		= THIS_MODULE,
1791 };
1792 
1793 static struct config_group *nvmet_ana_groups_make_group(
1794 		struct config_group *group, const char *name)
1795 {
1796 	struct nvmet_port *port = ana_groups_to_port(&group->cg_item);
1797 	struct nvmet_ana_group *grp;
1798 	u32 grpid;
1799 	int ret;
1800 
1801 	ret = kstrtou32(name, 0, &grpid);
1802 	if (ret)
1803 		goto out;
1804 
1805 	ret = -EINVAL;
1806 	if (grpid <= 1 || grpid > NVMET_MAX_ANAGRPS)
1807 		goto out;
1808 
1809 	ret = -ENOMEM;
1810 	grp = kzalloc(sizeof(*grp), GFP_KERNEL);
1811 	if (!grp)
1812 		goto out;
1813 	grp->port = port;
1814 	grp->grpid = grpid;
1815 
1816 	down_write(&nvmet_ana_sem);
1817 	grpid = array_index_nospec(grpid, NVMET_MAX_ANAGRPS);
1818 	nvmet_ana_group_enabled[grpid]++;
1819 	up_write(&nvmet_ana_sem);
1820 
1821 	nvmet_port_send_ana_event(grp->port);
1822 
1823 	config_group_init_type_name(&grp->group, name, &nvmet_ana_group_type);
1824 	return &grp->group;
1825 out:
1826 	return ERR_PTR(ret);
1827 }
1828 
1829 static struct configfs_group_operations nvmet_ana_groups_group_ops = {
1830 	.make_group		= nvmet_ana_groups_make_group,
1831 };
1832 
1833 static const struct config_item_type nvmet_ana_groups_type = {
1834 	.ct_group_ops		= &nvmet_ana_groups_group_ops,
1835 	.ct_owner		= THIS_MODULE,
1836 };
1837 
1838 /*
1839  * Ports definitions.
1840  */
1841 static void nvmet_port_release(struct config_item *item)
1842 {
1843 	struct nvmet_port *port = to_nvmet_port(item);
1844 
1845 	/* Let in-flight controller teardown complete */
1846 	flush_workqueue(nvmet_wq);
1847 	list_del(&port->global_entry);
1848 
1849 	key_put(port->keyring);
1850 	kfree(port->ana_state);
1851 	kfree(port);
1852 }
1853 
1854 static struct configfs_attribute *nvmet_port_attrs[] = {
1855 	&nvmet_attr_addr_adrfam,
1856 	&nvmet_attr_addr_treq,
1857 	&nvmet_attr_addr_traddr,
1858 	&nvmet_attr_addr_trsvcid,
1859 	&nvmet_attr_addr_trtype,
1860 	&nvmet_attr_addr_tsas,
1861 	&nvmet_attr_param_inline_data_size,
1862 #ifdef CONFIG_BLK_DEV_INTEGRITY
1863 	&nvmet_attr_param_pi_enable,
1864 #endif
1865 	NULL,
1866 };
1867 
1868 static struct configfs_item_operations nvmet_port_item_ops = {
1869 	.release		= nvmet_port_release,
1870 };
1871 
1872 static const struct config_item_type nvmet_port_type = {
1873 	.ct_attrs		= nvmet_port_attrs,
1874 	.ct_item_ops		= &nvmet_port_item_ops,
1875 	.ct_owner		= THIS_MODULE,
1876 };
1877 
1878 static struct config_group *nvmet_ports_make(struct config_group *group,
1879 		const char *name)
1880 {
1881 	struct nvmet_port *port;
1882 	u16 portid;
1883 	u32 i;
1884 
1885 	if (kstrtou16(name, 0, &portid))
1886 		return ERR_PTR(-EINVAL);
1887 
1888 	port = kzalloc(sizeof(*port), GFP_KERNEL);
1889 	if (!port)
1890 		return ERR_PTR(-ENOMEM);
1891 
1892 	port->ana_state = kcalloc(NVMET_MAX_ANAGRPS + 1,
1893 			sizeof(*port->ana_state), GFP_KERNEL);
1894 	if (!port->ana_state) {
1895 		kfree(port);
1896 		return ERR_PTR(-ENOMEM);
1897 	}
1898 
1899 	if (IS_ENABLED(CONFIG_NVME_TARGET_TCP_TLS) && nvme_keyring_id()) {
1900 		port->keyring = key_lookup(nvme_keyring_id());
1901 		if (IS_ERR(port->keyring)) {
1902 			pr_warn("NVMe keyring not available, disabling TLS\n");
1903 			port->keyring = NULL;
1904 		}
1905 	}
1906 
1907 	for (i = 1; i <= NVMET_MAX_ANAGRPS; i++) {
1908 		if (i == NVMET_DEFAULT_ANA_GRPID)
1909 			port->ana_state[i] = NVME_ANA_OPTIMIZED;
1910 		else
1911 			port->ana_state[i] = NVME_ANA_INACCESSIBLE;
1912 	}
1913 
1914 	list_add(&port->global_entry, &nvmet_ports_list);
1915 
1916 	INIT_LIST_HEAD(&port->entry);
1917 	INIT_LIST_HEAD(&port->subsystems);
1918 	INIT_LIST_HEAD(&port->referrals);
1919 	port->inline_data_size = -1;	/* < 0 == let the transport choose */
1920 
1921 	port->disc_addr.portid = cpu_to_le16(portid);
1922 	port->disc_addr.adrfam = NVMF_ADDR_FAMILY_MAX;
1923 	port->disc_addr.treq = NVMF_TREQ_DISABLE_SQFLOW;
1924 	config_group_init_type_name(&port->group, name, &nvmet_port_type);
1925 
1926 	config_group_init_type_name(&port->subsys_group,
1927 			"subsystems", &nvmet_port_subsys_type);
1928 	configfs_add_default_group(&port->subsys_group, &port->group);
1929 
1930 	config_group_init_type_name(&port->referrals_group,
1931 			"referrals", &nvmet_referrals_type);
1932 	configfs_add_default_group(&port->referrals_group, &port->group);
1933 
1934 	config_group_init_type_name(&port->ana_groups_group,
1935 			"ana_groups", &nvmet_ana_groups_type);
1936 	configfs_add_default_group(&port->ana_groups_group, &port->group);
1937 
1938 	port->ana_default_group.port = port;
1939 	port->ana_default_group.grpid = NVMET_DEFAULT_ANA_GRPID;
1940 	config_group_init_type_name(&port->ana_default_group.group,
1941 			__stringify(NVMET_DEFAULT_ANA_GRPID),
1942 			&nvmet_ana_group_type);
1943 	configfs_add_default_group(&port->ana_default_group.group,
1944 			&port->ana_groups_group);
1945 
1946 	return &port->group;
1947 }
1948 
1949 static struct configfs_group_operations nvmet_ports_group_ops = {
1950 	.make_group		= nvmet_ports_make,
1951 };
1952 
1953 static const struct config_item_type nvmet_ports_type = {
1954 	.ct_group_ops		= &nvmet_ports_group_ops,
1955 	.ct_owner		= THIS_MODULE,
1956 };
1957 
1958 static struct config_group nvmet_subsystems_group;
1959 static struct config_group nvmet_ports_group;
1960 
1961 #ifdef CONFIG_NVME_TARGET_AUTH
1962 static ssize_t nvmet_host_dhchap_key_show(struct config_item *item,
1963 		char *page)
1964 {
1965 	u8 *dhchap_secret = to_host(item)->dhchap_secret;
1966 
1967 	if (!dhchap_secret)
1968 		return sprintf(page, "\n");
1969 	return sprintf(page, "%s\n", dhchap_secret);
1970 }
1971 
1972 static ssize_t nvmet_host_dhchap_key_store(struct config_item *item,
1973 		const char *page, size_t count)
1974 {
1975 	struct nvmet_host *host = to_host(item);
1976 	int ret;
1977 
1978 	ret = nvmet_auth_set_key(host, page, false);
1979 	/*
1980 	 * Re-authentication is a soft state, so keep the
1981 	 * current authentication valid until the host
1982 	 * requests re-authentication.
1983 	 */
1984 	return ret < 0 ? ret : count;
1985 }
1986 
1987 CONFIGFS_ATTR(nvmet_host_, dhchap_key);
1988 
1989 static ssize_t nvmet_host_dhchap_ctrl_key_show(struct config_item *item,
1990 		char *page)
1991 {
1992 	u8 *dhchap_secret = to_host(item)->dhchap_ctrl_secret;
1993 
1994 	if (!dhchap_secret)
1995 		return sprintf(page, "\n");
1996 	return sprintf(page, "%s\n", dhchap_secret);
1997 }
1998 
1999 static ssize_t nvmet_host_dhchap_ctrl_key_store(struct config_item *item,
2000 		const char *page, size_t count)
2001 {
2002 	struct nvmet_host *host = to_host(item);
2003 	int ret;
2004 
2005 	ret = nvmet_auth_set_key(host, page, true);
2006 	/*
2007 	 * Re-authentication is a soft state, so keep the
2008 	 * current authentication valid until the host
2009 	 * requests re-authentication.
2010 	 */
2011 	return ret < 0 ? ret : count;
2012 }
2013 
2014 CONFIGFS_ATTR(nvmet_host_, dhchap_ctrl_key);
2015 
2016 static ssize_t nvmet_host_dhchap_hash_show(struct config_item *item,
2017 		char *page)
2018 {
2019 	struct nvmet_host *host = to_host(item);
2020 	const char *hash_name = nvme_auth_hmac_name(host->dhchap_hash_id);
2021 
2022 	return sprintf(page, "%s\n", hash_name ? hash_name : "none");
2023 }
2024 
2025 static ssize_t nvmet_host_dhchap_hash_store(struct config_item *item,
2026 		const char *page, size_t count)
2027 {
2028 	struct nvmet_host *host = to_host(item);
2029 	u8 hmac_id;
2030 
2031 	hmac_id = nvme_auth_hmac_id(page);
2032 	if (hmac_id == NVME_AUTH_HASH_INVALID)
2033 		return -EINVAL;
2034 	if (!crypto_has_shash(nvme_auth_hmac_name(hmac_id), 0, 0))
2035 		return -ENOTSUPP;
2036 	host->dhchap_hash_id = hmac_id;
2037 	return count;
2038 }
2039 
2040 CONFIGFS_ATTR(nvmet_host_, dhchap_hash);
2041 
2042 static ssize_t nvmet_host_dhchap_dhgroup_show(struct config_item *item,
2043 		char *page)
2044 {
2045 	struct nvmet_host *host = to_host(item);
2046 	const char *dhgroup = nvme_auth_dhgroup_name(host->dhchap_dhgroup_id);
2047 
2048 	return sprintf(page, "%s\n", dhgroup ? dhgroup : "none");
2049 }
2050 
2051 static ssize_t nvmet_host_dhchap_dhgroup_store(struct config_item *item,
2052 		const char *page, size_t count)
2053 {
2054 	struct nvmet_host *host = to_host(item);
2055 	int dhgroup_id;
2056 
2057 	dhgroup_id = nvme_auth_dhgroup_id(page);
2058 	if (dhgroup_id == NVME_AUTH_DHGROUP_INVALID)
2059 		return -EINVAL;
2060 	if (dhgroup_id != NVME_AUTH_DHGROUP_NULL) {
2061 		const char *kpp = nvme_auth_dhgroup_kpp(dhgroup_id);
2062 
2063 		if (!crypto_has_kpp(kpp, 0, 0))
2064 			return -EINVAL;
2065 	}
2066 	host->dhchap_dhgroup_id = dhgroup_id;
2067 	return count;
2068 }
2069 
2070 CONFIGFS_ATTR(nvmet_host_, dhchap_dhgroup);
2071 
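/*
 * Editorial example (not part of the original source): configuring
 * DH-HMAC-CHAP for a host entry. The key is expected in the NVMe
 * secret representation parsed by nvmet_auth_set_key() (e.g.
 * "DHHC-1:00:<base64 secret>:", placeholder material below); hash and
 * dhgroup take the kernel crypto names used above:
 *
 *   echo "DHHC-1:00:<base64-secret>:" > /sys/kernel/config/nvmet/hosts/<hostnqn>/dhchap_key
 *   echo "hmac(sha512)" > /sys/kernel/config/nvmet/hosts/<hostnqn>/dhchap_hash
 *   echo ffdhe2048      > /sys/kernel/config/nvmet/hosts/<hostnqn>/dhchap_dhgroup
 */
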
2072 static struct configfs_attribute *nvmet_host_attrs[] = {
2073 	&nvmet_host_attr_dhchap_key,
2074 	&nvmet_host_attr_dhchap_ctrl_key,
2075 	&nvmet_host_attr_dhchap_hash,
2076 	&nvmet_host_attr_dhchap_dhgroup,
2077 	NULL,
2078 };
2079 #endif /* CONFIG_NVME_TARGET_AUTH */
2080 
2081 static void nvmet_host_release(struct config_item *item)
2082 {
2083 	struct nvmet_host *host = to_host(item);
2084 
2085 #ifdef CONFIG_NVME_TARGET_AUTH
2086 	kfree(host->dhchap_secret);
2087 	kfree(host->dhchap_ctrl_secret);
2088 #endif
2089 	kfree(host);
2090 }
2091 
2092 static struct configfs_item_operations nvmet_host_item_ops = {
2093 	.release		= nvmet_host_release,
2094 };
2095 
2096 static const struct config_item_type nvmet_host_type = {
2097 	.ct_item_ops		= &nvmet_host_item_ops,
2098 #ifdef CONFIG_NVME_TARGET_AUTH
2099 	.ct_attrs		= nvmet_host_attrs,
2100 #endif
2101 	.ct_owner		= THIS_MODULE,
2102 };
2103 
2104 static struct config_group *nvmet_hosts_make_group(struct config_group *group,
2105 		const char *name)
2106 {
2107 	struct nvmet_host *host;
2108 
2109 	host = kzalloc(sizeof(*host), GFP_KERNEL);
2110 	if (!host)
2111 		return ERR_PTR(-ENOMEM);
2112 
2113 #ifdef CONFIG_NVME_TARGET_AUTH
2114 	/* Default to SHA256 */
2115 	host->dhchap_hash_id = NVME_AUTH_HASH_SHA256;
2116 #endif
2117 
2118 	config_group_init_type_name(&host->group, name, &nvmet_host_type);
2119 
2120 	return &host->group;
2121 }
2122 
2123 static struct configfs_group_operations nvmet_hosts_group_ops = {
2124 	.make_group		= nvmet_hosts_make_group,
2125 };
2126 
2127 static const struct config_item_type nvmet_hosts_type = {
2128 	.ct_group_ops		= &nvmet_hosts_group_ops,
2129 	.ct_owner		= THIS_MODULE,
2130 };
2131 
2132 static struct config_group nvmet_hosts_group;
2133 
2134 static const struct config_item_type nvmet_root_type = {
2135 	.ct_owner		= THIS_MODULE,
2136 };
2137 
2138 static struct configfs_subsystem nvmet_configfs_subsystem = {
2139 	.su_group = {
2140 		.cg_item = {
2141 			.ci_namebuf	= "nvmet",
2142 			.ci_type	= &nvmet_root_type,
2143 		},
2144 	},
2145 };
2146 
2147 int __init nvmet_init_configfs(void)
2148 {
2149 	int ret;
2150 
2151 	config_group_init(&nvmet_configfs_subsystem.su_group);
2152 	mutex_init(&nvmet_configfs_subsystem.su_mutex);
2153 
2154 	config_group_init_type_name(&nvmet_subsystems_group,
2155 			"subsystems", &nvmet_subsystems_type);
2156 	configfs_add_default_group(&nvmet_subsystems_group,
2157 			&nvmet_configfs_subsystem.su_group);
2158 
2159 	config_group_init_type_name(&nvmet_ports_group,
2160 			"ports", &nvmet_ports_type);
2161 	configfs_add_default_group(&nvmet_ports_group,
2162 			&nvmet_configfs_subsystem.su_group);
2163 
2164 	config_group_init_type_name(&nvmet_hosts_group,
2165 			"hosts", &nvmet_hosts_type);
2166 	configfs_add_default_group(&nvmet_hosts_group,
2167 			&nvmet_configfs_subsystem.su_group);
2168 
2169 	ret = configfs_register_subsystem(&nvmet_configfs_subsystem);
2170 	if (ret) {
2171 		pr_err("configfs_register_subsystem: %d\n", ret);
2172 		return ret;
2173 	}
2174 
2175 	return 0;
2176 }
2177 
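/*
 * Editorial note (not part of the original source): the tree
 * registered here lives under the configfs mount point, conventionally
 * /sys/kernel/config:
 *
 *   mount -t configfs none /sys/kernel/config
 *   ls /sys/kernel/config/nvmet    # hosts/  ports/  subsystems/
 */
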
2178 void __exit nvmet_exit_configfs(void)
2179 {
2180 	configfs_unregister_subsystem(&nvmet_configfs_subsystem);
2181 }
2182