// SPDX-License-Identifier: GPL-2.0
/*
 * Configfs interface for the NVMe target.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kstrtox.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/ctype.h>
#include <linux/pci.h>
#include <linux/pci-p2pdma.h>
#ifdef CONFIG_NVME_TARGET_AUTH
#include <linux/nvme-auth.h>
#endif
#include <linux/nvme-keyring.h>
#include <crypto/hash.h>
#include <crypto/kpp.h>
#include <linux/nospec.h>

#include "nvmet.h"

static const struct config_item_type nvmet_host_type;
static const struct config_item_type nvmet_subsys_type;

static LIST_HEAD(nvmet_ports_list);
struct list_head *nvmet_ports = &nvmet_ports_list;

struct nvmet_type_name_map {
	u8		type;
	const char	*name;
};

static struct nvmet_type_name_map nvmet_transport[] = {
	{ NVMF_TRTYPE_RDMA,	"rdma" },
	{ NVMF_TRTYPE_FC,	"fc" },
	{ NVMF_TRTYPE_TCP,	"tcp" },
	{ NVMF_TRTYPE_LOOP,	"loop" },
};

static const struct nvmet_type_name_map nvmet_addr_family[] = {
	{ NVMF_ADDR_FAMILY_PCI,		"pcie" },
	{ NVMF_ADDR_FAMILY_IP4,		"ipv4" },
	{ NVMF_ADDR_FAMILY_IP6,		"ipv6" },
	{ NVMF_ADDR_FAMILY_IB,		"ib" },
	{ NVMF_ADDR_FAMILY_FC,		"fc" },
	{ NVMF_ADDR_FAMILY_LOOP,	"loop" },
};

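/*
 * Illustrative configfs usage for the attributes defined in this file
 * (not kernel code; paths assume the default configfs mount at
 * /sys/kernel/config, and the port number and address values are
 * examples only):
 *
 *	mkdir /sys/kernel/config/nvmet/ports/1
 *	echo ipv4         > /sys/kernel/config/nvmet/ports/1/addr_adrfam
 *	echo tcp          > /sys/kernel/config/nvmet/ports/1/addr_trtype
 *	echo 192.168.0.10 > /sys/kernel/config/nvmet/ports/1/addr_traddr
 *	echo 4420         > /sys/kernel/config/nvmet/ports/1/addr_trsvcid
 */
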
static bool nvmet_is_port_enabled(struct nvmet_port *p, const char *caller)
{
	if (p->enabled)
		pr_err("Disable port '%u' before changing attribute in %s\n",
		       le16_to_cpu(p->disc_addr.portid), caller);
	return p->enabled;
}

/*
 * nvmet_port Generic ConfigFS definitions.
 * Used in any place in the ConfigFS tree that refers to an address.
 */
static ssize_t nvmet_addr_adrfam_show(struct config_item *item, char *page)
{
	u8 adrfam = to_nvmet_port(item)->disc_addr.adrfam;
	int i;

	for (i = 1; i < ARRAY_SIZE(nvmet_addr_family); i++) {
		if (nvmet_addr_family[i].type == adrfam)
			return snprintf(page, PAGE_SIZE, "%s\n",
					nvmet_addr_family[i].name);
	}

	return snprintf(page, PAGE_SIZE, "\n");
}

static ssize_t nvmet_addr_adrfam_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_port *port = to_nvmet_port(item);
	int i;

	if (nvmet_is_port_enabled(port, __func__))
		return -EACCES;

	for (i = 1; i < ARRAY_SIZE(nvmet_addr_family); i++) {
		if (sysfs_streq(page, nvmet_addr_family[i].name))
			goto found;
	}

	pr_err("Invalid value '%s' for adrfam\n", page);
	return -EINVAL;

found:
	port->disc_addr.adrfam = nvmet_addr_family[i].type;
	return count;
}

CONFIGFS_ATTR(nvmet_, addr_adrfam);

static ssize_t nvmet_addr_portid_show(struct config_item *item,
		char *page)
{
	__le16 portid = to_nvmet_port(item)->disc_addr.portid;

	return snprintf(page, PAGE_SIZE, "%d\n", le16_to_cpu(portid));
}

static ssize_t nvmet_addr_portid_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_port *port = to_nvmet_port(item);
	u16 portid = 0;

	if (kstrtou16(page, 0, &portid)) {
		pr_err("Invalid value '%s' for portid\n", page);
		return -EINVAL;
	}

	if (nvmet_is_port_enabled(port, __func__))
		return -EACCES;

	port->disc_addr.portid = cpu_to_le16(portid);
	return count;
}

CONFIGFS_ATTR(nvmet_, addr_portid);

static ssize_t nvmet_addr_traddr_show(struct config_item *item,
		char *page)
{
	struct nvmet_port *port = to_nvmet_port(item);

	return snprintf(page, PAGE_SIZE, "%s\n", port->disc_addr.traddr);
}

static ssize_t nvmet_addr_traddr_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_port *port = to_nvmet_port(item);

	if (count > NVMF_TRADDR_SIZE) {
		pr_err("Invalid value '%s' for traddr\n", page);
		return -EINVAL;
	}

	if (nvmet_is_port_enabled(port, __func__))
		return -EACCES;

	if (sscanf(page, "%s\n", port->disc_addr.traddr) != 1)
		return -EINVAL;
	return count;
}

CONFIGFS_ATTR(nvmet_, addr_traddr);

static const struct nvmet_type_name_map nvmet_addr_treq[] = {
	{ NVMF_TREQ_NOT_SPECIFIED,	"not specified" },
	{ NVMF_TREQ_REQUIRED,		"required" },
	{ NVMF_TREQ_NOT_REQUIRED,	"not required" },
};

static inline u8 nvmet_port_disc_addr_treq_mask(struct nvmet_port *port)
{
	return (port->disc_addr.treq & ~NVME_TREQ_SECURE_CHANNEL_MASK);
}

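/*
 * The TREQ byte of the discovery log entry holds more than the secure
 * channel requirement: only the low bits (NVME_TREQ_SECURE_CHANNEL_MASK)
 * carry the "required"/"not required"/"not specified" value, while the
 * remaining flag bits (e.g. SQ flow control disable, set by default in
 * nvmet_ports_make()) are preserved via nvmet_port_disc_addr_treq_mask()
 * whenever the attribute is rewritten.
 */
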
static ssize_t nvmet_addr_treq_show(struct config_item *item, char *page)
{
	u8 treq = nvmet_port_disc_addr_treq_secure_channel(to_nvmet_port(item));
	int i;

	for (i = 0; i < ARRAY_SIZE(nvmet_addr_treq); i++) {
		if (treq == nvmet_addr_treq[i].type)
			return snprintf(page, PAGE_SIZE, "%s\n",
					nvmet_addr_treq[i].name);
	}

	return snprintf(page, PAGE_SIZE, "\n");
}

static ssize_t nvmet_addr_treq_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_port *port = to_nvmet_port(item);
	u8 treq = nvmet_port_disc_addr_treq_mask(port);
	int i;

	if (nvmet_is_port_enabled(port, __func__))
		return -EACCES;

	for (i = 0; i < ARRAY_SIZE(nvmet_addr_treq); i++) {
		if (sysfs_streq(page, nvmet_addr_treq[i].name))
			goto found;
	}

	pr_err("Invalid value '%s' for treq\n", page);
	return -EINVAL;

found:
	if (port->disc_addr.trtype == NVMF_TRTYPE_TCP &&
	    port->disc_addr.tsas.tcp.sectype == NVMF_TCP_SECTYPE_TLS13) {
		switch (nvmet_addr_treq[i].type) {
		case NVMF_TREQ_NOT_SPECIFIED:
			pr_debug("treq '%s' not allowed for TLS1.3\n",
				 nvmet_addr_treq[i].name);
			return -EINVAL;
		case NVMF_TREQ_NOT_REQUIRED:
			pr_warn("Allowing non-TLS connections while TLS1.3 is enabled\n");
			break;
		default:
			break;
		}
	}
	treq |= nvmet_addr_treq[i].type;
	port->disc_addr.treq = treq;
	return count;
}

CONFIGFS_ATTR(nvmet_, addr_treq);

static ssize_t nvmet_addr_trsvcid_show(struct config_item *item,
		char *page)
{
	struct nvmet_port *port = to_nvmet_port(item);

	return snprintf(page, PAGE_SIZE, "%s\n", port->disc_addr.trsvcid);
}

static ssize_t nvmet_addr_trsvcid_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_port *port = to_nvmet_port(item);

	if (count > NVMF_TRSVCID_SIZE) {
		pr_err("Invalid value '%s' for trsvcid\n", page);
		return -EINVAL;
	}
	if (nvmet_is_port_enabled(port, __func__))
		return -EACCES;

	if (sscanf(page, "%s\n", port->disc_addr.trsvcid) != 1)
		return -EINVAL;
	return count;
}

CONFIGFS_ATTR(nvmet_, addr_trsvcid);

static ssize_t nvmet_param_inline_data_size_show(struct config_item *item,
		char *page)
{
	struct nvmet_port *port = to_nvmet_port(item);

	return snprintf(page, PAGE_SIZE, "%d\n", port->inline_data_size);
}

static ssize_t nvmet_param_inline_data_size_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_port *port = to_nvmet_port(item);
	int ret;

	if (nvmet_is_port_enabled(port, __func__))
		return -EACCES;
	ret = kstrtoint(page, 0, &port->inline_data_size);
	if (ret) {
		pr_err("Invalid value '%s' for inline_data_size\n", page);
		return -EINVAL;
	}
	return count;
}

CONFIGFS_ATTR(nvmet_, param_inline_data_size);

static ssize_t nvmet_param_max_queue_size_show(struct config_item *item,
		char *page)
{
	struct nvmet_port *port = to_nvmet_port(item);

	return snprintf(page, PAGE_SIZE, "%d\n", port->max_queue_size);
}

static ssize_t nvmet_param_max_queue_size_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_port *port = to_nvmet_port(item);
	int ret;

	if (nvmet_is_port_enabled(port, __func__))
		return -EACCES;
	ret = kstrtoint(page, 0, &port->max_queue_size);
	if (ret) {
		pr_err("Invalid value '%s' for max_queue_size\n", page);
		return -EINVAL;
	}
	return count;
}

CONFIGFS_ATTR(nvmet_, param_max_queue_size);

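/*
 * Both parameters above default to -1 ("let the transport choose", see
 * nvmet_ports_make()) and can only be changed while the port is disabled.
 * Illustrative example:
 *
 *	echo 16384 > /sys/kernel/config/nvmet/ports/1/param_inline_data_size
 */
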
#ifdef CONFIG_BLK_DEV_INTEGRITY
static ssize_t nvmet_param_pi_enable_show(struct config_item *item,
		char *page)
{
	struct nvmet_port *port = to_nvmet_port(item);

	return snprintf(page, PAGE_SIZE, "%d\n", port->pi_enable);
}

static ssize_t nvmet_param_pi_enable_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_port *port = to_nvmet_port(item);
	bool val;

	if (kstrtobool(page, &val))
		return -EINVAL;

	if (nvmet_is_port_enabled(port, __func__))
		return -EACCES;

	port->pi_enable = val;
	return count;
}

CONFIGFS_ATTR(nvmet_, param_pi_enable);
#endif

static ssize_t nvmet_addr_trtype_show(struct config_item *item,
		char *page)
{
	struct nvmet_port *port = to_nvmet_port(item);
	int i;

	for (i = 0; i < ARRAY_SIZE(nvmet_transport); i++) {
		if (port->disc_addr.trtype == nvmet_transport[i].type)
			return snprintf(page, PAGE_SIZE,
					"%s\n", nvmet_transport[i].name);
	}

	return sprintf(page, "\n");
}

static void nvmet_port_init_tsas_rdma(struct nvmet_port *port)
{
	port->disc_addr.tsas.rdma.qptype = NVMF_RDMA_QPTYPE_CONNECTED;
	port->disc_addr.tsas.rdma.prtype = NVMF_RDMA_PRTYPE_NOT_SPECIFIED;
	port->disc_addr.tsas.rdma.cms = NVMF_RDMA_CMS_RDMA_CM;
}

static void nvmet_port_init_tsas_tcp(struct nvmet_port *port, int sectype)
{
	port->disc_addr.tsas.tcp.sectype = sectype;
}

static ssize_t nvmet_addr_trtype_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_port *port = to_nvmet_port(item);
	int i;

	if (nvmet_is_port_enabled(port, __func__))
		return -EACCES;

	for (i = 0; i < ARRAY_SIZE(nvmet_transport); i++) {
		if (sysfs_streq(page, nvmet_transport[i].name))
			goto found;
	}

	pr_err("Invalid value '%s' for trtype\n", page);
	return -EINVAL;

found:
	memset(&port->disc_addr.tsas, 0, NVMF_TSAS_SIZE);
	port->disc_addr.trtype = nvmet_transport[i].type;
	if (port->disc_addr.trtype == NVMF_TRTYPE_RDMA)
		nvmet_port_init_tsas_rdma(port);
	else if (port->disc_addr.trtype == NVMF_TRTYPE_TCP)
		nvmet_port_init_tsas_tcp(port, NVMF_TCP_SECTYPE_NONE);
	return count;
}

CONFIGFS_ATTR(nvmet_, addr_trtype);

static const struct nvmet_type_name_map nvmet_addr_tsas_tcp[] = {
	{ NVMF_TCP_SECTYPE_NONE,	"none" },
	{ NVMF_TCP_SECTYPE_TLS13,	"tls1.3" },
};

static const struct nvmet_type_name_map nvmet_addr_tsas_rdma[] = {
	{ NVMF_RDMA_QPTYPE_CONNECTED,	"connected" },
	{ NVMF_RDMA_QPTYPE_DATAGRAM,	"datagram"  },
};

static ssize_t nvmet_addr_tsas_show(struct config_item *item,
		char *page)
{
	struct nvmet_port *port = to_nvmet_port(item);
	int i;

	if (port->disc_addr.trtype == NVMF_TRTYPE_TCP) {
		for (i = 0; i < ARRAY_SIZE(nvmet_addr_tsas_tcp); i++) {
			if (port->disc_addr.tsas.tcp.sectype == nvmet_addr_tsas_tcp[i].type)
				return sprintf(page, "%s\n", nvmet_addr_tsas_tcp[i].name);
		}
	} else if (port->disc_addr.trtype == NVMF_TRTYPE_RDMA) {
		for (i = 0; i < ARRAY_SIZE(nvmet_addr_tsas_rdma); i++) {
			if (port->disc_addr.tsas.rdma.qptype == nvmet_addr_tsas_rdma[i].type)
				return sprintf(page, "%s\n", nvmet_addr_tsas_rdma[i].name);
		}
	}
	return sprintf(page, "reserved\n");
}

static ssize_t nvmet_addr_tsas_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_port *port = to_nvmet_port(item);
	u8 treq = nvmet_port_disc_addr_treq_mask(port);
	u8 sectype;
	int i;

	if (nvmet_is_port_enabled(port, __func__))
		return -EACCES;

	if (port->disc_addr.trtype != NVMF_TRTYPE_TCP)
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(nvmet_addr_tsas_tcp); i++) {
		if (sysfs_streq(page, nvmet_addr_tsas_tcp[i].name)) {
			sectype = nvmet_addr_tsas_tcp[i].type;
			goto found;
		}
	}

	pr_err("Invalid value '%s' for tsas\n", page);
	return -EINVAL;

found:
	if (sectype == NVMF_TCP_SECTYPE_TLS13) {
		if (!IS_ENABLED(CONFIG_NVME_TARGET_TCP_TLS)) {
			pr_err("TLS is not supported\n");
			return -EINVAL;
		}
		if (!port->keyring) {
			pr_err("TLS keyring not configured\n");
			return -EINVAL;
		}
	}

	nvmet_port_init_tsas_tcp(port, sectype);
	/*
	 * If TLS is enabled, TREQ should default to 'required'.
	 */
	if (sectype == NVMF_TCP_SECTYPE_TLS13) {
		u8 sc = nvmet_port_disc_addr_treq_secure_channel(port);

		if (sc == NVMF_TREQ_NOT_SPECIFIED)
			treq |= NVMF_TREQ_REQUIRED;
		else
			treq |= sc;
	} else {
		treq |= NVMF_TREQ_NOT_SPECIFIED;
	}
	port->disc_addr.treq = treq;
	return count;
}

CONFIGFS_ATTR(nvmet_, addr_tsas);

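/*
 * Illustrative TLS configuration for a TCP port (requires
 * CONFIG_NVME_TARGET_TCP_TLS and a usable keyring, see nvmet_ports_make();
 * the port number is an example).  Setting the sectype also defaults
 * addr_treq to "required" as implemented above:
 *
 *	echo tcp    > /sys/kernel/config/nvmet/ports/1/addr_trtype
 *	echo tls1.3 > /sys/kernel/config/nvmet/ports/1/addr_tsas
 */
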
/*
 * Namespace structures & file operation functions below
 */
static ssize_t nvmet_ns_device_path_show(struct config_item *item, char *page)
{
	return sprintf(page, "%s\n", to_nvmet_ns(item)->device_path);
}

static ssize_t nvmet_ns_device_path_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_ns *ns = to_nvmet_ns(item);
	struct nvmet_subsys *subsys = ns->subsys;
	size_t len;
	int ret;

	mutex_lock(&subsys->lock);
	ret = -EBUSY;
	if (ns->enabled)
		goto out_unlock;

	ret = -EINVAL;
	len = strcspn(page, "\n");
	if (!len)
		goto out_unlock;

	kfree(ns->device_path);
	ret = -ENOMEM;
	ns->device_path = kmemdup_nul(page, len, GFP_KERNEL);
	if (!ns->device_path)
		goto out_unlock;

	mutex_unlock(&subsys->lock);
	return count;

out_unlock:
	mutex_unlock(&subsys->lock);
	return ret;
}

CONFIGFS_ATTR(nvmet_ns_, device_path);

#ifdef CONFIG_PCI_P2PDMA
static ssize_t nvmet_ns_p2pmem_show(struct config_item *item, char *page)
{
	struct nvmet_ns *ns = to_nvmet_ns(item);

	return pci_p2pdma_enable_show(page, ns->p2p_dev, ns->use_p2pmem);
}

static ssize_t nvmet_ns_p2pmem_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_ns *ns = to_nvmet_ns(item);
	struct pci_dev *p2p_dev = NULL;
	bool use_p2pmem;
	int ret = count;
	int error;

	mutex_lock(&ns->subsys->lock);
	if (ns->enabled) {
		ret = -EBUSY;
		goto out_unlock;
	}

	error = pci_p2pdma_enable_store(page, &p2p_dev, &use_p2pmem);
	if (error) {
		ret = error;
		goto out_unlock;
	}

	ns->use_p2pmem = use_p2pmem;
	pci_dev_put(ns->p2p_dev);
	ns->p2p_dev = p2p_dev;

out_unlock:
	mutex_unlock(&ns->subsys->lock);

	return ret;
}

CONFIGFS_ATTR(nvmet_ns_, p2pmem);
#endif /* CONFIG_PCI_P2PDMA */

static ssize_t nvmet_ns_device_uuid_show(struct config_item *item, char *page)
{
	return sprintf(page, "%pUb\n", &to_nvmet_ns(item)->uuid);
}

static ssize_t nvmet_ns_device_uuid_store(struct config_item *item,
					  const char *page, size_t count)
{
	struct nvmet_ns *ns = to_nvmet_ns(item);
	struct nvmet_subsys *subsys = ns->subsys;
	int ret = 0;

	mutex_lock(&subsys->lock);
	if (ns->enabled) {
		ret = -EBUSY;
		goto out_unlock;
	}

	if (uuid_parse(page, &ns->uuid))
		ret = -EINVAL;

out_unlock:
	mutex_unlock(&subsys->lock);
	return ret ? ret : count;
}

CONFIGFS_ATTR(nvmet_ns_, device_uuid);

static ssize_t nvmet_ns_device_nguid_show(struct config_item *item, char *page)
{
	return sprintf(page, "%pUb\n", &to_nvmet_ns(item)->nguid);
}

static ssize_t nvmet_ns_device_nguid_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_ns *ns = to_nvmet_ns(item);
	struct nvmet_subsys *subsys = ns->subsys;
	u8 nguid[16];
	const char *p = page;
	int i;
	int ret = 0;

	mutex_lock(&subsys->lock);
	if (ns->enabled) {
		ret = -EBUSY;
		goto out_unlock;
	}

	for (i = 0; i < 16; i++) {
		if (p + 2 > page + count) {
			ret = -EINVAL;
			goto out_unlock;
		}
		if (!isxdigit(p[0]) || !isxdigit(p[1])) {
			ret = -EINVAL;
			goto out_unlock;
		}

		nguid[i] = (hex_to_bin(p[0]) << 4) | hex_to_bin(p[1]);
		p += 2;

		if (*p == '-' || *p == ':')
			p++;
	}

	memcpy(&ns->nguid, nguid, sizeof(nguid));
out_unlock:
	mutex_unlock(&subsys->lock);
	return ret ? ret : count;
}

CONFIGFS_ATTR(nvmet_ns_, device_nguid);

static ssize_t nvmet_ns_ana_grpid_show(struct config_item *item, char *page)
{
	return sprintf(page, "%u\n", to_nvmet_ns(item)->anagrpid);
}

static ssize_t nvmet_ns_ana_grpid_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_ns *ns = to_nvmet_ns(item);
	u32 oldgrpid, newgrpid;
	int ret;

	ret = kstrtou32(page, 0, &newgrpid);
	if (ret)
		return ret;

	if (newgrpid < 1 || newgrpid > NVMET_MAX_ANAGRPS)
		return -EINVAL;

	down_write(&nvmet_ana_sem);
	oldgrpid = ns->anagrpid;
	newgrpid = array_index_nospec(newgrpid, NVMET_MAX_ANAGRPS);
	nvmet_ana_group_enabled[newgrpid]++;
	ns->anagrpid = newgrpid;
	nvmet_ana_group_enabled[oldgrpid]--;
	nvmet_ana_chgcnt++;
	up_write(&nvmet_ana_sem);

	nvmet_send_ana_event(ns->subsys, NULL);
	return count;
}

CONFIGFS_ATTR(nvmet_ns_, ana_grpid);

static ssize_t nvmet_ns_enable_show(struct config_item *item, char *page)
{
	return sprintf(page, "%d\n", to_nvmet_ns(item)->enabled);
}

static ssize_t nvmet_ns_enable_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_ns *ns = to_nvmet_ns(item);
	bool enable;
	int ret = 0;

	if (kstrtobool(page, &enable))
		return -EINVAL;

	/*
	 * Take the global nvmet_config_sem because the disable routine has a
	 * window where it releases the subsys lock, giving a parallel enable
	 * a chance to run concurrently and leave the disable with a
	 * misaccounted ns percpu_ref.
	 */
	down_write(&nvmet_config_sem);
	if (enable)
		ret = nvmet_ns_enable(ns);
	else
		nvmet_ns_disable(ns);
	up_write(&nvmet_config_sem);

	return ret ? ret : count;
}

CONFIGFS_ATTR(nvmet_ns_, enable);

static ssize_t nvmet_ns_buffered_io_show(struct config_item *item, char *page)
{
	return sprintf(page, "%d\n", to_nvmet_ns(item)->buffered_io);
}

static ssize_t nvmet_ns_buffered_io_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_ns *ns = to_nvmet_ns(item);
	bool val;

	if (kstrtobool(page, &val))
		return -EINVAL;

	mutex_lock(&ns->subsys->lock);
	if (ns->enabled) {
		pr_err("disable ns before setting buffered_io value.\n");
		mutex_unlock(&ns->subsys->lock);
		return -EINVAL;
	}

	ns->buffered_io = val;
	mutex_unlock(&ns->subsys->lock);
	return count;
}

CONFIGFS_ATTR(nvmet_ns_, buffered_io);

static ssize_t nvmet_ns_revalidate_size_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_ns *ns = to_nvmet_ns(item);
	bool val;

	if (kstrtobool(page, &val))
		return -EINVAL;

	if (!val)
		return -EINVAL;

	mutex_lock(&ns->subsys->lock);
	if (!ns->enabled) {
		pr_err("enable ns before revalidate.\n");
		mutex_unlock(&ns->subsys->lock);
		return -EINVAL;
	}
	if (nvmet_ns_revalidate(ns))
		nvmet_ns_changed(ns->subsys, ns->nsid);
	mutex_unlock(&ns->subsys->lock);
	return count;
}

CONFIGFS_ATTR_WO(nvmet_ns_, revalidate_size);

static struct configfs_attribute *nvmet_ns_attrs[] = {
	&nvmet_ns_attr_device_path,
	&nvmet_ns_attr_device_nguid,
	&nvmet_ns_attr_device_uuid,
	&nvmet_ns_attr_ana_grpid,
	&nvmet_ns_attr_enable,
	&nvmet_ns_attr_buffered_io,
	&nvmet_ns_attr_revalidate_size,
#ifdef CONFIG_PCI_P2PDMA
	&nvmet_ns_attr_p2pmem,
#endif
	NULL,
};

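/*
 * Illustrative namespace setup using the attributes above (subsystem
 * name, nsid and backing device are placeholders):
 *
 *	mkdir /sys/kernel/config/nvmet/subsystems/testnqn/namespaces/1
 *	echo -n /dev/nvme0n1 > \
 *		/sys/kernel/config/nvmet/subsystems/testnqn/namespaces/1/device_path
 *	echo 1 > /sys/kernel/config/nvmet/subsystems/testnqn/namespaces/1/enable
 */
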
bool nvmet_subsys_nsid_exists(struct nvmet_subsys *subsys, u32 nsid)
{
	struct config_item *ns_item;
	char name[12];

	snprintf(name, sizeof(name), "%u", nsid);
	mutex_lock(&subsys->namespaces_group.cg_subsys->su_mutex);
	ns_item = config_group_find_item(&subsys->namespaces_group, name);
	mutex_unlock(&subsys->namespaces_group.cg_subsys->su_mutex);
	return ns_item != NULL;
}

static void nvmet_ns_release(struct config_item *item)
{
	struct nvmet_ns *ns = to_nvmet_ns(item);

	nvmet_ns_free(ns);
}

static struct configfs_item_operations nvmet_ns_item_ops = {
	.release		= nvmet_ns_release,
};

static const struct config_item_type nvmet_ns_type = {
	.ct_item_ops		= &nvmet_ns_item_ops,
	.ct_attrs		= nvmet_ns_attrs,
	.ct_owner		= THIS_MODULE,
};

static struct config_group *nvmet_ns_make(struct config_group *group,
		const char *name)
{
	struct nvmet_subsys *subsys = namespaces_to_subsys(&group->cg_item);
	struct nvmet_ns *ns;
	int ret;
	u32 nsid;

	ret = kstrtou32(name, 0, &nsid);
	if (ret)
		goto out;

	ret = -EINVAL;
	if (nsid == 0 || nsid == NVME_NSID_ALL) {
		pr_err("invalid nsid %#x\n", nsid);
		goto out;
	}

	ret = -ENOMEM;
	ns = nvmet_ns_alloc(subsys, nsid);
	if (!ns)
		goto out;
	config_group_init_type_name(&ns->group, name, &nvmet_ns_type);

	pr_info("adding nsid %u to subsystem %s\n", nsid, subsys->subsysnqn);

	return &ns->group;
out:
	return ERR_PTR(ret);
}

static struct configfs_group_operations nvmet_namespaces_group_ops = {
	.make_group		= nvmet_ns_make,
};

static const struct config_item_type nvmet_namespaces_type = {
	.ct_group_ops		= &nvmet_namespaces_group_ops,
	.ct_owner		= THIS_MODULE,
};

#ifdef CONFIG_NVME_TARGET_PASSTHRU

static ssize_t nvmet_passthru_device_path_show(struct config_item *item,
		char *page)
{
	struct nvmet_subsys *subsys = to_subsys(item->ci_parent);

	return snprintf(page, PAGE_SIZE, "%s\n", subsys->passthru_ctrl_path);
}

static ssize_t nvmet_passthru_device_path_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
	size_t len;
	int ret;

	mutex_lock(&subsys->lock);

	ret = -EBUSY;
	if (subsys->passthru_ctrl)
		goto out_unlock;

	ret = -EINVAL;
	len = strcspn(page, "\n");
	if (!len)
		goto out_unlock;

	kfree(subsys->passthru_ctrl_path);
	ret = -ENOMEM;
	subsys->passthru_ctrl_path = kstrndup(page, len, GFP_KERNEL);
	if (!subsys->passthru_ctrl_path)
		goto out_unlock;

	mutex_unlock(&subsys->lock);

	return count;
out_unlock:
	mutex_unlock(&subsys->lock);
	return ret;
}
CONFIGFS_ATTR(nvmet_passthru_, device_path);

static ssize_t nvmet_passthru_enable_show(struct config_item *item,
		char *page)
{
	struct nvmet_subsys *subsys = to_subsys(item->ci_parent);

	return sprintf(page, "%d\n", subsys->passthru_ctrl ? 1 : 0);
}

static ssize_t nvmet_passthru_enable_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
	bool enable;
	int ret = 0;

	if (kstrtobool(page, &enable))
		return -EINVAL;

	if (enable)
		ret = nvmet_passthru_ctrl_enable(subsys);
	else
		nvmet_passthru_ctrl_disable(subsys);

	return ret ? ret : count;
}
CONFIGFS_ATTR(nvmet_passthru_, enable);

static ssize_t nvmet_passthru_admin_timeout_show(struct config_item *item,
		char *page)
{
	return sprintf(page, "%u\n", to_subsys(item->ci_parent)->admin_timeout);
}

static ssize_t nvmet_passthru_admin_timeout_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
	unsigned int timeout;

	if (kstrtouint(page, 0, &timeout))
		return -EINVAL;
	subsys->admin_timeout = timeout;
	return count;
}
CONFIGFS_ATTR(nvmet_passthru_, admin_timeout);

static ssize_t nvmet_passthru_io_timeout_show(struct config_item *item,
		char *page)
{
	return sprintf(page, "%u\n", to_subsys(item->ci_parent)->io_timeout);
}

static ssize_t nvmet_passthru_io_timeout_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
	unsigned int timeout;

	if (kstrtouint(page, 0, &timeout))
		return -EINVAL;
	subsys->io_timeout = timeout;
	return count;
}
CONFIGFS_ATTR(nvmet_passthru_, io_timeout);

static ssize_t nvmet_passthru_clear_ids_show(struct config_item *item,
		char *page)
{
	return sprintf(page, "%u\n", to_subsys(item->ci_parent)->clear_ids);
}

static ssize_t nvmet_passthru_clear_ids_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
	unsigned int clear_ids;

	if (kstrtouint(page, 0, &clear_ids))
		return -EINVAL;
	subsys->clear_ids = clear_ids;
	return count;
}
CONFIGFS_ATTR(nvmet_passthru_, clear_ids);

static struct configfs_attribute *nvmet_passthru_attrs[] = {
	&nvmet_passthru_attr_device_path,
	&nvmet_passthru_attr_enable,
	&nvmet_passthru_attr_admin_timeout,
	&nvmet_passthru_attr_io_timeout,
	&nvmet_passthru_attr_clear_ids,
	NULL,
};

static const struct config_item_type nvmet_passthru_type = {
	.ct_attrs		= nvmet_passthru_attrs,
	.ct_owner		= THIS_MODULE,
};

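/*
 * Illustrative passthru setup (the controller character device is a
 * placeholder); the underlying controller is exported as a whole rather
 * than through individual namespaces:
 *
 *	echo -n /dev/nvme0 > \
 *		/sys/kernel/config/nvmet/subsystems/testnqn/passthru/device_path
 *	echo 1 > /sys/kernel/config/nvmet/subsystems/testnqn/passthru/enable
 */
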
static void nvmet_add_passthru_group(struct nvmet_subsys *subsys)
{
	config_group_init_type_name(&subsys->passthru_group,
				    "passthru", &nvmet_passthru_type);
	configfs_add_default_group(&subsys->passthru_group,
				   &subsys->group);
}

#else /* CONFIG_NVME_TARGET_PASSTHRU */

static void nvmet_add_passthru_group(struct nvmet_subsys *subsys)
{
}

#endif /* CONFIG_NVME_TARGET_PASSTHRU */

static int nvmet_port_subsys_allow_link(struct config_item *parent,
		struct config_item *target)
{
	struct nvmet_port *port = to_nvmet_port(parent->ci_parent);
	struct nvmet_subsys *subsys;
	struct nvmet_subsys_link *link, *p;
	int ret;

	if (target->ci_type != &nvmet_subsys_type) {
		pr_err("can only link subsystems into the subsystems dir!\n");
		return -EINVAL;
	}
	subsys = to_subsys(target);
	link = kmalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;
	link->subsys = subsys;

	down_write(&nvmet_config_sem);
	ret = -EEXIST;
	list_for_each_entry(p, &port->subsystems, entry) {
		if (p->subsys == subsys)
			goto out_free_link;
	}

	if (list_empty(&port->subsystems)) {
		ret = nvmet_enable_port(port);
		if (ret)
			goto out_free_link;
	}

	list_add_tail(&link->entry, &port->subsystems);
	nvmet_port_disc_changed(port, subsys);

	up_write(&nvmet_config_sem);
	return 0;

out_free_link:
	up_write(&nvmet_config_sem);
	kfree(link);
	return ret;
}

static void nvmet_port_subsys_drop_link(struct config_item *parent,
		struct config_item *target)
{
	struct nvmet_port *port = to_nvmet_port(parent->ci_parent);
	struct nvmet_subsys *subsys = to_subsys(target);
	struct nvmet_subsys_link *p;

	down_write(&nvmet_config_sem);
	list_for_each_entry(p, &port->subsystems, entry) {
		if (p->subsys == subsys)
			goto found;
	}
	up_write(&nvmet_config_sem);
	return;

found:
	list_del(&p->entry);
	nvmet_port_del_ctrls(port, subsys);
	nvmet_port_disc_changed(port, subsys);

	if (list_empty(&port->subsystems))
		nvmet_disable_port(port);
	up_write(&nvmet_config_sem);
	kfree(p);
}

static struct configfs_item_operations nvmet_port_subsys_item_ops = {
	.allow_link		= nvmet_port_subsys_allow_link,
	.drop_link		= nvmet_port_subsys_drop_link,
};

static const struct config_item_type nvmet_port_subsys_type = {
	.ct_item_ops		= &nvmet_port_subsys_item_ops,
	.ct_owner		= THIS_MODULE,
};

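/*
 * A subsystem is exported on a port by symlinking it into the port's
 * "subsystems" directory; as implemented above, the first link enables
 * the port and dropping the last link disables it.  Illustrative example:
 *
 *	ln -s /sys/kernel/config/nvmet/subsystems/testnqn \
 *		/sys/kernel/config/nvmet/ports/1/subsystems/testnqn
 */
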
static int nvmet_allowed_hosts_allow_link(struct config_item *parent,
		struct config_item *target)
{
	struct nvmet_subsys *subsys = to_subsys(parent->ci_parent);
	struct nvmet_host *host;
	struct nvmet_host_link *link, *p;
	int ret;

	if (target->ci_type != &nvmet_host_type) {
		pr_err("can only link hosts into the allowed_hosts directory!\n");
		return -EINVAL;
	}

	host = to_host(target);
	link = kmalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;
	link->host = host;

	down_write(&nvmet_config_sem);
	ret = -EINVAL;
	if (subsys->allow_any_host) {
		pr_err("can't add hosts when allow_any_host is set!\n");
		goto out_free_link;
	}

	ret = -EEXIST;
	list_for_each_entry(p, &subsys->hosts, entry) {
		if (!strcmp(nvmet_host_name(p->host), nvmet_host_name(host)))
			goto out_free_link;
	}
	list_add_tail(&link->entry, &subsys->hosts);
	nvmet_subsys_disc_changed(subsys, host);

	up_write(&nvmet_config_sem);
	return 0;
out_free_link:
	up_write(&nvmet_config_sem);
	kfree(link);
	return ret;
}

static void nvmet_allowed_hosts_drop_link(struct config_item *parent,
		struct config_item *target)
{
	struct nvmet_subsys *subsys = to_subsys(parent->ci_parent);
	struct nvmet_host *host = to_host(target);
	struct nvmet_host_link *p;

	down_write(&nvmet_config_sem);
	list_for_each_entry(p, &subsys->hosts, entry) {
		if (!strcmp(nvmet_host_name(p->host), nvmet_host_name(host)))
			goto found;
	}
	up_write(&nvmet_config_sem);
	return;

found:
	list_del(&p->entry);
	nvmet_subsys_disc_changed(subsys, host);

	up_write(&nvmet_config_sem);
	kfree(p);
}

static struct configfs_item_operations nvmet_allowed_hosts_item_ops = {
	.allow_link		= nvmet_allowed_hosts_allow_link,
	.drop_link		= nvmet_allowed_hosts_drop_link,
};

static const struct config_item_type nvmet_allowed_hosts_type = {
	.ct_item_ops		= &nvmet_allowed_hosts_item_ops,
	.ct_owner		= THIS_MODULE,
};

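/*
 * With allow_any_host cleared, individual hosts are granted access by
 * symlinking entries from the top-level hosts directory into the
 * subsystem's "allowed_hosts" directory.  Illustrative example (the host
 * NQN is a placeholder):
 *
 *	mkdir /sys/kernel/config/nvmet/hosts/nqn.2014-08.org.example:host1
 *	ln -s /sys/kernel/config/nvmet/hosts/nqn.2014-08.org.example:host1 \
 *		/sys/kernel/config/nvmet/subsystems/testnqn/allowed_hosts/
 */
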
static ssize_t nvmet_subsys_attr_allow_any_host_show(struct config_item *item,
		char *page)
{
	return snprintf(page, PAGE_SIZE, "%d\n",
		to_subsys(item)->allow_any_host);
}

static ssize_t nvmet_subsys_attr_allow_any_host_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_subsys *subsys = to_subsys(item);
	bool allow_any_host;
	int ret = 0;

	if (kstrtobool(page, &allow_any_host))
		return -EINVAL;

	down_write(&nvmet_config_sem);
	if (allow_any_host && !list_empty(&subsys->hosts)) {
		pr_err("Can't set allow_any_host when explicit hosts are set!\n");
		ret = -EINVAL;
		goto out_unlock;
	}

	if (subsys->allow_any_host != allow_any_host) {
		subsys->allow_any_host = allow_any_host;
		nvmet_subsys_disc_changed(subsys, NULL);
	}

out_unlock:
	up_write(&nvmet_config_sem);
	return ret ? ret : count;
}

CONFIGFS_ATTR(nvmet_subsys_, attr_allow_any_host);

static ssize_t nvmet_subsys_attr_version_show(struct config_item *item,
					      char *page)
{
	struct nvmet_subsys *subsys = to_subsys(item);

	if (NVME_TERTIARY(subsys->ver))
		return snprintf(page, PAGE_SIZE, "%llu.%llu.%llu\n",
				NVME_MAJOR(subsys->ver),
				NVME_MINOR(subsys->ver),
				NVME_TERTIARY(subsys->ver));

	return snprintf(page, PAGE_SIZE, "%llu.%llu\n",
			NVME_MAJOR(subsys->ver),
			NVME_MINOR(subsys->ver));
}

static ssize_t
nvmet_subsys_attr_version_store_locked(struct nvmet_subsys *subsys,
		const char *page, size_t count)
{
	int major, minor, tertiary = 0;
	int ret;

	if (subsys->subsys_discovered) {
		if (NVME_TERTIARY(subsys->ver))
			pr_err("Can't set version number. %llu.%llu.%llu is already assigned\n",
			       NVME_MAJOR(subsys->ver),
			       NVME_MINOR(subsys->ver),
			       NVME_TERTIARY(subsys->ver));
		else
			pr_err("Can't set version number. %llu.%llu is already assigned\n",
			       NVME_MAJOR(subsys->ver),
			       NVME_MINOR(subsys->ver));
		return -EINVAL;
	}

	/* passthru subsystems use the underlying controller's version */
	if (nvmet_is_passthru_subsys(subsys))
		return -EINVAL;

	ret = sscanf(page, "%d.%d.%d\n", &major, &minor, &tertiary);
	if (ret != 2 && ret != 3)
		return -EINVAL;

	subsys->ver = NVME_VS(major, minor, tertiary);

	return count;
}

static ssize_t nvmet_subsys_attr_version_store(struct config_item *item,
					       const char *page, size_t count)
{
	struct nvmet_subsys *subsys = to_subsys(item);
	ssize_t ret;

	down_write(&nvmet_config_sem);
	mutex_lock(&subsys->lock);
	ret = nvmet_subsys_attr_version_store_locked(subsys, page, count);
	mutex_unlock(&subsys->lock);
	up_write(&nvmet_config_sem);

	return ret;
}
CONFIGFS_ATTR(nvmet_subsys_, attr_version);

/* See Section 1.5 of NVMe 1.4 */
static bool nvmet_is_ascii(const char c)
{
	return c >= 0x20 && c <= 0x7e;
}

static ssize_t nvmet_subsys_attr_serial_show(struct config_item *item,
					     char *page)
{
	struct nvmet_subsys *subsys = to_subsys(item);

	return snprintf(page, PAGE_SIZE, "%.*s\n",
			NVMET_SN_MAX_SIZE, subsys->serial);
}

static ssize_t
nvmet_subsys_attr_serial_store_locked(struct nvmet_subsys *subsys,
		const char *page, size_t count)
{
	int pos, len = strcspn(page, "\n");

	if (subsys->subsys_discovered) {
		pr_err("Can't set serial number. %s is already assigned\n",
		       subsys->serial);
		return -EINVAL;
	}

	if (!len || len > NVMET_SN_MAX_SIZE) {
		pr_err("Serial Number cannot be empty or exceed %d bytes\n",
		       NVMET_SN_MAX_SIZE);
		return -EINVAL;
	}

	for (pos = 0; pos < len; pos++) {
		if (!nvmet_is_ascii(page[pos])) {
			pr_err("Serial Number must contain only ASCII characters\n");
			return -EINVAL;
		}
	}

	memcpy_and_pad(subsys->serial, NVMET_SN_MAX_SIZE, page, len, ' ');

	return count;
}

static ssize_t nvmet_subsys_attr_serial_store(struct config_item *item,
					      const char *page, size_t count)
{
	struct nvmet_subsys *subsys = to_subsys(item);
	ssize_t ret;

	down_write(&nvmet_config_sem);
	mutex_lock(&subsys->lock);
	ret = nvmet_subsys_attr_serial_store_locked(subsys, page, count);
	mutex_unlock(&subsys->lock);
	up_write(&nvmet_config_sem);

	return ret;
}
CONFIGFS_ATTR(nvmet_subsys_, attr_serial);

static ssize_t nvmet_subsys_attr_cntlid_min_show(struct config_item *item,
						 char *page)
{
	return snprintf(page, PAGE_SIZE, "%u\n", to_subsys(item)->cntlid_min);
}

static ssize_t nvmet_subsys_attr_cntlid_min_store(struct config_item *item,
						  const char *page, size_t cnt)
{
	u16 cntlid_min;

	if (sscanf(page, "%hu\n", &cntlid_min) != 1)
		return -EINVAL;

	if (cntlid_min == 0)
		return -EINVAL;

	down_write(&nvmet_config_sem);
	if (cntlid_min > to_subsys(item)->cntlid_max)
		goto out_unlock;
	to_subsys(item)->cntlid_min = cntlid_min;
	up_write(&nvmet_config_sem);
	return cnt;

out_unlock:
	up_write(&nvmet_config_sem);
	return -EINVAL;
}
CONFIGFS_ATTR(nvmet_subsys_, attr_cntlid_min);

static ssize_t nvmet_subsys_attr_cntlid_max_show(struct config_item *item,
						 char *page)
{
	return snprintf(page, PAGE_SIZE, "%u\n", to_subsys(item)->cntlid_max);
}

static ssize_t nvmet_subsys_attr_cntlid_max_store(struct config_item *item,
						  const char *page, size_t cnt)
{
	u16 cntlid_max;

	if (sscanf(page, "%hu\n", &cntlid_max) != 1)
		return -EINVAL;

	if (cntlid_max == 0)
		return -EINVAL;

	down_write(&nvmet_config_sem);
	if (cntlid_max < to_subsys(item)->cntlid_min)
		goto out_unlock;
	to_subsys(item)->cntlid_max = cntlid_max;
	up_write(&nvmet_config_sem);
	return cnt;

out_unlock:
	up_write(&nvmet_config_sem);
	return -EINVAL;
}
CONFIGFS_ATTR(nvmet_subsys_, attr_cntlid_max);

static ssize_t nvmet_subsys_attr_model_show(struct config_item *item,
					    char *page)
{
	struct nvmet_subsys *subsys = to_subsys(item);

	return snprintf(page, PAGE_SIZE, "%s\n", subsys->model_number);
}

static ssize_t nvmet_subsys_attr_model_store_locked(struct nvmet_subsys *subsys,
		const char *page, size_t count)
{
	int pos = 0, len;
	char *val;

	if (subsys->subsys_discovered) {
		pr_err("Can't set model number. %s is already assigned\n",
		       subsys->model_number);
		return -EINVAL;
	}

	len = strcspn(page, "\n");
	if (!len)
		return -EINVAL;

	if (len > NVMET_MN_MAX_SIZE) {
		pr_err("Model number size cannot exceed %d bytes\n",
		       NVMET_MN_MAX_SIZE);
		return -EINVAL;
	}

	for (pos = 0; pos < len; pos++) {
		if (!nvmet_is_ascii(page[pos]))
			return -EINVAL;
	}

	val = kmemdup_nul(page, len, GFP_KERNEL);
	if (!val)
		return -ENOMEM;
	kfree(subsys->model_number);
	subsys->model_number = val;
	return count;
}

static ssize_t nvmet_subsys_attr_model_store(struct config_item *item,
					     const char *page, size_t count)
{
	struct nvmet_subsys *subsys = to_subsys(item);
	ssize_t ret;

	down_write(&nvmet_config_sem);
	mutex_lock(&subsys->lock);
	ret = nvmet_subsys_attr_model_store_locked(subsys, page, count);
	mutex_unlock(&subsys->lock);
	up_write(&nvmet_config_sem);

	return ret;
}
CONFIGFS_ATTR(nvmet_subsys_, attr_model);

static ssize_t nvmet_subsys_attr_ieee_oui_show(struct config_item *item,
					    char *page)
{
	struct nvmet_subsys *subsys = to_subsys(item);

	return sysfs_emit(page, "0x%06x\n", subsys->ieee_oui);
}

static ssize_t nvmet_subsys_attr_ieee_oui_store_locked(struct nvmet_subsys *subsys,
		const char *page, size_t count)
{
	uint32_t val = 0;
	int ret;

	if (subsys->subsys_discovered) {
		pr_err("Can't set IEEE OUI. 0x%06x is already assigned\n",
		      subsys->ieee_oui);
		return -EINVAL;
	}

	ret = kstrtou32(page, 0, &val);
	if (ret < 0)
		return ret;

	if (val >= 0x1000000)
		return -EINVAL;

	subsys->ieee_oui = val;

	return count;
}

static ssize_t nvmet_subsys_attr_ieee_oui_store(struct config_item *item,
					     const char *page, size_t count)
{
	struct nvmet_subsys *subsys = to_subsys(item);
	ssize_t ret;

	down_write(&nvmet_config_sem);
	mutex_lock(&subsys->lock);
	ret = nvmet_subsys_attr_ieee_oui_store_locked(subsys, page, count);
	mutex_unlock(&subsys->lock);
	up_write(&nvmet_config_sem);

	return ret;
}
CONFIGFS_ATTR(nvmet_subsys_, attr_ieee_oui);

static ssize_t nvmet_subsys_attr_firmware_show(struct config_item *item,
					    char *page)
{
	struct nvmet_subsys *subsys = to_subsys(item);

	return sysfs_emit(page, "%s\n", subsys->firmware_rev);
}

static ssize_t nvmet_subsys_attr_firmware_store_locked(struct nvmet_subsys *subsys,
		const char *page, size_t count)
{
	int pos = 0, len;
	char *val;

	if (subsys->subsys_discovered) {
		pr_err("Can't set firmware revision. %s is already assigned\n",
		       subsys->firmware_rev);
		return -EINVAL;
	}

	len = strcspn(page, "\n");
	if (!len)
		return -EINVAL;

	if (len > NVMET_FR_MAX_SIZE) {
		pr_err("Firmware revision size cannot exceed %d bytes\n",
		       NVMET_FR_MAX_SIZE);
		return -EINVAL;
	}

	for (pos = 0; pos < len; pos++) {
		if (!nvmet_is_ascii(page[pos]))
			return -EINVAL;
	}

	val = kmemdup_nul(page, len, GFP_KERNEL);
	if (!val)
		return -ENOMEM;

	kfree(subsys->firmware_rev);

	subsys->firmware_rev = val;

	return count;
}

static ssize_t nvmet_subsys_attr_firmware_store(struct config_item *item,
					     const char *page, size_t count)
{
	struct nvmet_subsys *subsys = to_subsys(item);
	ssize_t ret;

	down_write(&nvmet_config_sem);
	mutex_lock(&subsys->lock);
	ret = nvmet_subsys_attr_firmware_store_locked(subsys, page, count);
	mutex_unlock(&subsys->lock);
	up_write(&nvmet_config_sem);

	return ret;
}
CONFIGFS_ATTR(nvmet_subsys_, attr_firmware);

#ifdef CONFIG_BLK_DEV_INTEGRITY
static ssize_t nvmet_subsys_attr_pi_enable_show(struct config_item *item,
						char *page)
{
	return snprintf(page, PAGE_SIZE, "%d\n", to_subsys(item)->pi_support);
}

static ssize_t nvmet_subsys_attr_pi_enable_store(struct config_item *item,
						 const char *page, size_t count)
{
	struct nvmet_subsys *subsys = to_subsys(item);
	bool pi_enable;

	if (kstrtobool(page, &pi_enable))
		return -EINVAL;

	subsys->pi_support = pi_enable;
	return count;
}
CONFIGFS_ATTR(nvmet_subsys_, attr_pi_enable);
#endif

static ssize_t nvmet_subsys_attr_qid_max_show(struct config_item *item,
					      char *page)
{
	return snprintf(page, PAGE_SIZE, "%u\n", to_subsys(item)->max_qid);
}

static ssize_t nvmet_subsys_attr_qid_max_store(struct config_item *item,
					       const char *page, size_t cnt)
{
	struct nvmet_subsys *subsys = to_subsys(item);
	struct nvmet_ctrl *ctrl;
	u16 qid_max;

	if (sscanf(page, "%hu\n", &qid_max) != 1)
		return -EINVAL;

	if (qid_max < 1 || qid_max > NVMET_NR_QUEUES)
		return -EINVAL;

	down_write(&nvmet_config_sem);
	subsys->max_qid = qid_max;

	/* Force reconnect */
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
		ctrl->ops->delete_ctrl(ctrl);
	up_write(&nvmet_config_sem);

	return cnt;
}
CONFIGFS_ATTR(nvmet_subsys_, attr_qid_max);

static struct configfs_attribute *nvmet_subsys_attrs[] = {
	&nvmet_subsys_attr_attr_allow_any_host,
	&nvmet_subsys_attr_attr_version,
	&nvmet_subsys_attr_attr_serial,
	&nvmet_subsys_attr_attr_cntlid_min,
	&nvmet_subsys_attr_attr_cntlid_max,
	&nvmet_subsys_attr_attr_model,
	&nvmet_subsys_attr_attr_qid_max,
	&nvmet_subsys_attr_attr_ieee_oui,
	&nvmet_subsys_attr_attr_firmware,
#ifdef CONFIG_BLK_DEV_INTEGRITY
	&nvmet_subsys_attr_attr_pi_enable,
#endif
	NULL,
};

/*
 * Subsystem structures & folder operation functions below
 */
static void nvmet_subsys_release(struct config_item *item)
{
	struct nvmet_subsys *subsys = to_subsys(item);

	nvmet_subsys_del_ctrls(subsys);
	nvmet_subsys_put(subsys);
}

static struct configfs_item_operations nvmet_subsys_item_ops = {
	.release		= nvmet_subsys_release,
};

static const struct config_item_type nvmet_subsys_type = {
	.ct_item_ops		= &nvmet_subsys_item_ops,
	.ct_attrs		= nvmet_subsys_attrs,
	.ct_owner		= THIS_MODULE,
};

static struct config_group *nvmet_subsys_make(struct config_group *group,
		const char *name)
{
	struct nvmet_subsys *subsys;

	if (sysfs_streq(name, NVME_DISC_SUBSYS_NAME)) {
		pr_err("can't create discovery subsystem through configfs\n");
		return ERR_PTR(-EINVAL);
	}

	if (sysfs_streq(name, nvmet_disc_subsys->subsysnqn)) {
		pr_err("can't create subsystem using unique discovery NQN\n");
		return ERR_PTR(-EINVAL);
	}

	subsys = nvmet_subsys_alloc(name, NVME_NQN_NVME);
	if (IS_ERR(subsys))
		return ERR_CAST(subsys);

	config_group_init_type_name(&subsys->group, name, &nvmet_subsys_type);

	config_group_init_type_name(&subsys->namespaces_group,
			"namespaces", &nvmet_namespaces_type);
	configfs_add_default_group(&subsys->namespaces_group, &subsys->group);

	config_group_init_type_name(&subsys->allowed_hosts_group,
			"allowed_hosts", &nvmet_allowed_hosts_type);
	configfs_add_default_group(&subsys->allowed_hosts_group,
			&subsys->group);

	nvmet_add_passthru_group(subsys);

	return &subsys->group;
}

static struct configfs_group_operations nvmet_subsystems_group_ops = {
	.make_group		= nvmet_subsys_make,
};

static const struct config_item_type nvmet_subsystems_type = {
	.ct_group_ops		= &nvmet_subsystems_group_ops,
	.ct_owner		= THIS_MODULE,
};

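/*
 * Illustrative subsystem creation (the NQN is a placeholder); the
 * "namespaces", "allowed_hosts" and (if configured) "passthru" default
 * groups are created automatically by nvmet_subsys_make():
 *
 *	mkdir /sys/kernel/config/nvmet/subsystems/testnqn
 *	echo 1 > /sys/kernel/config/nvmet/subsystems/testnqn/attr_allow_any_host
 */
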
static ssize_t nvmet_referral_enable_show(struct config_item *item,
		char *page)
{
	return snprintf(page, PAGE_SIZE, "%d\n", to_nvmet_port(item)->enabled);
}

static ssize_t nvmet_referral_enable_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_port *parent = to_nvmet_port(item->ci_parent->ci_parent);
	struct nvmet_port *port = to_nvmet_port(item);
	bool enable;

	if (kstrtobool(page, &enable))
		goto inval;

	if (enable)
		nvmet_referral_enable(parent, port);
	else
		nvmet_referral_disable(parent, port);

	return count;
inval:
	pr_err("Invalid value '%s' for enable\n", page);
	return -EINVAL;
}

CONFIGFS_ATTR(nvmet_referral_, enable);

/*
 * Discovery Service subsystem definitions
 */
static struct configfs_attribute *nvmet_referral_attrs[] = {
	&nvmet_attr_addr_adrfam,
	&nvmet_attr_addr_portid,
	&nvmet_attr_addr_treq,
	&nvmet_attr_addr_traddr,
	&nvmet_attr_addr_trsvcid,
	&nvmet_attr_addr_trtype,
	&nvmet_referral_attr_enable,
	NULL,
};

static void nvmet_referral_notify(struct config_group *group,
		struct config_item *item)
{
	struct nvmet_port *parent = to_nvmet_port(item->ci_parent->ci_parent);
	struct nvmet_port *port = to_nvmet_port(item);

	nvmet_referral_disable(parent, port);
}

static void nvmet_referral_release(struct config_item *item)
{
	struct nvmet_port *port = to_nvmet_port(item);

	kfree(port);
}

static struct configfs_item_operations nvmet_referral_item_ops = {
	.release	= nvmet_referral_release,
};

static const struct config_item_type nvmet_referral_type = {
	.ct_owner	= THIS_MODULE,
	.ct_attrs	= nvmet_referral_attrs,
	.ct_item_ops	= &nvmet_referral_item_ops,
};

static struct config_group *nvmet_referral_make(
		struct config_group *group, const char *name)
{
	struct nvmet_port *port;

	port = kzalloc(sizeof(*port), GFP_KERNEL);
	if (!port)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&port->entry);
	config_group_init_type_name(&port->group, name, &nvmet_referral_type);

	return &port->group;
}

static struct configfs_group_operations nvmet_referral_group_ops = {
	.make_group		= nvmet_referral_make,
	.disconnect_notify	= nvmet_referral_notify,
};

static const struct config_item_type nvmet_referrals_type = {
	.ct_owner	= THIS_MODULE,
	.ct_group_ops	= &nvmet_referral_group_ops,
};

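/*
 * Referrals are extra discovery log entries pointing at other ports; each
 * referral reuses the generic address attributes and must be enabled
 * explicitly.  Illustrative example:
 *
 *	mkdir /sys/kernel/config/nvmet/ports/1/referrals/2
 *	echo tcp > /sys/kernel/config/nvmet/ports/1/referrals/2/addr_trtype
 *	echo 1   > /sys/kernel/config/nvmet/ports/1/referrals/2/enable
 */
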
static struct nvmet_type_name_map nvmet_ana_state[] = {
	{ NVME_ANA_OPTIMIZED,		"optimized" },
	{ NVME_ANA_NONOPTIMIZED,	"non-optimized" },
	{ NVME_ANA_INACCESSIBLE,	"inaccessible" },
	{ NVME_ANA_PERSISTENT_LOSS,	"persistent-loss" },
	{ NVME_ANA_CHANGE,		"change" },
};

static ssize_t nvmet_ana_group_ana_state_show(struct config_item *item,
		char *page)
{
	struct nvmet_ana_group *grp = to_ana_group(item);
	enum nvme_ana_state state = grp->port->ana_state[grp->grpid];
	int i;

	for (i = 0; i < ARRAY_SIZE(nvmet_ana_state); i++) {
		if (state == nvmet_ana_state[i].type)
			return sprintf(page, "%s\n", nvmet_ana_state[i].name);
	}

	return sprintf(page, "\n");
}

static ssize_t nvmet_ana_group_ana_state_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_ana_group *grp = to_ana_group(item);
	enum nvme_ana_state *ana_state = grp->port->ana_state;
	int i;

	for (i = 0; i < ARRAY_SIZE(nvmet_ana_state); i++) {
		if (sysfs_streq(page, nvmet_ana_state[i].name))
			goto found;
	}

	pr_err("Invalid value '%s' for ana_state\n", page);
	return -EINVAL;

found:
	down_write(&nvmet_ana_sem);
	ana_state[grp->grpid] = (enum nvme_ana_state) nvmet_ana_state[i].type;
	nvmet_ana_chgcnt++;
	up_write(&nvmet_ana_sem);
	nvmet_port_send_ana_event(grp->port);
	return count;
}

CONFIGFS_ATTR(nvmet_ana_group_, ana_state);

static struct configfs_attribute *nvmet_ana_group_attrs[] = {
	&nvmet_ana_group_attr_ana_state,
	NULL,
};

static void nvmet_ana_group_release(struct config_item *item)
{
	struct nvmet_ana_group *grp = to_ana_group(item);

	if (grp == &grp->port->ana_default_group)
		return;

	down_write(&nvmet_ana_sem);
	grp->port->ana_state[grp->grpid] = NVME_ANA_INACCESSIBLE;
	nvmet_ana_group_enabled[grp->grpid]--;
	up_write(&nvmet_ana_sem);

	nvmet_port_send_ana_event(grp->port);
	kfree(grp);
}

static struct configfs_item_operations nvmet_ana_group_item_ops = {
	.release		= nvmet_ana_group_release,
};

static const struct config_item_type nvmet_ana_group_type = {
	.ct_item_ops		= &nvmet_ana_group_item_ops,
	.ct_attrs		= nvmet_ana_group_attrs,
	.ct_owner		= THIS_MODULE,
};

static struct config_group *nvmet_ana_groups_make_group(
		struct config_group *group, const char *name)
{
	struct nvmet_port *port = ana_groups_to_port(&group->cg_item);
	struct nvmet_ana_group *grp;
	u32 grpid;
	int ret;

	ret = kstrtou32(name, 0, &grpid);
	if (ret)
		goto out;

	ret = -EINVAL;
	if (grpid <= 1 || grpid > NVMET_MAX_ANAGRPS)
		goto out;

	ret = -ENOMEM;
	grp = kzalloc(sizeof(*grp), GFP_KERNEL);
	if (!grp)
		goto out;
	grp->port = port;
	grp->grpid = grpid;

	down_write(&nvmet_ana_sem);
	grpid = array_index_nospec(grpid, NVMET_MAX_ANAGRPS);
	nvmet_ana_group_enabled[grpid]++;
	up_write(&nvmet_ana_sem);

	nvmet_port_send_ana_event(grp->port);

	config_group_init_type_name(&grp->group, name, &nvmet_ana_group_type);
	return &grp->group;
out:
	return ERR_PTR(ret);
}

static struct configfs_group_operations nvmet_ana_groups_group_ops = {
	.make_group		= nvmet_ana_groups_make_group,
};

static const struct config_item_type nvmet_ana_groups_type = {
	.ct_group_ops		= &nvmet_ana_groups_group_ops,
	.ct_owner		= THIS_MODULE,
};

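/*
 * Illustrative ANA configuration: group 1 is the default group created in
 * nvmet_ports_make(), additional groups (grpid > 1) are created here and
 * namespaces are moved into them through their ana_grpid attribute:
 *
 *	mkdir /sys/kernel/config/nvmet/ports/1/ana_groups/2
 *	echo inaccessible > /sys/kernel/config/nvmet/ports/1/ana_groups/2/ana_state
 *	echo 2 > /sys/kernel/config/nvmet/subsystems/testnqn/namespaces/1/ana_grpid
 */
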
1889 /*
1890  * Ports definitions.
1891  */
1892 static void nvmet_port_release(struct config_item *item)
1893 {
1894 	struct nvmet_port *port = to_nvmet_port(item);
1895 
1896 	/* Let inflight controllers teardown complete */
1897 	flush_workqueue(nvmet_wq);
1898 	list_del(&port->global_entry);
1899 
1900 	key_put(port->keyring);
1901 	kfree(port->ana_state);
1902 	kfree(port);
1903 }
1904 
1905 static struct configfs_attribute *nvmet_port_attrs[] = {
1906 	&nvmet_attr_addr_adrfam,
1907 	&nvmet_attr_addr_treq,
1908 	&nvmet_attr_addr_traddr,
1909 	&nvmet_attr_addr_trsvcid,
1910 	&nvmet_attr_addr_trtype,
1911 	&nvmet_attr_addr_tsas,
1912 	&nvmet_attr_param_inline_data_size,
1913 	&nvmet_attr_param_max_queue_size,
1914 #ifdef CONFIG_BLK_DEV_INTEGRITY
1915 	&nvmet_attr_param_pi_enable,
1916 #endif
1917 	NULL,
1918 };
1919 
1920 static struct configfs_item_operations nvmet_port_item_ops = {
1921 	.release		= nvmet_port_release,
1922 };
1923 
1924 static const struct config_item_type nvmet_port_type = {
1925 	.ct_attrs		= nvmet_port_attrs,
1926 	.ct_item_ops		= &nvmet_port_item_ops,
1927 	.ct_owner		= THIS_MODULE,
1928 };
1929 
1930 static struct config_group *nvmet_ports_make(struct config_group *group,
1931 		const char *name)
1932 {
1933 	struct nvmet_port *port;
1934 	u16 portid;
1935 	u32 i;
1936 
1937 	if (kstrtou16(name, 0, &portid))
1938 		return ERR_PTR(-EINVAL);
1939 
1940 	port = kzalloc(sizeof(*port), GFP_KERNEL);
1941 	if (!port)
1942 		return ERR_PTR(-ENOMEM);
1943 
1944 	port->ana_state = kcalloc(NVMET_MAX_ANAGRPS + 1,
1945 			sizeof(*port->ana_state), GFP_KERNEL);
1946 	if (!port->ana_state) {
1947 		kfree(port);
1948 		return ERR_PTR(-ENOMEM);
1949 	}
1950 
1951 	if (IS_ENABLED(CONFIG_NVME_TARGET_TCP_TLS) && nvme_keyring_id()) {
1952 		port->keyring = key_lookup(nvme_keyring_id());
1953 		if (IS_ERR(port->keyring)) {
1954 			pr_warn("NVMe keyring not available, disabling TLS\n");
1955 			port->keyring = NULL;
1956 		}
1957 	}
1958 
1959 	for (i = 1; i <= NVMET_MAX_ANAGRPS; i++) {
1960 		if (i == NVMET_DEFAULT_ANA_GRPID)
1961 			port->ana_state[1] = NVME_ANA_OPTIMIZED;
1962 		else
1963 			port->ana_state[i] = NVME_ANA_INACCESSIBLE;
1964 	}
1965 
1966 	list_add(&port->global_entry, &nvmet_ports_list);
1967 
1968 	INIT_LIST_HEAD(&port->entry);
1969 	INIT_LIST_HEAD(&port->subsystems);
1970 	INIT_LIST_HEAD(&port->referrals);
1971 	port->inline_data_size = -1;	/* < 0 == let the transport choose */
1972 	port->max_queue_size = -1;	/* < 0 == let the transport choose */
1973 
1974 	port->disc_addr.portid = cpu_to_le16(portid);
1975 	port->disc_addr.adrfam = NVMF_ADDR_FAMILY_MAX;
1976 	port->disc_addr.treq = NVMF_TREQ_DISABLE_SQFLOW;
1977 	config_group_init_type_name(&port->group, name, &nvmet_port_type);
1978 
1979 	config_group_init_type_name(&port->subsys_group,
1980 			"subsystems", &nvmet_port_subsys_type);
1981 	configfs_add_default_group(&port->subsys_group, &port->group);
1982 
1983 	config_group_init_type_name(&port->referrals_group,
1984 			"referrals", &nvmet_referrals_type);
1985 	configfs_add_default_group(&port->referrals_group, &port->group);
1986 
1987 	config_group_init_type_name(&port->ana_groups_group,
1988 			"ana_groups", &nvmet_ana_groups_type);
1989 	configfs_add_default_group(&port->ana_groups_group, &port->group);
1990 
1991 	port->ana_default_group.port = port;
1992 	port->ana_default_group.grpid = NVMET_DEFAULT_ANA_GRPID;
1993 	config_group_init_type_name(&port->ana_default_group.group,
1994 			__stringify(NVMET_DEFAULT_ANA_GRPID),
1995 			&nvmet_ana_group_type);
1996 	configfs_add_default_group(&port->ana_default_group.group,
1997 			&port->ana_groups_group);
1998 
1999 	return &port->group;
2000 }
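
/*
 * Illustrative flow (editorial sketch, not driver code), with configfs
 * mounted at /sys/kernel/config: mkdir triggers nvmet_ports_make(), and the
 * default groups registered above appear automatically:
 *
 *	mkdir /sys/kernel/config/nvmet/ports/1
 *	ls /sys/kernel/config/nvmet/ports/1
 *	#   ana_groups/  referrals/  subsystems/  addr_*  param_*
 *	echo tcp > /sys/kernel/config/nvmet/ports/1/addr_trtype
 *	echo ipv4 > /sys/kernel/config/nvmet/ports/1/addr_adrfam
 *	echo 192.168.0.10 > /sys/kernel/config/nvmet/ports/1/addr_traddr
 *	echo 4420 > /sys/kernel/config/nvmet/ports/1/addr_trsvcid
 */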
2001 
2002 static struct configfs_group_operations nvmet_ports_group_ops = {
2003 	.make_group		= nvmet_ports_make,
2004 };
2005 
2006 static const struct config_item_type nvmet_ports_type = {
2007 	.ct_group_ops		= &nvmet_ports_group_ops,
2008 	.ct_owner		= THIS_MODULE,
2009 };
2010 
2011 static struct config_group nvmet_subsystems_group;
2012 static struct config_group nvmet_ports_group;
2013 
2014 #ifdef CONFIG_NVME_TARGET_AUTH
2015 static ssize_t nvmet_host_dhchap_key_show(struct config_item *item,
2016 		char *page)
2017 {
2018 	u8 *dhchap_secret;
2019 	ssize_t ret;
2020 
2021 	down_read(&nvmet_config_sem);
2022 	dhchap_secret = to_host(item)->dhchap_secret;
2023 	if (!dhchap_secret)
2024 		ret = sprintf(page, "\n");
2025 	else
2026 		ret = sprintf(page, "%s\n", dhchap_secret);
2027 	up_read(&nvmet_config_sem);
2028 	return ret;
2029 }
2030 
2031 static ssize_t nvmet_host_dhchap_key_store(struct config_item *item,
2032 		const char *page, size_t count)
2033 {
2034 	struct nvmet_host *host = to_host(item);
2035 	int ret;
2036 
2037 	ret = nvmet_auth_set_key(host, page, false);
2038 	/*
2039 	 * Re-authentication is a soft state, so keep the
2040 	 * current authentication valid until the host
2041 	 * requests re-authentication.
2042 	 */
2043 	return ret < 0 ? ret : count;
2044 }
2045 
2046 CONFIGFS_ATTR(nvmet_host_, dhchap_key);
2047 
2048 static ssize_t nvmet_host_dhchap_ctrl_key_show(struct config_item *item,
2049 		char *page)
2050 {
2051 	u8 *dhchap_secret;
2052 	ssize_t ret;
2053 
2054 	down_read(&nvmet_config_sem);
2055 	dhchap_secret = to_host(item)->dhchap_ctrl_secret;
2056 	if (!dhchap_secret)
2057 		ret = sprintf(page, "\n");
2058 	else
2059 		ret = sprintf(page, "%s\n", dhchap_secret);
2060 	up_read(&nvmet_config_sem);
2061 	return ret;
2062 }
2063 
2064 static ssize_t nvmet_host_dhchap_ctrl_key_store(struct config_item *item,
2065 		const char *page, size_t count)
2066 {
2067 	struct nvmet_host *host = to_host(item);
2068 	int ret;
2069 
2070 	ret = nvmet_auth_set_key(host, page, true);
2071 	/*
2072 	 * Re-authentication is a soft state, so keep the
2073 	 * current authentication valid until the host
2074 	 * requests re-authentication.
2075 	 */
2076 	return ret < 0 ? ret : count;
2077 }
2078 
2079 CONFIGFS_ATTR(nvmet_host_, dhchap_ctrl_key);
2080 
2081 static ssize_t nvmet_host_dhchap_hash_show(struct config_item *item,
2082 		char *page)
2083 {
2084 	struct nvmet_host *host = to_host(item);
2085 	const char *hash_name = nvme_auth_hmac_name(host->dhchap_hash_id);
2086 
2087 	return sprintf(page, "%s\n", hash_name ? hash_name : "none");
2088 }
2089 
2090 static ssize_t nvmet_host_dhchap_hash_store(struct config_item *item,
2091 		const char *page, size_t count)
2092 {
2093 	struct nvmet_host *host = to_host(item);
2094 	u8 hmac_id;
2095 
2096 	hmac_id = nvme_auth_hmac_id(page);
2097 	if (hmac_id == NVME_AUTH_HASH_INVALID)
2098 		return -EINVAL;
2099 	if (!crypto_has_shash(nvme_auth_hmac_name(hmac_id), 0, 0))
2100 		return -EOPNOTSUPP;
2101 	host->dhchap_hash_id = hmac_id;
2102 	return count;
2103 }
2104 
2105 CONFIGFS_ATTR(nvmet_host_, dhchap_hash);
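
/*
 * Illustrative (editorial sketch): valid hash names are those recognized by
 * nvme_auth_hmac_id(), e.g. with a hypothetical host entry <hostnqn>:
 *
 *	echo "hmac(sha512)" > /sys/kernel/config/nvmet/hosts/<hostnqn>/dhchap_hash
 */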
2106 
2107 static ssize_t nvmet_host_dhchap_dhgroup_show(struct config_item *item,
2108 		char *page)
2109 {
2110 	struct nvmet_host *host = to_host(item);
2111 	const char *dhgroup = nvme_auth_dhgroup_name(host->dhchap_dhgroup_id);
2112 
2113 	return sprintf(page, "%s\n", dhgroup ? dhgroup : "none");
2114 }
2115 
2116 static ssize_t nvmet_host_dhchap_dhgroup_store(struct config_item *item,
2117 		const char *page, size_t count)
2118 {
2119 	struct nvmet_host *host = to_host(item);
2120 	int dhgroup_id;
2121 
2122 	dhgroup_id = nvme_auth_dhgroup_id(page);
2123 	if (dhgroup_id == NVME_AUTH_DHGROUP_INVALID)
2124 		return -EINVAL;
2125 	if (dhgroup_id != NVME_AUTH_DHGROUP_NULL) {
2126 		const char *kpp = nvme_auth_dhgroup_kpp(dhgroup_id);
2127 
2128 		if (!crypto_has_kpp(kpp, 0, 0))
2129 			return -EINVAL;
2130 	}
2131 	host->dhchap_dhgroup_id = dhgroup_id;
2132 	return count;
2133 }
2134 
2135 CONFIGFS_ATTR(nvmet_host_, dhchap_dhgroup);
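
/*
 * Illustrative (editorial sketch): valid group names are those recognized by
 * nvme_auth_dhgroup_id(); a non-null group additionally needs its KPP
 * implementation (e.g. "ffdhe3072(dh)") to be available:
 *
 *	echo ffdhe3072 > /sys/kernel/config/nvmet/hosts/<hostnqn>/dhchap_dhgroup
 */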
2136 
2137 static struct configfs_attribute *nvmet_host_attrs[] = {
2138 	&nvmet_host_attr_dhchap_key,
2139 	&nvmet_host_attr_dhchap_ctrl_key,
2140 	&nvmet_host_attr_dhchap_hash,
2141 	&nvmet_host_attr_dhchap_dhgroup,
2142 	NULL,
2143 };
2144 #endif /* CONFIG_NVME_TARGET_AUTH */
2145 
2146 static void nvmet_host_release(struct config_item *item)
2147 {
2148 	struct nvmet_host *host = to_host(item);
2149 
2150 #ifdef CONFIG_NVME_TARGET_AUTH
2151 	kfree(host->dhchap_secret);
2152 	kfree(host->dhchap_ctrl_secret);
2153 #endif
2154 	kfree(host);
2155 }
2156 
2157 static struct configfs_item_operations nvmet_host_item_ops = {
2158 	.release		= nvmet_host_release,
2159 };
2160 
2161 static const struct config_item_type nvmet_host_type = {
2162 	.ct_item_ops		= &nvmet_host_item_ops,
2163 #ifdef CONFIG_NVME_TARGET_AUTH
2164 	.ct_attrs		= nvmet_host_attrs,
2165 #endif
2166 	.ct_owner		= THIS_MODULE,
2167 };
2168 
2169 static struct config_group *nvmet_hosts_make_group(struct config_group *group,
2170 		const char *name)
2171 {
2172 	struct nvmet_host *host;
2173 
2174 	host = kzalloc(sizeof(*host), GFP_KERNEL);
2175 	if (!host)
2176 		return ERR_PTR(-ENOMEM);
2177 
2178 #ifdef CONFIG_NVME_TARGET_AUTH
2179 	/* Default to SHA256 */
2180 	host->dhchap_hash_id = NVME_AUTH_HASH_SHA256;
2181 #endif
2182 
2183 	config_group_init_type_name(&host->group, name, &nvmet_host_type);
2184 
2185 	return &host->group;
2186 }
2187 
2188 static struct configfs_group_operations nvmet_hosts_group_ops = {
2189 	.make_group		= nvmet_hosts_make_group,
2190 };
2191 
2192 static const struct config_item_type nvmet_hosts_type = {
2193 	.ct_group_ops		= &nvmet_hosts_group_ops,
2194 	.ct_owner		= THIS_MODULE,
2195 };
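
/*
 * Illustrative (editorial sketch): a host is admitted by creating a directory
 * named after its host NQN, and (with CONFIG_NVME_TARGET_AUTH) given a
 * DH-HMAC-CHAP secret in the "DHHC-1:00:...:" transport format:
 *
 *	mkdir "/sys/kernel/config/nvmet/hosts/<hostnqn>"
 *	echo "DHHC-1:00:<base64 secret>:" > \
 *		/sys/kernel/config/nvmet/hosts/<hostnqn>/dhchap_key
 */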
2196 
2197 static struct config_group nvmet_hosts_group;
2198 
2199 static ssize_t nvmet_root_discovery_nqn_show(struct config_item *item,
2200 					     char *page)
2201 {
2202 	return snprintf(page, PAGE_SIZE, "%s\n", nvmet_disc_subsys->subsysnqn);
2203 }
2204 
2205 static ssize_t nvmet_root_discovery_nqn_store(struct config_item *item,
2206 		const char *page, size_t count)
2207 {
2208 	struct list_head *entry;
2209 	size_t len;
2210 
2211 	len = strcspn(page, "\n");
2212 	if (!len || len > NVMF_NQN_FIELD_LEN - 1)
2213 		return -EINVAL;
2214 
2215 	down_write(&nvmet_config_sem);
2216 	list_for_each(entry, &nvmet_subsystems_group.cg_children) {
2217 		struct config_item *item =
2218 			container_of(entry, struct config_item, ci_entry);
2219 
2220 		if (strlen(config_item_name(item)) == len &&
     		    !strncmp(config_item_name(item), page, len)) {
2221 			pr_err("duplicate NQN %s\n", config_item_name(item));
2222 			up_write(&nvmet_config_sem);
2223 			return -EINVAL;
2224 		}
2225 	}
2226 	memset(nvmet_disc_subsys->subsysnqn, 0, NVMF_NQN_FIELD_LEN);
2227 	memcpy(nvmet_disc_subsys->subsysnqn, page, len);
2228 	up_write(&nvmet_config_sem);
2229 
2230 	return count;
2231 }
2232 
2233 CONFIGFS_ATTR(nvmet_root_, discovery_nqn);
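
/*
 * Illustrative (editorial sketch): the discovery subsystem NQN is overridden
 * at the configfs root, e.g.
 *
 *	echo nqn.2024-01.io.example:discovery > \
 *		/sys/kernel/config/nvmet/discovery_nqn
 */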
2234 
2235 static struct configfs_attribute *nvmet_root_attrs[] = {
2236 	&nvmet_root_attr_discovery_nqn,
2237 	NULL,
2238 };
2239 
2240 static const struct config_item_type nvmet_root_type = {
2241 	.ct_attrs		= nvmet_root_attrs,
2242 	.ct_owner		= THIS_MODULE,
2243 };
2244 
2245 static struct configfs_subsystem nvmet_configfs_subsystem = {
2246 	.su_group = {
2247 		.cg_item = {
2248 			.ci_namebuf	= "nvmet",
2249 			.ci_type	= &nvmet_root_type,
2250 		},
2251 	},
2252 };
2253 
2254 int __init nvmet_init_configfs(void)
2255 {
2256 	int ret;
2257 
2258 	config_group_init(&nvmet_configfs_subsystem.su_group);
2259 	mutex_init(&nvmet_configfs_subsystem.su_mutex);
2260 
2261 	config_group_init_type_name(&nvmet_subsystems_group,
2262 			"subsystems", &nvmet_subsystems_type);
2263 	configfs_add_default_group(&nvmet_subsystems_group,
2264 			&nvmet_configfs_subsystem.su_group);
2265 
2266 	config_group_init_type_name(&nvmet_ports_group,
2267 			"ports", &nvmet_ports_type);
2268 	configfs_add_default_group(&nvmet_ports_group,
2269 			&nvmet_configfs_subsystem.su_group);
2270 
2271 	config_group_init_type_name(&nvmet_hosts_group,
2272 			"hosts", &nvmet_hosts_type);
2273 	configfs_add_default_group(&nvmet_hosts_group,
2274 			&nvmet_configfs_subsystem.su_group);
2275 
2276 	ret = configfs_register_subsystem(&nvmet_configfs_subsystem);
2277 	if (ret) {
2278 		pr_err("configfs_register_subsystem: %d\n", ret);
2279 		return ret;
2280 	}
2281 
2282 	return 0;
2283 }
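
/*
 * Resulting top-level layout (illustrative), with configfs mounted at
 * /sys/kernel/config:
 *
 *	/sys/kernel/config/nvmet/
 *		discovery_nqn
 *		hosts/
 *		ports/
 *		subsystems/
 */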
2284 
2285 void __exit nvmet_exit_configfs(void)
2286 {
2287 	configfs_unregister_subsystem(&nvmet_configfs_subsystem);
2288 }
2289