xref: /linux/drivers/nvdimm/namespace_devs.c (revision 005438a8eef063495ac059d128eea71b58de50e5)
1 /*
2  * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of version 2 of the GNU General Public License as
6  * published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful, but
9  * WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  */
13 #include <linux/module.h>
14 #include <linux/device.h>
15 #include <linux/slab.h>
16 #include <linux/nd.h>
17 #include "nd-core.h"
18 #include "nd.h"
19 
20 static void namespace_io_release(struct device *dev)
21 {
22 	struct nd_namespace_io *nsio = to_nd_namespace_io(dev);
23 
24 	kfree(nsio);
25 }
26 
27 static void namespace_pmem_release(struct device *dev)
28 {
29 	struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
30 
31 	kfree(nspm->alt_name);
32 	kfree(nspm->uuid);
33 	kfree(nspm);
34 }
35 
36 static void namespace_blk_release(struct device *dev)
37 {
38 	struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
39 	struct nd_region *nd_region = to_nd_region(dev->parent);
40 
41 	if (nsblk->id >= 0)
42 		ida_simple_remove(&nd_region->ns_ida, nsblk->id);
43 	kfree(nsblk->alt_name);
44 	kfree(nsblk->uuid);
45 	kfree(nsblk->res);
46 	kfree(nsblk);
47 }
48 
49 static struct device_type namespace_io_device_type = {
50 	.name = "nd_namespace_io",
51 	.release = namespace_io_release,
52 };
53 
54 static struct device_type namespace_pmem_device_type = {
55 	.name = "nd_namespace_pmem",
56 	.release = namespace_pmem_release,
57 };
58 
59 static struct device_type namespace_blk_device_type = {
60 	.name = "nd_namespace_blk",
61 	.release = namespace_blk_release,
62 };
63 
64 static bool is_namespace_pmem(struct device *dev)
65 {
66 	return dev ? dev->type == &namespace_pmem_device_type : false;
67 }
68 
69 static bool is_namespace_blk(struct device *dev)
70 {
71 	return dev ? dev->type == &namespace_blk_device_type : false;
72 }
73 
74 static bool is_namespace_io(struct device *dev)
75 {
76 	return dev ? dev->type == &namespace_io_device_type : false;
77 }
78 
79 const char *nvdimm_namespace_disk_name(struct nd_namespace_common *ndns,
80 		char *name)
81 {
82 	struct nd_region *nd_region = to_nd_region(ndns->dev.parent);
83 	const char *suffix = "";
84 
85 	if (ndns->claim && is_nd_btt(ndns->claim))
86 		suffix = "s";
87 
88 	if (is_namespace_pmem(&ndns->dev) || is_namespace_io(&ndns->dev))
89 		sprintf(name, "pmem%d%s", nd_region->id, suffix);
90 	else if (is_namespace_blk(&ndns->dev)) {
91 		struct nd_namespace_blk *nsblk;
92 
93 		nsblk = to_nd_namespace_blk(&ndns->dev);
94 		sprintf(name, "ndblk%d.%d%s", nd_region->id, nsblk->id, suffix);
95 	} else {
96 		return NULL;
97 	}
98 
99 	return name;
100 }
101 EXPORT_SYMBOL(nvdimm_namespace_disk_name);
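
/*
 * Illustrative naming examples (region and namespace ids assumed): a
 * pmem or io namespace in region 3 yields "pmem3", or "pmem3s" when
 * the namespace is claimed by a btt; a blk namespace with id 1 in the
 * same region yields "ndblk3.1", or "ndblk3.1s" under a btt.
 */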
102 
103 static ssize_t nstype_show(struct device *dev,
104 		struct device_attribute *attr, char *buf)
105 {
106 	struct nd_region *nd_region = to_nd_region(dev->parent);
107 
108 	return sprintf(buf, "%d\n", nd_region_to_nstype(nd_region));
109 }
110 static DEVICE_ATTR_RO(nstype);
111 
112 static ssize_t __alt_name_store(struct device *dev, const char *buf,
113 		const size_t len)
114 {
115 	char *input, *pos, *alt_name, **ns_altname;
116 	ssize_t rc;
117 
118 	if (is_namespace_pmem(dev)) {
119 		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
120 
121 		ns_altname = &nspm->alt_name;
122 	} else if (is_namespace_blk(dev)) {
123 		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
124 
125 		ns_altname = &nsblk->alt_name;
126 	} else
127 		return -ENXIO;
128 
129 	if (dev->driver || to_ndns(dev)->claim)
130 		return -EBUSY;
131 
132 	input = kmemdup(buf, len + 1, GFP_KERNEL);
133 	if (!input)
134 		return -ENOMEM;
135 
136 	input[len] = '\0';
137 	pos = strim(input);
138 	if (strlen(pos) + 1 > NSLABEL_NAME_LEN) {
139 		rc = -EINVAL;
140 		goto out;
141 	}
142 
143 	alt_name = kzalloc(NSLABEL_NAME_LEN, GFP_KERNEL);
144 	if (!alt_name) {
145 		rc = -ENOMEM;
146 		goto out;
147 	}
148 	kfree(*ns_altname);
149 	*ns_altname = alt_name;
150 	sprintf(*ns_altname, "%s", pos);
151 	rc = len;
152 
153 out:
154 	kfree(input);
155 	return rc;
156 }
157 
158 static resource_size_t nd_namespace_blk_size(struct nd_namespace_blk *nsblk)
159 {
160 	struct nd_region *nd_region = to_nd_region(nsblk->common.dev.parent);
161 	struct nd_mapping *nd_mapping = &nd_region->mapping[0];
162 	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
163 	struct nd_label_id label_id;
164 	resource_size_t size = 0;
165 	struct resource *res;
166 
167 	if (!nsblk->uuid)
168 		return 0;
169 	nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL);
170 	for_each_dpa_resource(ndd, res)
171 		if (strcmp(res->name, label_id.id) == 0)
172 			size += resource_size(res);
173 	return size;
174 }
175 
176 static bool __nd_namespace_blk_validate(struct nd_namespace_blk *nsblk)
177 {
178 	struct nd_region *nd_region = to_nd_region(nsblk->common.dev.parent);
179 	struct nd_mapping *nd_mapping = &nd_region->mapping[0];
180 	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
181 	struct nd_label_id label_id;
182 	struct resource *res;
183 	int count, i;
184 
185 	if (!nsblk->uuid || !nsblk->lbasize || !ndd)
186 		return false;
187 
188 	count = 0;
189 	nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL);
190 	for_each_dpa_resource(ndd, res) {
191 		if (strcmp(res->name, label_id.id) != 0)
192 			continue;
193 		/*
194 		 * Resources with unacknowledged adjustments indicate a
195 		 * failure to update labels
196 		 */
197 		if (res->flags & DPA_RESOURCE_ADJUSTED)
198 			return false;
199 		count++;
200 	}
201 
202 	/* These values match after a successful label update */
203 	if (count != nsblk->num_resources)
204 		return false;
205 
206 	for (i = 0; i < nsblk->num_resources; i++) {
207 		struct resource *found = NULL;
208 
209 		for_each_dpa_resource(ndd, res)
210 			if (res == nsblk->res[i]) {
211 				found = res;
212 				break;
213 			}
214 		/* stale resource */
215 		if (!found)
216 			return false;
217 	}
218 
219 	return true;
220 }
221 
222 resource_size_t nd_namespace_blk_validate(struct nd_namespace_blk *nsblk)
223 {
224 	resource_size_t size;
225 
226 	nvdimm_bus_lock(&nsblk->common.dev);
227 	size = __nd_namespace_blk_validate(nsblk);
228 	nvdimm_bus_unlock(&nsblk->common.dev);
229 
230 	return size;
231 }
232 EXPORT_SYMBOL(nd_namespace_blk_validate);
233 
234 
235 static int nd_namespace_label_update(struct nd_region *nd_region,
236 		struct device *dev)
237 {
238 	dev_WARN_ONCE(dev, dev->driver || to_ndns(dev)->claim,
239 			"namespace must be idle during label update\n");
240 	if (dev->driver || to_ndns(dev)->claim)
241 		return 0;
242 
243 	/*
244 	 * Only allow label writes that will result in a valid namespace
245 	 * or deletion of an existing namespace.
246 	 */
247 	if (is_namespace_pmem(dev)) {
248 		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
249 		resource_size_t size = resource_size(&nspm->nsio.res);
250 
251 		if (size == 0 && nspm->uuid)
252 			/* delete allocation */;
253 		else if (!nspm->uuid)
254 			return 0;
255 
256 		return nd_pmem_namespace_label_update(nd_region, nspm, size);
257 	} else if (is_namespace_blk(dev)) {
258 		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
259 		resource_size_t size = nd_namespace_blk_size(nsblk);
260 
261 		if (size == 0 && nsblk->uuid)
262 			/* delete allocation */;
263 		else if (!nsblk->uuid || !nsblk->lbasize)
264 			return 0;
265 
266 		return nd_blk_namespace_label_update(nd_region, nsblk, size);
267 	} else
268 		return -ENXIO;
269 }
270 
271 static ssize_t alt_name_store(struct device *dev,
272 		struct device_attribute *attr, const char *buf, size_t len)
273 {
274 	struct nd_region *nd_region = to_nd_region(dev->parent);
275 	ssize_t rc;
276 
277 	device_lock(dev);
278 	nvdimm_bus_lock(dev);
279 	wait_nvdimm_bus_probe_idle(dev);
280 	rc = __alt_name_store(dev, buf, len);
281 	if (rc >= 0)
282 		rc = nd_namespace_label_update(nd_region, dev);
283 	dev_dbg(dev, "%s: %s(%zd)\n", __func__, rc < 0 ? "fail " : "", rc);
284 	nvdimm_bus_unlock(dev);
285 	device_unlock(dev);
286 
287 	return rc < 0 ? rc : len;
288 }
289 
290 static ssize_t alt_name_show(struct device *dev,
291 		struct device_attribute *attr, char *buf)
292 {
293 	char *ns_altname;
294 
295 	if (is_namespace_pmem(dev)) {
296 		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
297 
298 		ns_altname = nspm->alt_name;
299 	} else if (is_namespace_blk(dev)) {
300 		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
301 
302 		ns_altname = nsblk->alt_name;
303 	} else
304 		return -ENXIO;
305 
306 	return sprintf(buf, "%s\n", ns_altname ? ns_altname : "");
307 }
308 static DEVICE_ATTR_RW(alt_name);
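
/*
 * Example interaction from userspace (sysfs path assumed): writing
 * "my-ns" to /sys/bus/nd/devices/namespace0.0/alt_name trims the
 * input, stores it in a NSLABEL_NAME_LEN buffer, and immediately
 * attempts a label update; reading the attribute returns the stored
 * name, or an empty line when none is set.
 */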
309 
310 static int scan_free(struct nd_region *nd_region,
311 		struct nd_mapping *nd_mapping, struct nd_label_id *label_id,
312 		resource_size_t n)
313 {
314 	bool is_blk = strncmp(label_id->id, "blk", 3) == 0;
315 	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
316 	int rc = 0;
317 
318 	while (n) {
319 		struct resource *res, *last;
320 		resource_size_t new_start;
321 
322 		last = NULL;
323 		for_each_dpa_resource(ndd, res)
324 			if (strcmp(res->name, label_id->id) == 0)
325 				last = res;
326 		res = last;
327 		if (!res)
328 			return 0;
329 
330 		if (n >= resource_size(res)) {
331 			n -= resource_size(res);
332 			nd_dbg_dpa(nd_region, ndd, res, "delete %d\n", rc);
333 			nvdimm_free_dpa(ndd, res);
334 			/* retry with last resource deleted */
335 			continue;
336 		}
337 
338 		/*
339 		 * Keep BLK allocations relegated to high DPA as much as
340 		 * possible
341 		 */
342 		if (is_blk)
343 			new_start = res->start + n;
344 		else
345 			new_start = res->start;
346 
347 		rc = adjust_resource(res, new_start, resource_size(res) - n);
348 		if (rc == 0)
349 			res->flags |= DPA_RESOURCE_ADJUSTED;
350 		nd_dbg_dpa(nd_region, ndd, res, "shrink %d\n", rc);
351 		break;
352 	}
353 
354 	return rc;
355 }
356 
357 /**
358  * shrink_dpa_allocation - for each dimm in region free n bytes for label_id
359  * @nd_region: the set of dimms to reclaim @n bytes from
360  * @label_id: unique identifier for the namespace consuming this dpa range
361  * @n: number of bytes per-dimm to release
362  *
363  * Assumes resources are ordered.  Starting from the end try to
364  * adjust_resource() the allocation to @n, but if @n is larger than the
365  * allocation delete it and find the 'new' last allocation in the label
366  * set.
367  */
368 static int shrink_dpa_allocation(struct nd_region *nd_region,
369 		struct nd_label_id *label_id, resource_size_t n)
370 {
371 	int i;
372 
373 	for (i = 0; i < nd_region->ndr_mappings; i++) {
374 		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
375 		int rc;
376 
377 		rc = scan_free(nd_region, nd_mapping, label_id, n);
378 		if (rc)
379 			return rc;
380 	}
381 
382 	return 0;
383 }
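
/*
 * Worked example of scan_free() with assumed numbers: if a label set
 * owns two ordered extents of 0x1000 bytes each and @n is 0x1800, the
 * last extent is deleted outright (n drops to 0x800) and the remaining
 * extent is shrunk by 0x800 via adjust_resource(), picking up
 * DPA_RESOURCE_ADJUSTED so the pending label write can be reconciled.
 * BLK extents give up their low end (new_start = res->start + n) to
 * keep BLK allocations at high DPA.
 */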
384 
385 static resource_size_t init_dpa_allocation(struct nd_label_id *label_id,
386 		struct nd_region *nd_region, struct nd_mapping *nd_mapping,
387 		resource_size_t n)
388 {
389 	bool is_blk = strncmp(label_id->id, "blk", 3) == 0;
390 	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
391 	resource_size_t first_dpa;
392 	struct resource *res;
393 	int rc = 0;
394 
395 	/* allocate blk from highest dpa first */
396 	if (is_blk)
397 		first_dpa = nd_mapping->start + nd_mapping->size - n;
398 	else
399 		first_dpa = nd_mapping->start;
400 
401 	/* first resource allocation for this label-id or dimm */
402 	res = nvdimm_allocate_dpa(ndd, label_id, first_dpa, n);
403 	if (!res)
404 		rc = -EBUSY;
405 
406 	nd_dbg_dpa(nd_region, ndd, res, "init %d\n", rc);
407 	return rc ? n : 0;
408 }
409 
410 static bool space_valid(bool is_pmem, bool is_reserve,
411 		struct nd_label_id *label_id, struct resource *res)
412 {
413 	/*
414 	 * For BLK-space any space is valid; for PMEM-space it must be
415 	 * contiguous with an existing allocation unless we are
416 	 * reserving pmem.
417 	 */
418 	if (is_reserve || !is_pmem)
419 		return true;
420 	if (!res || strcmp(res->name, label_id->id) == 0)
421 		return true;
422 	return false;
423 }
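
/*
 * For example (extents assumed): with an existing "pmem-<uuid>" extent
 * on a dimm, the gap that immediately follows it is valid for growing
 * that same pmem namespace (res->name matches label_id->id), while a
 * gap trailing some other namespace's extent is only usable for blk
 * allocations or during a "pmem-reserve" pass.
 */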
424 
425 enum alloc_loc {
426 	ALLOC_ERR = 0, ALLOC_BEFORE, ALLOC_MID, ALLOC_AFTER,
427 };
428 
429 static resource_size_t scan_allocate(struct nd_region *nd_region,
430 		struct nd_mapping *nd_mapping, struct nd_label_id *label_id,
431 		resource_size_t n)
432 {
433 	resource_size_t mapping_end = nd_mapping->start + nd_mapping->size - 1;
434 	bool is_reserve = strcmp(label_id->id, "pmem-reserve") == 0;
435 	bool is_pmem = strncmp(label_id->id, "pmem", 4) == 0;
436 	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
437 	const resource_size_t to_allocate = n;
438 	struct resource *res;
439 	int first;
440 
441  retry:
442 	first = 0;
443 	for_each_dpa_resource(ndd, res) {
444 		resource_size_t allocate, available = 0, free_start, free_end;
445 		struct resource *next = res->sibling, *new_res = NULL;
446 		enum alloc_loc loc = ALLOC_ERR;
447 		const char *action;
448 		int rc = 0;
449 
450 		/* ignore resources outside this nd_mapping */
451 		if (res->start > mapping_end)
452 			continue;
453 		if (res->end < nd_mapping->start)
454 			continue;
455 
456 		/* space at the beginning of the mapping */
457 		if (!first++ && res->start > nd_mapping->start) {
458 			free_start = nd_mapping->start;
459 			available = res->start - free_start;
460 			if (space_valid(is_pmem, is_reserve, label_id, NULL))
461 				loc = ALLOC_BEFORE;
462 		}
463 
464 		/* space between allocations */
465 		if (!loc && next) {
466 			free_start = res->start + resource_size(res);
467 			free_end = min(mapping_end, next->start - 1);
468 			if (space_valid(is_pmem, is_reserve, label_id, res)
469 					&& free_start < free_end) {
470 				available = free_end + 1 - free_start;
471 				loc = ALLOC_MID;
472 			}
473 		}
474 
475 		/* space at the end of the mapping */
476 		if (!loc && !next) {
477 			free_start = res->start + resource_size(res);
478 			free_end = mapping_end;
479 			if (space_valid(is_pmem, is_reserve, label_id, res)
480 					&& free_start < free_end) {
481 				available = free_end + 1 - free_start;
482 				loc = ALLOC_AFTER;
483 			}
484 		}
485 
486 		if (!loc || !available)
487 			continue;
488 		allocate = min(available, n);
489 		switch (loc) {
490 		case ALLOC_BEFORE:
491 			if (strcmp(res->name, label_id->id) == 0) {
492 				/* adjust current resource up */
493 				if (is_pmem && !is_reserve)
494 					return n;
495 				rc = adjust_resource(res, res->start - allocate,
496 						resource_size(res) + allocate);
497 				action = "cur grow up";
498 			} else
499 				action = "allocate";
500 			break;
501 		case ALLOC_MID:
502 			if (strcmp(next->name, label_id->id) == 0) {
503 				/* adjust next resource up */
504 				if (is_pmem && !is_reserve)
505 					return n;
506 				rc = adjust_resource(next, next->start
507 						- allocate, resource_size(next)
508 						+ allocate);
509 				new_res = next;
510 				action = "next grow up";
511 			} else if (strcmp(res->name, label_id->id) == 0) {
512 				action = "grow down";
513 			} else
514 				action = "allocate";
515 			break;
516 		case ALLOC_AFTER:
517 			if (strcmp(res->name, label_id->id) == 0)
518 				action = "grow down";
519 			else
520 				action = "allocate";
521 			break;
522 		default:
523 			return n;
524 		}
525 
526 		if (strcmp(action, "allocate") == 0) {
527 			/* BLK allocate bottom up */
528 			if (!is_pmem)
529 				free_start += available - allocate;
530 			else if (!is_reserve && free_start != nd_mapping->start)
531 				return n;
532 
533 			new_res = nvdimm_allocate_dpa(ndd, label_id,
534 					free_start, allocate);
535 			if (!new_res)
536 				rc = -EBUSY;
537 		} else if (strcmp(action, "grow down") == 0) {
538 			/* adjust current resource down */
539 			rc = adjust_resource(res, res->start, resource_size(res)
540 					+ allocate);
541 			if (rc == 0)
542 				res->flags |= DPA_RESOURCE_ADJUSTED;
543 		}
544 
545 		if (!new_res)
546 			new_res = res;
547 
548 		nd_dbg_dpa(nd_region, ndd, new_res, "%s(%d) %d\n",
549 				action, loc, rc);
550 
551 		if (rc)
552 			return n;
553 
554 		n -= allocate;
555 		if (n) {
556 			/*
557 			 * Retry scan with newly inserted resources.
558 			 * For example, if we did an ALLOC_BEFORE
559 			 * insertion there may also have been space
560 			 * available for an ALLOC_AFTER insertion, so we
561 			 * need to check this same resource again
562 			 */
563 			goto retry;
564 		} else
565 			return 0;
566 	}
567 
568 	/*
569 	 * If we allocated nothing in the BLK case it may be because we are in
570 	 * an initial "pmem-reserve pass".  Only do an initial BLK allocation
571 	 * when none of the DPA space is reserved.
572 	 */
573 	if ((is_pmem || !ndd->dpa.child) && n == to_allocate)
574 		return init_dpa_allocation(label_id, nd_region, nd_mapping, n);
575 	return n;
576 }
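
/*
 * Sketch of a single scan_allocate() pass (DPA values assumed): in a
 * mapping spanning 0x0-0xffff with a foreign extent at 0x4000-0x7fff,
 * the scan sees ALLOC_BEFORE space at 0x0-0x3fff and ALLOC_AFTER space
 * at 0x8000-0xffff.  A blk request carves from the top of the chosen
 * range (free_start += available - allocate), a plain pmem "allocate"
 * must begin at nd_mapping->start, and any shortfall loops back to
 * the retry label so a freshly inserted extent can be grown further.
 */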
577 
578 static int merge_dpa(struct nd_region *nd_region,
579 		struct nd_mapping *nd_mapping, struct nd_label_id *label_id)
580 {
581 	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
582 	struct resource *res;
583 
584 	if (strncmp("pmem", label_id->id, 4) == 0)
585 		return 0;
586  retry:
587 	for_each_dpa_resource(ndd, res) {
588 		int rc;
589 		struct resource *next = res->sibling;
590 		resource_size_t end = res->start + resource_size(res);
591 
592 		if (!next || strcmp(res->name, label_id->id) != 0
593 				|| strcmp(next->name, label_id->id) != 0
594 				|| end != next->start)
595 			continue;
596 		end += resource_size(next);
597 		nvdimm_free_dpa(ndd, next);
598 		rc = adjust_resource(res, res->start, end - res->start);
599 		nd_dbg_dpa(nd_region, ndd, res, "merge %d\n", rc);
600 		if (rc)
601 			return rc;
602 		res->flags |= DPA_RESOURCE_ADJUSTED;
603 		goto retry;
604 	}
605 
606 	return 0;
607 }
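
/*
 * For example (extents assumed): after a grow operation a dimm may
 * hold "blk-<uuid>" extents at 0x8000-0x8fff and 0x9000-0x9fff;
 * merge_dpa() frees the second and adjust_resource()s the first to
 * cover 0x8000-0x9fff, then rescans until no adjacent pair remains.
 * pmem label sets are skipped since they are kept contiguous by
 * construction.
 */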
608 
609 static int __reserve_free_pmem(struct device *dev, void *data)
610 {
611 	struct nvdimm *nvdimm = data;
612 	struct nd_region *nd_region;
613 	struct nd_label_id label_id;
614 	int i;
615 
616 	if (!is_nd_pmem(dev))
617 		return 0;
618 
619 	nd_region = to_nd_region(dev);
620 	if (nd_region->ndr_mappings == 0)
621 		return 0;
622 
623 	memset(&label_id, 0, sizeof(label_id));
624 	strcat(label_id.id, "pmem-reserve");
625 	for (i = 0; i < nd_region->ndr_mappings; i++) {
626 		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
627 		resource_size_t n, rem = 0;
628 
629 		if (nd_mapping->nvdimm != nvdimm)
630 			continue;
631 
632 		n = nd_pmem_available_dpa(nd_region, nd_mapping, &rem);
633 		if (n == 0)
634 			return 0;
635 		rem = scan_allocate(nd_region, nd_mapping, &label_id, n);
636 		dev_WARN_ONCE(&nd_region->dev, rem,
637 				"pmem reserve underrun: %#llx of %#llx bytes\n",
638 				(unsigned long long) n - rem,
639 				(unsigned long long) n);
640 		return rem ? -ENXIO : 0;
641 	}
642 
643 	return 0;
644 }
645 
646 static void release_free_pmem(struct nvdimm_bus *nvdimm_bus,
647 		struct nd_mapping *nd_mapping)
648 {
649 	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
650 	struct resource *res, *_res;
651 
652 	for_each_dpa_resource_safe(ndd, res, _res)
653 		if (strcmp(res->name, "pmem-reserve") == 0)
654 			nvdimm_free_dpa(ndd, res);
655 }
656 
657 static int reserve_free_pmem(struct nvdimm_bus *nvdimm_bus,
658 		struct nd_mapping *nd_mapping)
659 {
660 	struct nvdimm *nvdimm = nd_mapping->nvdimm;
661 	int rc;
662 
663 	rc = device_for_each_child(&nvdimm_bus->dev, nvdimm,
664 			__reserve_free_pmem);
665 	if (rc)
666 		release_free_pmem(nvdimm_bus, nd_mapping);
667 	return rc;
668 }
669 
670 /**
671  * grow_dpa_allocation - for each dimm allocate n bytes for @label_id
672  * @nd_region: the set of dimms to allocate @n more bytes from
673  * @label_id: unique identifier for the namespace consuming this dpa range
674  * @n: number of bytes per-dimm to add to the existing allocation
675  *
676  * Assumes resources are ordered.  For BLK regions, first consume
677  * BLK-only available DPA free space, then consume PMEM-aliased DPA
678  * space starting at the highest DPA.  For PMEM regions start
679  * allocations from the start of an interleave set and end at the first
680  * BLK allocation or the end of the interleave set, whichever comes
681  * first.
682  */
683 static int grow_dpa_allocation(struct nd_region *nd_region,
684 		struct nd_label_id *label_id, resource_size_t n)
685 {
686 	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
687 	bool is_pmem = strncmp(label_id->id, "pmem", 4) == 0;
688 	int i;
689 
690 	for (i = 0; i < nd_region->ndr_mappings; i++) {
691 		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
692 		resource_size_t rem = n;
693 		int rc, j;
694 
695 		/*
696 		 * In the BLK case try once with all unallocated PMEM
697 		 * reserved, and once without
698 		 */
699 		for (j = is_pmem; j < 2; j++) {
700 			bool blk_only = j == 0;
701 
702 			if (blk_only) {
703 				rc = reserve_free_pmem(nvdimm_bus, nd_mapping);
704 				if (rc)
705 					return rc;
706 			}
707 			rem = scan_allocate(nd_region, nd_mapping,
708 					label_id, rem);
709 			if (blk_only)
710 				release_free_pmem(nvdimm_bus, nd_mapping);
711 
712 			/* try again and allow encroachments into PMEM */
713 			if (rem == 0)
714 				break;
715 		}
716 
717 		dev_WARN_ONCE(&nd_region->dev, rem,
718 				"allocation underrun: %#llx of %#llx bytes\n",
719 				(unsigned long long) n - rem,
720 				(unsigned long long) n);
721 		if (rem)
722 			return -ENXIO;
723 
724 		rc = merge_dpa(nd_region, nd_mapping, label_id);
725 		if (rc)
726 			return rc;
727 	}
728 
729 	return 0;
730 }
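
/*
 * The j loop above is effectively a two-pass policy for blk (j starts
 * at 0): pass one reserves all unallocated pmem so blk is placed in
 * blk-only capacity first, pass two drops the reservation and lets blk
 * encroach on pmem-capable DPA.  For a pmem label_id, j starts at 1
 * and only the unreserved pass runs.
 */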
731 
732 static void nd_namespace_pmem_set_size(struct nd_region *nd_region,
733 		struct nd_namespace_pmem *nspm, resource_size_t size)
734 {
735 	struct resource *res = &nspm->nsio.res;
736 
737 	res->start = nd_region->ndr_start;
738 	res->end = nd_region->ndr_start + size - 1;
739 }
740 
741 static ssize_t __size_store(struct device *dev, unsigned long long val)
742 {
743 	resource_size_t allocated = 0, available = 0;
744 	struct nd_region *nd_region = to_nd_region(dev->parent);
745 	struct nd_mapping *nd_mapping;
746 	struct nvdimm_drvdata *ndd;
747 	struct nd_label_id label_id;
748 	u32 flags = 0, remainder;
749 	u8 *uuid = NULL;
750 	int rc, i;
751 
752 	if (dev->driver || to_ndns(dev)->claim)
753 		return -EBUSY;
754 
755 	if (is_namespace_pmem(dev)) {
756 		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
757 
758 		uuid = nspm->uuid;
759 	} else if (is_namespace_blk(dev)) {
760 		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
761 
762 		uuid = nsblk->uuid;
763 		flags = NSLABEL_FLAG_LOCAL;
764 	}
765 
766 	/*
767 	 * We need a uuid for the allocation-label and dimm(s) on which
768 	 * to store the label.
769 	 */
770 	if (!uuid || nd_region->ndr_mappings == 0)
771 		return -ENXIO;
772 
773 	div_u64_rem(val, SZ_4K * nd_region->ndr_mappings, &remainder);
774 	if (remainder) {
775 		dev_dbg(dev, "%llu is not %dK aligned\n", val,
776 				(SZ_4K * nd_region->ndr_mappings) / SZ_1K);
777 		return -EINVAL;
778 	}
779 
780 	nd_label_gen_id(&label_id, uuid, flags);
781 	for (i = 0; i < nd_region->ndr_mappings; i++) {
782 		nd_mapping = &nd_region->mapping[i];
783 		ndd = to_ndd(nd_mapping);
784 
785 		/*
786 		 * All dimms in an interleave set, or the base dimm for a blk
787 		 * region, need to be enabled for the size to be changed.
788 		 */
789 		if (!ndd)
790 			return -ENXIO;
791 
792 		allocated += nvdimm_allocated_dpa(ndd, &label_id);
793 	}
794 	available = nd_region_available_dpa(nd_region);
795 
796 	if (val > available + allocated)
797 		return -ENOSPC;
798 
799 	if (val == allocated)
800 		return 0;
801 
802 	val = div_u64(val, nd_region->ndr_mappings);
803 	allocated = div_u64(allocated, nd_region->ndr_mappings);
804 	if (val < allocated)
805 		rc = shrink_dpa_allocation(nd_region, &label_id,
806 				allocated - val);
807 	else
808 		rc = grow_dpa_allocation(nd_region, &label_id, val - allocated);
809 
810 	if (rc)
811 		return rc;
812 
813 	if (is_namespace_pmem(dev)) {
814 		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
815 
816 		nd_namespace_pmem_set_size(nd_region, nspm,
817 				val * nd_region->ndr_mappings);
818 	} else if (is_namespace_blk(dev)) {
819 		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
820 
821 		/*
822 		 * Try to delete the namespace if we deleted all of its
823 		 * allocation, this is not the seed device for the
824 		 * region, and it is not actively claimed by a btt
825 		 * instance.
826 		 */
827 		if (val == 0 && nd_region->ns_seed != dev
828 				&& !nsblk->common.claim)
829 			nd_device_unregister(dev, ND_ASYNC);
830 	}
831 
832 	return rc;
833 }
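
/*
 * Worked example of the arithmetic above (geometry assumed): in a
 * 4-way interleaved pmem region, val must be a multiple of
 * 4 * SZ_4K = 16K.  Writing 1G then grows each of the four per-dimm
 * allocations by 256M, and nd_namespace_pmem_set_size() re-sizes the
 * namespace resource to 4 * 256M = 1G.
 */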
834 
835 static ssize_t size_store(struct device *dev,
836 		struct device_attribute *attr, const char *buf, size_t len)
837 {
838 	struct nd_region *nd_region = to_nd_region(dev->parent);
839 	unsigned long long val;
840 	u8 **uuid = NULL;
841 	int rc;
842 
843 	rc = kstrtoull(buf, 0, &val);
844 	if (rc)
845 		return rc;
846 
847 	device_lock(dev);
848 	nvdimm_bus_lock(dev);
849 	wait_nvdimm_bus_probe_idle(dev);
850 	rc = __size_store(dev, val);
851 	if (rc >= 0)
852 		rc = nd_namespace_label_update(nd_region, dev);
853 
854 	if (is_namespace_pmem(dev)) {
855 		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
856 
857 		uuid = &nspm->uuid;
858 	} else if (is_namespace_blk(dev)) {
859 		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
860 
861 		uuid = &nsblk->uuid;
862 	}
863 
864 	if (rc == 0 && val == 0 && uuid) {
865 		/* setting size zero == 'delete namespace' */
866 		kfree(*uuid);
867 		*uuid = NULL;
868 	}
869 
870 	dev_dbg(dev, "%s: %llx %s (%d)\n", __func__, val, rc < 0
871 			? "fail" : "success", rc);
872 
873 	nvdimm_bus_unlock(dev);
874 	device_unlock(dev);
875 
876 	return rc < 0 ? rc : len;
877 }
878 
879 resource_size_t __nvdimm_namespace_capacity(struct nd_namespace_common *ndns)
880 {
881 	struct device *dev = &ndns->dev;
882 
883 	if (is_namespace_pmem(dev)) {
884 		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
885 
886 		return resource_size(&nspm->nsio.res);
887 	} else if (is_namespace_blk(dev)) {
888 		return nd_namespace_blk_size(to_nd_namespace_blk(dev));
889 	} else if (is_namespace_io(dev)) {
890 		struct nd_namespace_io *nsio = to_nd_namespace_io(dev);
891 
892 		return resource_size(&nsio->res);
893 	} else
894 		WARN_ONCE(1, "unknown namespace type\n");
895 	return 0;
896 }
897 
898 resource_size_t nvdimm_namespace_capacity(struct nd_namespace_common *ndns)
899 {
900 	resource_size_t size;
901 
902 	nvdimm_bus_lock(&ndns->dev);
903 	size = __nvdimm_namespace_capacity(ndns);
904 	nvdimm_bus_unlock(&ndns->dev);
905 
906 	return size;
907 }
908 EXPORT_SYMBOL(nvdimm_namespace_capacity);
909 
910 static ssize_t size_show(struct device *dev,
911 		struct device_attribute *attr, char *buf)
912 {
913 	return sprintf(buf, "%llu\n", (unsigned long long)
914 			nvdimm_namespace_capacity(to_ndns(dev)));
915 }
916 static DEVICE_ATTR(size, S_IRUGO, size_show, size_store);
917 
918 static ssize_t uuid_show(struct device *dev,
919 		struct device_attribute *attr, char *buf)
920 {
921 	u8 *uuid;
922 
923 	if (is_namespace_pmem(dev)) {
924 		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
925 
926 		uuid = nspm->uuid;
927 	} else if (is_namespace_blk(dev)) {
928 		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
929 
930 		uuid = nsblk->uuid;
931 	} else
932 		return -ENXIO;
933 
934 	if (uuid)
935 		return sprintf(buf, "%pUb\n", uuid);
936 	return sprintf(buf, "\n");
937 }
938 
939 /**
940  * namespace_update_uuid - check for a unique uuid and whether we're "renaming"
941  * @nd_region: parent region so we can update all dimms in the set
942  * @dev: namespace type for generating label_id
943  * @new_uuid: incoming uuid
944  * @old_uuid: reference to the uuid storage location in the namespace object
945  */
946 static int namespace_update_uuid(struct nd_region *nd_region,
947 		struct device *dev, u8 *new_uuid, u8 **old_uuid)
948 {
949 	u32 flags = is_namespace_blk(dev) ? NSLABEL_FLAG_LOCAL : 0;
950 	struct nd_label_id old_label_id;
951 	struct nd_label_id new_label_id;
952 	int i;
953 
954 	if (!nd_is_uuid_unique(dev, new_uuid))
955 		return -EINVAL;
956 
957 	if (*old_uuid == NULL)
958 		goto out;
959 
960 	/*
961 	 * If we've already written a label with this uuid, then it's
962 	 * too late to rename because we can't reliably update the uuid
963 	 * without losing the old namespace.  Userspace must delete this
964 	 * namespace to abandon the old uuid.
965 	 */
966 	for (i = 0; i < nd_region->ndr_mappings; i++) {
967 		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
968 
969 		/*
970 		 * This check by itself is sufficient because old_uuid
971 		 * would be NULL above if this uuid did not exist in the
972 		 * currently written set.
973 		 *
974 		 * FIXME: can we delete uuid with zero dpa allocated?
975 		 */
976 		if (nd_mapping->labels)
977 			return -EBUSY;
978 	}
979 
980 	nd_label_gen_id(&old_label_id, *old_uuid, flags);
981 	nd_label_gen_id(&new_label_id, new_uuid, flags);
982 	for (i = 0; i < nd_region->ndr_mappings; i++) {
983 		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
984 		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
985 		struct resource *res;
986 
987 		for_each_dpa_resource(ndd, res)
988 			if (strcmp(res->name, old_label_id.id) == 0)
989 				sprintf((void *) res->name, "%s",
990 						new_label_id.id);
991 	}
992 	kfree(*old_uuid);
993  out:
994 	*old_uuid = new_uuid;
995 	return 0;
996 }
997 
998 static ssize_t uuid_store(struct device *dev,
999 		struct device_attribute *attr, const char *buf, size_t len)
1000 {
1001 	struct nd_region *nd_region = to_nd_region(dev->parent);
1002 	u8 *uuid = NULL;
1003 	ssize_t rc = 0;
1004 	u8 **ns_uuid;
1005 
1006 	if (is_namespace_pmem(dev)) {
1007 		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
1008 
1009 		ns_uuid = &nspm->uuid;
1010 	} else if (is_namespace_blk(dev)) {
1011 		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
1012 
1013 		ns_uuid = &nsblk->uuid;
1014 	} else
1015 		return -ENXIO;
1016 
1017 	device_lock(dev);
1018 	nvdimm_bus_lock(dev);
1019 	wait_nvdimm_bus_probe_idle(dev);
1020 	if (to_ndns(dev)->claim)
1021 		rc = -EBUSY;
1022 	if (rc >= 0)
1023 		rc = nd_uuid_store(dev, &uuid, buf, len);
1024 	if (rc >= 0)
1025 		rc = namespace_update_uuid(nd_region, dev, uuid, ns_uuid);
1026 	if (rc >= 0)
1027 		rc = nd_namespace_label_update(nd_region, dev);
1028 	else
1029 		kfree(uuid);
1030 	dev_dbg(dev, "%s: result: %zd wrote: %s%s", __func__,
1031 			rc, buf, buf[len - 1] == '\n' ? "" : "\n");
1032 	nvdimm_bus_unlock(dev);
1033 	device_unlock(dev);
1034 
1035 	return rc < 0 ? rc : len;
1036 }
1037 static DEVICE_ATTR_RW(uuid);
1038 
1039 static ssize_t resource_show(struct device *dev,
1040 		struct device_attribute *attr, char *buf)
1041 {
1042 	struct resource *res;
1043 
1044 	if (is_namespace_pmem(dev)) {
1045 		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
1046 
1047 		res = &nspm->nsio.res;
1048 	} else if (is_namespace_io(dev)) {
1049 		struct nd_namespace_io *nsio = to_nd_namespace_io(dev);
1050 
1051 		res = &nsio->res;
1052 	} else
1053 		return -ENXIO;
1054 
1055 	/* no address to convey if the namespace has no allocation */
1056 	if (resource_size(res) == 0)
1057 		return -ENXIO;
1058 	return sprintf(buf, "%#llx\n", (unsigned long long) res->start);
1059 }
1060 static DEVICE_ATTR_RO(resource);
1061 
1062 static const unsigned long ns_lbasize_supported[] = { 512, 520, 528,
1063 	4096, 4104, 4160, 4224, 0 };
1064 
1065 static ssize_t sector_size_show(struct device *dev,
1066 		struct device_attribute *attr, char *buf)
1067 {
1068 	struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
1069 
1070 	if (!is_namespace_blk(dev))
1071 		return -ENXIO;
1072 
1073 	return nd_sector_size_show(nsblk->lbasize, ns_lbasize_supported, buf);
1074 }
1075 
1076 static ssize_t sector_size_store(struct device *dev,
1077 		struct device_attribute *attr, const char *buf, size_t len)
1078 {
1079 	struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
1080 	struct nd_region *nd_region = to_nd_region(dev->parent);
1081 	ssize_t rc = 0;
1082 
1083 	if (!is_namespace_blk(dev))
1084 		return -ENXIO;
1085 
1086 	device_lock(dev);
1087 	nvdimm_bus_lock(dev);
1088 	if (to_ndns(dev)->claim)
1089 		rc = -EBUSY;
1090 	if (rc >= 0)
1091 		rc = nd_sector_size_store(dev, buf, &nsblk->lbasize,
1092 				ns_lbasize_supported);
1093 	if (rc >= 0)
1094 		rc = nd_namespace_label_update(nd_region, dev);
1095 	dev_dbg(dev, "%s: result: %zd %s: %s%s", __func__,
1096 			rc, rc < 0 ? "tried" : "wrote", buf,
1097 			buf[len - 1] == '\n' ? "" : "\n");
1098 	nvdimm_bus_unlock(dev);
1099 	device_unlock(dev);
1100 
1101 	return rc ? rc : len;
1102 }
1103 static DEVICE_ATTR_RW(sector_size);
1104 
1105 static ssize_t dpa_extents_show(struct device *dev,
1106 		struct device_attribute *attr, char *buf)
1107 {
1108 	struct nd_region *nd_region = to_nd_region(dev->parent);
1109 	struct nd_label_id label_id;
1110 	int count = 0, i;
1111 	u8 *uuid = NULL;
1112 	u32 flags = 0;
1113 
1114 	nvdimm_bus_lock(dev);
1115 	if (is_namespace_pmem(dev)) {
1116 		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
1117 
1118 		uuid = nspm->uuid;
1119 		flags = 0;
1120 	} else if (is_namespace_blk(dev)) {
1121 		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
1122 
1123 		uuid = nsblk->uuid;
1124 		flags = NSLABEL_FLAG_LOCAL;
1125 	}
1126 
1127 	if (!uuid)
1128 		goto out;
1129 
1130 	nd_label_gen_id(&label_id, uuid, flags);
1131 	for (i = 0; i < nd_region->ndr_mappings; i++) {
1132 		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
1133 		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
1134 		struct resource *res;
1135 
1136 		for_each_dpa_resource(ndd, res)
1137 			if (strcmp(res->name, label_id.id) == 0)
1138 				count++;
1139 	}
1140  out:
1141 	nvdimm_bus_unlock(dev);
1142 
1143 	return sprintf(buf, "%d\n", count);
1144 }
1145 static DEVICE_ATTR_RO(dpa_extents);
1146 
1147 static ssize_t holder_show(struct device *dev,
1148 		struct device_attribute *attr, char *buf)
1149 {
1150 	struct nd_namespace_common *ndns = to_ndns(dev);
1151 	ssize_t rc;
1152 
1153 	device_lock(dev);
1154 	rc = sprintf(buf, "%s\n", ndns->claim ? dev_name(ndns->claim) : "");
1155 	device_unlock(dev);
1156 
1157 	return rc;
1158 }
1159 static DEVICE_ATTR_RO(holder);
1160 
1161 static ssize_t force_raw_store(struct device *dev,
1162 		struct device_attribute *attr, const char *buf, size_t len)
1163 {
1164 	bool force_raw;
1165 	int rc = strtobool(buf, &force_raw);
1166 
1167 	if (rc)
1168 		return rc;
1169 
1170 	to_ndns(dev)->force_raw = force_raw;
1171 	return len;
1172 }
1173 
1174 static ssize_t force_raw_show(struct device *dev,
1175 		struct device_attribute *attr, char *buf)
1176 {
1177 	return sprintf(buf, "%d\n", to_ndns(dev)->force_raw);
1178 }
1179 static DEVICE_ATTR_RW(force_raw);
1180 
1181 static struct attribute *nd_namespace_attributes[] = {
1182 	&dev_attr_nstype.attr,
1183 	&dev_attr_size.attr,
1184 	&dev_attr_uuid.attr,
1185 	&dev_attr_holder.attr,
1186 	&dev_attr_resource.attr,
1187 	&dev_attr_alt_name.attr,
1188 	&dev_attr_force_raw.attr,
1189 	&dev_attr_sector_size.attr,
1190 	&dev_attr_dpa_extents.attr,
1191 	NULL,
1192 };
1193 
1194 static umode_t namespace_visible(struct kobject *kobj,
1195 		struct attribute *a, int n)
1196 {
1197 	struct device *dev = container_of(kobj, struct device, kobj);
1198 
1199 	if (a == &dev_attr_resource.attr) {
1200 		if (is_namespace_blk(dev))
1201 			return 0;
1202 		return a->mode;
1203 	}
1204 
1205 	if (is_namespace_pmem(dev) || is_namespace_blk(dev)) {
1206 		if (a == &dev_attr_size.attr)
1207 			return S_IWUSR | S_IRUGO;
1208 
1209 		if (is_namespace_pmem(dev) && a == &dev_attr_sector_size.attr)
1210 			return 0;
1211 
1212 		return a->mode;
1213 	}
1214 
1215 	if (a == &dev_attr_nstype.attr || a == &dev_attr_size.attr
1216 			|| a == &dev_attr_holder.attr
1217 			|| a == &dev_attr_force_raw.attr)
1218 		return a->mode;
1219 
1220 	return 0;
1221 }
1222 
1223 static struct attribute_group nd_namespace_attribute_group = {
1224 	.attrs = nd_namespace_attributes,
1225 	.is_visible = namespace_visible,
1226 };
1227 
1228 static const struct attribute_group *nd_namespace_attribute_groups[] = {
1229 	&nd_device_attribute_group,
1230 	&nd_namespace_attribute_group,
1231 	&nd_numa_attribute_group,
1232 	NULL,
1233 };
1234 
1235 struct nd_namespace_common *nvdimm_namespace_common_probe(struct device *dev)
1236 {
1237 	struct nd_btt *nd_btt = is_nd_btt(dev) ? to_nd_btt(dev) : NULL;
1238 	struct nd_namespace_common *ndns;
1239 	resource_size_t size;
1240 
1241 	if (nd_btt) {
1242 		ndns = nd_btt->ndns;
1243 		if (!ndns)
1244 			return ERR_PTR(-ENODEV);
1245 
1246 		/*
1247 		 * Flush any in-progress probes / removals in the driver
1248 		 * for the raw personality of this namespace.
1249 		 */
1250 		device_lock(&ndns->dev);
1251 		device_unlock(&ndns->dev);
1252 		if (ndns->dev.driver) {
1253 			dev_dbg(&ndns->dev, "is active, can't bind %s\n",
1254 					dev_name(&nd_btt->dev));
1255 			return ERR_PTR(-EBUSY);
1256 		}
1257 		if (dev_WARN_ONCE(&ndns->dev, ndns->claim != &nd_btt->dev,
1258 					"host (%s) vs claim (%s) mismatch\n",
1259 					dev_name(&nd_btt->dev),
1260 					dev_name(ndns->claim)))
1261 			return ERR_PTR(-ENXIO);
1262 	} else {
1263 		ndns = to_ndns(dev);
1264 		if (ndns->claim) {
1265 			dev_dbg(dev, "claimed by %s, failing probe\n",
1266 				dev_name(ndns->claim));
1267 
1268 			return ERR_PTR(-ENXIO);
1269 		}
1270 	}
1271 
1272 	size = nvdimm_namespace_capacity(ndns);
1273 	if (size < ND_MIN_NAMESPACE_SIZE) {
1274 		dev_dbg(&ndns->dev, "%pa, too small, must be at least %#x\n",
1275 				&size, ND_MIN_NAMESPACE_SIZE);
1276 		return ERR_PTR(-ENODEV);
1277 	}
1278 
1279 	if (is_namespace_pmem(&ndns->dev)) {
1280 		struct nd_namespace_pmem *nspm;
1281 
1282 		nspm = to_nd_namespace_pmem(&ndns->dev);
1283 		if (!nspm->uuid) {
1284 			dev_dbg(&ndns->dev, "%s: uuid not set\n", __func__);
1285 			return ERR_PTR(-ENODEV);
1286 		}
1287 	} else if (is_namespace_blk(&ndns->dev)) {
1288 		struct nd_namespace_blk *nsblk;
1289 
1290 		nsblk = to_nd_namespace_blk(&ndns->dev);
1291 		if (!nd_namespace_blk_validate(nsblk))
1292 			return ERR_PTR(-ENODEV);
1293 	}
1294 
1295 	return ndns;
1296 }
1297 EXPORT_SYMBOL(nvdimm_namespace_common_probe);
1298 
1299 static struct device **create_namespace_io(struct nd_region *nd_region)
1300 {
1301 	struct nd_namespace_io *nsio;
1302 	struct device *dev, **devs;
1303 	struct resource *res;
1304 
1305 	nsio = kzalloc(sizeof(*nsio), GFP_KERNEL);
1306 	if (!nsio)
1307 		return NULL;
1308 
1309 	devs = kcalloc(2, sizeof(struct device *), GFP_KERNEL);
1310 	if (!devs) {
1311 		kfree(nsio);
1312 		return NULL;
1313 	}
1314 
1315 	dev = &nsio->common.dev;
1316 	dev->type = &namespace_io_device_type;
1317 	dev->parent = &nd_region->dev;
1318 	res = &nsio->res;
1319 	res->name = dev_name(&nd_region->dev);
1320 	res->flags = IORESOURCE_MEM;
1321 	res->start = nd_region->ndr_start;
1322 	res->end = res->start + nd_region->ndr_size - 1;
1323 
1324 	devs[0] = dev;
1325 	return devs;
1326 }
1327 
1328 static bool has_uuid_at_pos(struct nd_region *nd_region, u8 *uuid,
1329 		u64 cookie, u16 pos)
1330 {
1331 	struct nd_namespace_label *found = NULL;
1332 	int i;
1333 
1334 	for (i = 0; i < nd_region->ndr_mappings; i++) {
1335 		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
1336 		struct nd_namespace_label *nd_label;
1337 		bool found_uuid = false;
1338 		int l;
1339 
1340 		for_each_label(l, nd_label, nd_mapping->labels) {
1341 			u64 isetcookie = __le64_to_cpu(nd_label->isetcookie);
1342 			u16 position = __le16_to_cpu(nd_label->position);
1343 			u16 nlabel = __le16_to_cpu(nd_label->nlabel);
1344 
1345 			if (isetcookie != cookie)
1346 				continue;
1347 
1348 			if (memcmp(nd_label->uuid, uuid, NSLABEL_UUID_LEN) != 0)
1349 				continue;
1350 
1351 			if (found_uuid) {
1352 				dev_dbg(to_ndd(nd_mapping)->dev,
1353 						"%s duplicate entry for uuid\n",
1354 						__func__);
1355 				return false;
1356 			}
1357 			found_uuid = true;
1358 			if (nlabel != nd_region->ndr_mappings)
1359 				continue;
1360 			if (position != pos)
1361 				continue;
1362 			found = nd_label;
1363 			break;
1364 		}
1365 		if (found)
1366 			break;
1367 	}
1368 	return found != NULL;
1369 }
1370 
1371 static int select_pmem_id(struct nd_region *nd_region, u8 *pmem_id)
1372 {
1373 	struct nd_namespace_label *select = NULL;
1374 	int i;
1375 
1376 	if (!pmem_id)
1377 		return -ENODEV;
1378 
1379 	for (i = 0; i < nd_region->ndr_mappings; i++) {
1380 		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
1381 		struct nd_namespace_label *nd_label;
1382 		u64 hw_start, hw_end, pmem_start, pmem_end;
1383 		int l;
1384 
1385 		for_each_label(l, nd_label, nd_mapping->labels)
1386 			if (memcmp(nd_label->uuid, pmem_id, NSLABEL_UUID_LEN) == 0)
1387 				break;
1388 
1389 		if (!nd_label) {
1390 			WARN_ON(1);
1391 			return -EINVAL;
1392 		}
1393 
1394 		select = nd_label;
1395 		/*
1396 		 * Check that this label is compliant with the dpa
1397 		 * range published in NFIT
1398 		 */
1399 		hw_start = nd_mapping->start;
1400 		hw_end = hw_start + nd_mapping->size;
1401 		pmem_start = __le64_to_cpu(select->dpa);
1402 		pmem_end = pmem_start + __le64_to_cpu(select->rawsize);
1403 		if (pmem_start == hw_start && pmem_end <= hw_end)
1404 			/* pass */;
1405 		else
1406 			return -EINVAL;
1407 
1408 		nd_mapping->labels[0] = select;
1409 		nd_mapping->labels[1] = NULL;
1410 	}
1411 	return 0;
1412 }
1413 
1414 /**
1415  * find_pmem_label_set - validate interleave set labelling, retrieve label0
1416  * @nd_region: region with mappings to validate
1417  */
1418 static int find_pmem_label_set(struct nd_region *nd_region,
1419 		struct nd_namespace_pmem *nspm)
1420 {
1421 	u64 cookie = nd_region_interleave_set_cookie(nd_region);
1422 	struct nd_namespace_label *nd_label;
1423 	u8 select_id[NSLABEL_UUID_LEN];
1424 	resource_size_t size = 0;
1425 	u8 *pmem_id = NULL;
1426 	int rc = -ENODEV, l;
1427 	u16 i;
1428 
1429 	if (cookie == 0)
1430 		return -ENXIO;
1431 
1432 	/*
1433 	 * Find a complete set of labels by uuid.  By definition we can start
1434 	 * with any mapping as the reference label
1435 	 */
1436 	for_each_label(l, nd_label, nd_region->mapping[0].labels) {
1437 		u64 isetcookie = __le64_to_cpu(nd_label->isetcookie);
1438 
1439 		if (isetcookie != cookie)
1440 			continue;
1441 
1442 		for (i = 0; i < nd_region->ndr_mappings; i++)
1443 			if (!has_uuid_at_pos(nd_region, nd_label->uuid,
1444 						cookie, i))
1445 				break;
1446 		if (i < nd_region->ndr_mappings) {
1447 			/*
1448 			 * Give up if we don't find an instance of a
1449 			 * uuid at each position (from 0 to
1450 			 * nd_region->ndr_mappings - 1), or if we find a
1451 			 * dimm with two instances of the same uuid.
1452 			 */
1453 			rc = -EINVAL;
1454 			goto err;
1455 		} else if (pmem_id) {
1456 			/*
1457 			 * If there is more than one valid uuid set, we
1458 			 * need userspace to clean this up.
1459 			 */
1460 			rc = -EBUSY;
1461 			goto err;
1462 		}
1463 		memcpy(select_id, nd_label->uuid, NSLABEL_UUID_LEN);
1464 		pmem_id = select_id;
1465 	}
1466 
1467 	/*
1468 	 * Fix up each mapping's 'labels' to have the validated pmem label for
1469 	 * that position at labels[0], and NULL at labels[1].  In the process,
1470 	 * check that the namespace aligns with the interleave set.  We know
1471 	 * that it does not overlap with any blk namespaces by virtue of
1472 	 * the dimm being enabled (i.e. nd_label_reserve_dpa()
1473 	 * succeeded).
1474 	 */
1475 	rc = select_pmem_id(nd_region, pmem_id);
1476 	if (rc)
1477 		goto err;
1478 
1479 	/* Calculate total size and populate namespace properties from label0 */
1480 	for (i = 0; i < nd_region->ndr_mappings; i++) {
1481 		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
1482 		struct nd_namespace_label *label0 = nd_mapping->labels[0];
1483 
1484 		size += __le64_to_cpu(label0->rawsize);
1485 		if (__le16_to_cpu(label0->position) != 0)
1486 			continue;
1487 		WARN_ON(nspm->alt_name || nspm->uuid);
1488 		nspm->alt_name = kmemdup((void __force *) label0->name,
1489 				NSLABEL_NAME_LEN, GFP_KERNEL);
1490 		nspm->uuid = kmemdup((void __force *) label0->uuid,
1491 				NSLABEL_UUID_LEN, GFP_KERNEL);
1492 	}
1493 
1494 	if (!nspm->alt_name || !nspm->uuid) {
1495 		rc = -ENOMEM;
1496 		goto err;
1497 	}
1498 
1499 	nd_namespace_pmem_set_size(nd_region, nspm, size);
1500 
1501 	return 0;
1502  err:
1503 	switch (rc) {
1504 	case -EINVAL:
1505 		dev_dbg(&nd_region->dev, "%s: invalid label(s)\n", __func__);
1506 		break;
1507 	case -ENODEV:
1508 		dev_dbg(&nd_region->dev, "%s: label not found\n", __func__);
1509 		break;
1510 	default:
1511 		dev_dbg(&nd_region->dev, "%s: unexpected err: %d\n",
1512 				__func__, rc);
1513 		break;
1514 	}
1515 	return rc;
1516 }
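
/*
 * Example of a complete label set (uuid and sizes assumed): a 2-way
 * interleave set with cookie C validates when each dimm carries
 * exactly one label { uuid U, isetcookie C, nlabel 2 } at positions 0
 * and 1 respectively; the namespace size is the sum of the two rawsize
 * fields, and uuid / alt_name are copied from the position-0 label.
 */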
1517 
1518 static struct device **create_namespace_pmem(struct nd_region *nd_region)
1519 {
1520 	struct nd_namespace_pmem *nspm;
1521 	struct device *dev, **devs;
1522 	struct resource *res;
1523 	int rc;
1524 
1525 	nspm = kzalloc(sizeof(*nspm), GFP_KERNEL);
1526 	if (!nspm)
1527 		return NULL;
1528 
1529 	dev = &nspm->nsio.common.dev;
1530 	dev->type = &namespace_pmem_device_type;
1531 	dev->parent = &nd_region->dev;
1532 	res = &nspm->nsio.res;
1533 	res->name = dev_name(&nd_region->dev);
1534 	res->flags = IORESOURCE_MEM;
1535 	rc = find_pmem_label_set(nd_region, nspm);
1536 	if (rc == -ENODEV) {
1537 		int i;
1538 
1539 		/* Pass, try to permit namespace creation... */
1540 		for (i = 0; i < nd_region->ndr_mappings; i++) {
1541 			struct nd_mapping *nd_mapping = &nd_region->mapping[i];
1542 
1543 			kfree(nd_mapping->labels);
1544 			nd_mapping->labels = NULL;
1545 		}
1546 
1547 		/* Publish a zero-sized namespace for userspace to configure. */
1548 		nd_namespace_pmem_set_size(nd_region, nspm, 0);
1549 
1550 		rc = 0;
1551 	} else if (rc)
1552 		goto err;
1553 
1554 	devs = kcalloc(2, sizeof(struct device *), GFP_KERNEL);
1555 	if (!devs)
1556 		goto err;
1557 
1558 	devs[0] = dev;
1559 	return devs;
1560 
1561  err:
1562 	namespace_pmem_release(&nspm->nsio.common.dev);
1563 	return NULL;
1564 }
1565 
1566 struct resource *nsblk_add_resource(struct nd_region *nd_region,
1567 		struct nvdimm_drvdata *ndd, struct nd_namespace_blk *nsblk,
1568 		resource_size_t start)
1569 {
1570 	struct nd_label_id label_id;
1571 	struct resource *res;
1572 
1573 	nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL);
1574 	res = krealloc(nsblk->res,
1575 			sizeof(void *) * (nsblk->num_resources + 1),
1576 			GFP_KERNEL);
1577 	if (!res)
1578 		return NULL;
1579 	nsblk->res = (struct resource **) res;
1580 	for_each_dpa_resource(ndd, res)
1581 		if (strcmp(res->name, label_id.id) == 0
1582 				&& res->start == start) {
1583 			nsblk->res[nsblk->num_resources++] = res;
1584 			return res;
1585 		}
1586 	return NULL;
1587 }
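
/*
 * Note the two-level bookkeeping here: nsblk->res is a reallocated
 * array of pointers into the dimm's dpa resource tree.  A blk
 * namespace assembled from extents starting at, say, 0x8000 and
 * 0xa000 (values assumed) ends with num_resources == 2, each entry
 * aliasing the matching "blk-<uuid>" resource rather than owning a
 * copy.
 */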
1588 
1589 static struct device *nd_namespace_blk_create(struct nd_region *nd_region)
1590 {
1591 	struct nd_namespace_blk *nsblk;
1592 	struct device *dev;
1593 
1594 	if (!is_nd_blk(&nd_region->dev))
1595 		return NULL;
1596 
1597 	nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL);
1598 	if (!nsblk)
1599 		return NULL;
1600 
1601 	dev = &nsblk->common.dev;
1602 	dev->type = &namespace_blk_device_type;
1603 	nsblk->id = ida_simple_get(&nd_region->ns_ida, 0, 0, GFP_KERNEL);
1604 	if (nsblk->id < 0) {
1605 		kfree(nsblk);
1606 		return NULL;
1607 	}
1608 	dev_set_name(dev, "namespace%d.%d", nd_region->id, nsblk->id);
1609 	dev->parent = &nd_region->dev;
1610 	dev->groups = nd_namespace_attribute_groups;
1611 
1612 	return &nsblk->common.dev;
1613 }
1614 
1615 void nd_region_create_blk_seed(struct nd_region *nd_region)
1616 {
1617 	WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
1618 	nd_region->ns_seed = nd_namespace_blk_create(nd_region);
1619 	/*
1620 	 * Seed creation failures are not fatal, provisioning is simply
1621 	 * disabled until memory becomes available
1622 	 */
1623 	if (!nd_region->ns_seed)
1624 		dev_err(&nd_region->dev, "failed to create blk namespace\n");
1625 	else
1626 		nd_device_register(nd_region->ns_seed);
1627 }
1628 
1629 void nd_region_create_btt_seed(struct nd_region *nd_region)
1630 {
1631 	WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
1632 	nd_region->btt_seed = nd_btt_create(nd_region);
1633 	/*
1634 	 * Seed creation failures are not fatal, provisioning is simply
1635 	 * disabled until memory becomes available
1636 	 */
1637 	if (!nd_region->btt_seed)
1638 		dev_err(&nd_region->dev, "failed to create btt namespace\n");
1639 }
1640 
1641 static struct device **create_namespace_blk(struct nd_region *nd_region)
1642 {
1643 	struct nd_mapping *nd_mapping = &nd_region->mapping[0];
1644 	struct nd_namespace_label *nd_label;
1645 	struct device *dev, **devs = NULL;
1646 	struct nd_namespace_blk *nsblk;
1647 	struct nvdimm_drvdata *ndd;
1648 	int i, l, count = 0;
1649 	struct resource *res;
1650 
1651 	if (nd_region->ndr_mappings == 0)
1652 		return NULL;
1653 
1654 	ndd = to_ndd(nd_mapping);
1655 	for_each_label(l, nd_label, nd_mapping->labels) {
1656 		u32 flags = __le32_to_cpu(nd_label->flags);
1657 		char name[NSLABEL_NAME_LEN];
1658 		struct device **__devs;
1659 
1660 		if (flags & NSLABEL_FLAG_LOCAL)
1661 			/* pass */;
1662 		else
1663 			continue;
1664 
1665 		for (i = 0; i < count; i++) {
1666 			nsblk = to_nd_namespace_blk(devs[i]);
1667 			if (memcmp(nsblk->uuid, nd_label->uuid,
1668 						NSLABEL_UUID_LEN) == 0) {
1669 				res = nsblk_add_resource(nd_region, ndd, nsblk,
1670 						__le64_to_cpu(nd_label->dpa));
1671 				if (!res)
1672 					goto err;
1673 				nd_dbg_dpa(nd_region, ndd, res, "%s assign\n",
1674 					dev_name(&nsblk->common.dev));
1675 				break;
1676 			}
1677 		}
1678 		if (i < count)
1679 			continue;
1680 		__devs = kcalloc(count + 2, sizeof(dev), GFP_KERNEL);
1681 		if (!__devs)
1682 			goto err;
1683 		memcpy(__devs, devs, sizeof(dev) * count);
1684 		kfree(devs);
1685 		devs = __devs;
1686 
1687 		nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL);
1688 		if (!nsblk)
1689 			goto err;
1690 		dev = &nsblk->common.dev;
1691 		dev->type = &namespace_blk_device_type;
1692 		dev->parent = &nd_region->dev;
1693 		dev_set_name(dev, "namespace%d.%d", nd_region->id, count);
1694 		devs[count++] = dev;
1695 		nsblk->id = -1;
1696 		nsblk->lbasize = __le64_to_cpu(nd_label->lbasize);
1697 		nsblk->uuid = kmemdup(nd_label->uuid, NSLABEL_UUID_LEN,
1698 				GFP_KERNEL);
1699 		if (!nsblk->uuid)
1700 			goto err;
1701 		memcpy(name, nd_label->name, NSLABEL_NAME_LEN);
1702 		if (name[0])
1703 			nsblk->alt_name = kmemdup(name, NSLABEL_NAME_LEN,
1704 					GFP_KERNEL);
1705 		res = nsblk_add_resource(nd_region, ndd, nsblk,
1706 				__le64_to_cpu(nd_label->dpa));
1707 		if (!res)
1708 			goto err;
1709 		nd_dbg_dpa(nd_region, ndd, res, "%s assign\n",
1710 				dev_name(&nsblk->common.dev));
1711 	}
1712 
1713 	dev_dbg(&nd_region->dev, "%s: discovered %d blk namespace%s\n",
1714 			__func__, count, count == 1 ? "" : "s");
1715 
1716 	if (count == 0) {
1717 		/* Publish a zero-sized namespace for userspace to configure. */
1718 		for (i = 0; i < nd_region->ndr_mappings; i++) {
1719 			struct nd_mapping *nd_mapping = &nd_region->mapping[i];
1720 
1721 			kfree(nd_mapping->labels);
1722 			nd_mapping->labels = NULL;
1723 		}
1724 
1725 		devs = kcalloc(2, sizeof(dev), GFP_KERNEL);
1726 		if (!devs)
1727 			goto err;
1728 		nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL);
1729 		if (!nsblk)
1730 			goto err;
1731 		dev = &nsblk->common.dev;
1732 		dev->type = &namespace_blk_device_type;
1733 		dev->parent = &nd_region->dev;
1734 		devs[count++] = dev;
1735 	}
1736 
1737 	return devs;
1738 
1739 err:
1740 	for (i = 0; i < count; i++) {
1741 		nsblk = to_nd_namespace_blk(devs[i]);
1742 		namespace_blk_release(&nsblk->common.dev);
1743 	}
1744 	kfree(devs);
1745 	return NULL;
1746 }
1747 
1748 static int init_active_labels(struct nd_region *nd_region)
1749 {
1750 	int i;
1751 
1752 	for (i = 0; i < nd_region->ndr_mappings; i++) {
1753 		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
1754 		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
1755 		struct nvdimm *nvdimm = nd_mapping->nvdimm;
1756 		int count, j;
1757 
1758 		/*
1759 		 * If the dimm is disabled then prevent the region from
1760 		 * being activated if it aliases DPA.
1761 		 */
1762 		if (!ndd) {
1763 			if ((nvdimm->flags & NDD_ALIASING) == 0)
1764 				return 0;
1765 			dev_dbg(&nd_region->dev, "%s: is disabled, failing probe\n",
1766 					dev_name(&nd_mapping->nvdimm->dev));
1767 			return -ENXIO;
1768 		}
1769 		nd_mapping->ndd = ndd;
1770 		atomic_inc(&nvdimm->busy);
1771 		get_ndd(ndd);
1772 
1773 		count = nd_label_active_count(ndd);
1774 		dev_dbg(ndd->dev, "%s: %d\n", __func__, count);
1775 		if (!count)
1776 			continue;
1777 		nd_mapping->labels = kcalloc(count + 1, sizeof(void *),
1778 				GFP_KERNEL);
1779 		if (!nd_mapping->labels)
1780 			return -ENOMEM;
1781 		for (j = 0; j < count; j++) {
1782 			struct nd_namespace_label *label;
1783 
1784 			label = nd_label_active(ndd, j);
1785 			nd_mapping->labels[j] = label;
1786 		}
1787 	}
1788 
1789 	return 0;
1790 }
1791 
1792 int nd_region_register_namespaces(struct nd_region *nd_region, int *err)
1793 {
1794 	struct device **devs = NULL;
1795 	int i, rc = 0, type;
1796 
1797 	*err = 0;
1798 	nvdimm_bus_lock(&nd_region->dev);
1799 	rc = init_active_labels(nd_region);
1800 	if (rc) {
1801 		nvdimm_bus_unlock(&nd_region->dev);
1802 		return rc;
1803 	}
1804 
1805 	type = nd_region_to_nstype(nd_region);
1806 	switch (type) {
1807 	case ND_DEVICE_NAMESPACE_IO:
1808 		devs = create_namespace_io(nd_region);
1809 		break;
1810 	case ND_DEVICE_NAMESPACE_PMEM:
1811 		devs = create_namespace_pmem(nd_region);
1812 		break;
1813 	case ND_DEVICE_NAMESPACE_BLK:
1814 		devs = create_namespace_blk(nd_region);
1815 		break;
1816 	default:
1817 		break;
1818 	}
1819 	nvdimm_bus_unlock(&nd_region->dev);
1820 
1821 	if (!devs)
1822 		return -ENODEV;
1823 
1824 	for (i = 0; devs[i]; i++) {
1825 		struct device *dev = devs[i];
1826 		int id;
1827 
1828 		if (type == ND_DEVICE_NAMESPACE_BLK) {
1829 			struct nd_namespace_blk *nsblk;
1830 
1831 			nsblk = to_nd_namespace_blk(dev);
1832 			id = ida_simple_get(&nd_region->ns_ida, 0, 0,
1833 					GFP_KERNEL);
1834 			nsblk->id = id;
1835 		} else
1836 			id = i;
1837 
1838 		if (id < 0)
1839 			break;
1840 		dev_set_name(dev, "namespace%d.%d", nd_region->id, id);
1841 		dev->groups = nd_namespace_attribute_groups;
1842 		nd_device_register(dev);
1843 	}
1844 	if (i)
1845 		nd_region->ns_seed = devs[0];
1846 
1847 	if (devs[i]) {
1848 		int j;
1849 
1850 		for (j = i; devs[j]; j++) {
1851 			struct device *dev = devs[j];
1852 
1853 			device_initialize(dev);
1854 			put_device(dev);
1855 		}
1856 		*err = j - i;
1857 		/*
1858 		 * All of the namespaces we tried to register failed, so
1859 		 * fail region activation.
1860 		 */
1861 		if (i == 0)
1862 			rc = -ENODEV;
1863 	}
1864 	kfree(devs);
1865 
1866 	if (rc == -ENODEV)
1867 		return rc;
1868 
1869 	return i;
1870 }
1871