xref: /linux/drivers/nvdimm/label.c (revision ca55b2fef3a9373fcfc30f82fd26bc7fccbda732)
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/device.h>
#include <linux/ndctl.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/nd.h>
#include "nd-core.h"
#include "label.h"
#include "nd.h"

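/*
 * Index-block sequence numbers are 2-bit cyclic values (masked by
 * NSINDEX_SEQ_MASK) where 0 is invalid; nd_inc_seq() (defined in nd.h)
 * advances a valid value through a 1 -> 2 -> 3 -> 1 cycle.  Given two
 * sequence numbers, best_seq() picks the more recent: an invalid
 * (zero) value always loses, equal values tie to 'b', and otherwise
 * the value that is one increment ahead wins.  For example,
 * best_seq(3, 1) returns 1, since 1 is the cyclic successor of 3.
 */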
static u32 best_seq(u32 a, u32 b)
{
	a &= NSINDEX_SEQ_MASK;
	b &= NSINDEX_SEQ_MASK;

	if (a == 0 || a == b)
		return b;
	else if (b == 0)
		return a;
	else if (nd_inc_seq(a) == b)
		return b;
	else
		return a;
}

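/*
 * Sizing note (assuming the 128-byte v1.1 struct nd_namespace_label
 * from label.h): dividing config_size by 129 budgets one byte of
 * index overhead per 128-byte label slot.  For example, a typical
 * 128K label area yields 131072 / 129 = 1016 slots, matching
 * nvdimm_num_label_slots() below.
 */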
size_t sizeof_namespace_index(struct nvdimm_drvdata *ndd)
{
	u32 index_span;

	if (ndd->nsindex_size)
		return ndd->nsindex_size;

	/*
	 * The minimum index space is 512 bytes; with that amount of
	 * index we can describe ~1400 labels, which is less than a byte
	 * of overhead per label.  Round up to a byte of overhead per
	 * label and determine the size of the index region.  Yes, this
	 * starts to waste space at larger config_sizes, but it's
	 * unlikely we'll ever see anything but 128K.
	 */
	index_span = ndd->nsarea.config_size / 129;
	index_span /= NSINDEX_ALIGN * 2;
	ndd->nsindex_size = index_span * NSINDEX_ALIGN;

	return ndd->nsindex_size;
}

int nvdimm_num_label_slots(struct nvdimm_drvdata *ndd)
{
	return ndd->nsarea.config_size / 129;
}

int nd_label_validate(struct nvdimm_drvdata *ndd)
{
	/*
	 * The on-media label format consists of two index blocks
	 * followed by an array of labels.  None of these structures
	 * are ever updated in place.  A sequence number tracks the
	 * current active index and the next one to write, while labels
	 * are written to free slots.
	 *
	 *     +------------+
	 *     |            |
	 *     |  nsindex0  |
	 *     |            |
	 *     +------------+
	 *     |            |
	 *     |  nsindex1  |
	 *     |            |
	 *     +------------+
	 *     |   label0   |
	 *     +------------+
	 *     |   label1   |
	 *     +------------+
	 *     |            |
	 *      ....nslot...
	 *     |            |
	 *     +------------+
	 *     |   labelN   |
	 *     +------------+
	 */
	struct nd_namespace_index *nsindex[] = {
		to_namespace_index(ndd, 0),
		to_namespace_index(ndd, 1),
	};
	const int num_index = ARRAY_SIZE(nsindex);
	struct device *dev = ndd->dev;
	bool valid[2] = { 0 };
	int i, num_valid = 0;
	u32 seq;

	for (i = 0; i < num_index; i++) {
		u32 nslot;
		u8 sig[NSINDEX_SIG_LEN];
		u64 sum_save, sum, size;

		memcpy(sig, nsindex[i]->sig, NSINDEX_SIG_LEN);
		if (memcmp(sig, NSINDEX_SIGNATURE, NSINDEX_SIG_LEN) != 0) {
			dev_dbg(dev, "%s: nsindex%d signature invalid\n",
					__func__, i);
			continue;
		}
		sum_save = __le64_to_cpu(nsindex[i]->checksum);
		nsindex[i]->checksum = __cpu_to_le64(0);
		sum = nd_fletcher64(nsindex[i], sizeof_namespace_index(ndd), 1);
		nsindex[i]->checksum = __cpu_to_le64(sum_save);
		if (sum != sum_save) {
			dev_dbg(dev, "%s: nsindex%d checksum invalid\n",
					__func__, i);
			continue;
		}

		seq = __le32_to_cpu(nsindex[i]->seq);
		if ((seq & NSINDEX_SEQ_MASK) == 0) {
			dev_dbg(dev, "%s: nsindex%d sequence: %#x invalid\n",
					__func__, i, seq);
			continue;
		}

		/* sanity check the index against expected values */
		if (__le64_to_cpu(nsindex[i]->myoff)
				!= i * sizeof_namespace_index(ndd)) {
			dev_dbg(dev, "%s: nsindex%d myoff: %#llx invalid\n",
					__func__, i, (unsigned long long)
					__le64_to_cpu(nsindex[i]->myoff));
			continue;
		}
		if (__le64_to_cpu(nsindex[i]->otheroff)
				!= (!i) * sizeof_namespace_index(ndd)) {
			dev_dbg(dev, "%s: nsindex%d otheroff: %#llx invalid\n",
					__func__, i, (unsigned long long)
					__le64_to_cpu(nsindex[i]->otheroff));
			continue;
		}

		size = __le64_to_cpu(nsindex[i]->mysize);
		if (size > sizeof_namespace_index(ndd)
				|| size < sizeof(struct nd_namespace_index)) {
			dev_dbg(dev, "%s: nsindex%d mysize: %#llx invalid\n",
					__func__, i, size);
			continue;
		}

		nslot = __le32_to_cpu(nsindex[i]->nslot);
		if (nslot * sizeof(struct nd_namespace_label)
				+ 2 * sizeof_namespace_index(ndd)
				> ndd->nsarea.config_size) {
			dev_dbg(dev, "%s: nsindex%d nslot: %u invalid, config_size: %#x\n",
					__func__, i, nslot,
					ndd->nsarea.config_size);
			continue;
		}
		valid[i] = true;
		num_valid++;
	}

	switch (num_valid) {
	case 0:
		break;
	case 1:
		for (i = 0; i < num_index; i++)
			if (valid[i])
				return i;
		/* can't have num_valid > 0 but valid[] = { false, false } */
		WARN_ON(1);
		break;
	default:
		/* pick the best index... */
		seq = best_seq(__le32_to_cpu(nsindex[0]->seq),
				__le32_to_cpu(nsindex[1]->seq));
		if (seq == (__le32_to_cpu(nsindex[1]->seq) & NSINDEX_SEQ_MASK))
			return 1;
		else
			return 0;
		break;
	}

	return -1;
}


void nd_label_copy(struct nvdimm_drvdata *ndd, struct nd_namespace_index *dst,
		struct nd_namespace_index *src)
{
	if (!dst || !src)
		return;

	memcpy(dst, src, sizeof_namespace_index(ndd));
}

static struct nd_namespace_label *nd_label_base(struct nvdimm_drvdata *ndd)
{
	void *base = to_namespace_index(ndd, 0);

	return base + 2 * sizeof_namespace_index(ndd);
}

static int to_slot(struct nvdimm_drvdata *ndd,
		struct nd_namespace_label *nd_label)
{
	return nd_label - nd_label_base(ndd);
}

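/*
 * The 'free' array in an index block is a little-endian bitmap with
 * one bit per label slot: a set bit marks a free slot, a clear bit a
 * slot holding an active label (see nd_label_alloc_slot() /
 * nd_label_free_slot() below).  Walking the clear bits therefore
 * visits every candidate in-use label.
 */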
#define for_each_clear_bit_le(bit, addr, size) \
	for ((bit) = find_next_zero_bit_le((addr), (size), 0);  \
	     (bit) < (size);                                    \
	     (bit) = find_next_zero_bit_le((addr), (size), (bit) + 1))

/**
 * preamble_index - common variable initialization for nd_label_* routines
 * @ndd: dimm container for the relevant label set
 * @idx: namespace_index index
 * @nsindex_out: on return set to the currently active namespace index
 * @free: on return set to the free label bitmap in the index
 * @nslot: on return set to the number of slots in the label space
 */
static bool preamble_index(struct nvdimm_drvdata *ndd, int idx,
		struct nd_namespace_index **nsindex_out,
		unsigned long **free, u32 *nslot)
{
	struct nd_namespace_index *nsindex;

	nsindex = to_namespace_index(ndd, idx);
	if (nsindex == NULL)
		return false;

	*free = (unsigned long *) nsindex->free;
	*nslot = __le32_to_cpu(nsindex->nslot);
	*nsindex_out = nsindex;

	return true;
}

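/*
 * Generate the resource name used to track DPA allocations for a
 * namespace, e.g. "pmem-<uuid>" for an interleave-set label, or
 * "blk-<uuid>" when NSLABEL_FLAG_LOCAL is set.
 */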
char *nd_label_gen_id(struct nd_label_id *label_id, u8 *uuid, u32 flags)
{
	if (!label_id || !uuid)
		return NULL;
	snprintf(label_id->id, ND_LABEL_ID_SIZE, "%s-%pUb",
			flags & NSLABEL_FLAG_LOCAL ? "blk" : "pmem", uuid);
	return label_id->id;
}

static bool preamble_current(struct nvdimm_drvdata *ndd,
		struct nd_namespace_index **nsindex,
		unsigned long **free, u32 *nslot)
{
	return preamble_index(ndd, ndd->ns_current, nsindex,
			free, nslot);
}

static bool preamble_next(struct nvdimm_drvdata *ndd,
		struct nd_namespace_index **nsindex,
		unsigned long **free, u32 *nslot)
{
	return preamble_index(ndd, ndd->ns_next, nsindex,
			free, nslot);
}

static bool slot_valid(struct nd_namespace_label *nd_label, u32 slot)
{
	/* check that we are written where we expect to be written */
	if (slot != __le32_to_cpu(nd_label->slot))
		return false;

	/* check that DPA allocations are page aligned */
	if ((__le64_to_cpu(nd_label->dpa)
				| __le64_to_cpu(nd_label->rawsize)) % SZ_4K)
		return false;

	return true;
}

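/*
 * Walk the active labels in the current index and re-establish their
 * DPA reservations in the dimm's resource tracking, so that new
 * allocations cannot collide with capacity already claimed on-media.
 */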
int nd_label_reserve_dpa(struct nvdimm_drvdata *ndd)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot, slot;

	if (!preamble_current(ndd, &nsindex, &free, &nslot))
		return 0; /* no label, nothing to reserve */

	for_each_clear_bit_le(slot, free, nslot) {
		struct nd_namespace_label *nd_label;
		struct nd_region *nd_region = NULL;
		u8 label_uuid[NSLABEL_UUID_LEN];
		struct nd_label_id label_id;
		struct resource *res;
		u32 flags;

		nd_label = nd_label_base(ndd) + slot;

		if (!slot_valid(nd_label, slot))
			continue;

		memcpy(label_uuid, nd_label->uuid, NSLABEL_UUID_LEN);
		flags = __le32_to_cpu(nd_label->flags);
		nd_label_gen_id(&label_id, label_uuid, flags);
		res = nvdimm_allocate_dpa(ndd, &label_id,
				__le64_to_cpu(nd_label->dpa),
				__le64_to_cpu(nd_label->rawsize));
		nd_dbg_dpa(nd_region, ndd, res, "reserve\n");
		if (!res)
			return -EBUSY;
	}

	return 0;
}

int nd_label_active_count(struct nvdimm_drvdata *ndd)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot, slot;
	int count = 0;

	if (!preamble_current(ndd, &nsindex, &free, &nslot))
		return 0;

	for_each_clear_bit_le(slot, free, nslot) {
		struct nd_namespace_label *nd_label;

		nd_label = nd_label_base(ndd) + slot;

		if (!slot_valid(nd_label, slot)) {
			u32 label_slot = __le32_to_cpu(nd_label->slot);
			u64 size = __le64_to_cpu(nd_label->rawsize);
			u64 dpa = __le64_to_cpu(nd_label->dpa);

			dev_dbg(ndd->dev,
				"%s: slot%d invalid slot: %d dpa: %llx size: %llx\n",
					__func__, slot, label_slot, dpa, size);
			continue;
		}
		count++;
	}
	return count;
}

struct nd_namespace_label *nd_label_active(struct nvdimm_drvdata *ndd, int n)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot, slot;

	if (!preamble_current(ndd, &nsindex, &free, &nslot))
		return NULL;

	for_each_clear_bit_le(slot, free, nslot) {
		struct nd_namespace_label *nd_label;

		nd_label = nd_label_base(ndd) + slot;
		if (!slot_valid(nd_label, slot))
			continue;

		if (n-- == 0)
			return nd_label;
	}

	return NULL;
}

u32 nd_label_alloc_slot(struct nvdimm_drvdata *ndd)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot, slot;

	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return UINT_MAX;

	WARN_ON(!is_nvdimm_bus_locked(ndd->dev));

	slot = find_next_bit_le(free, nslot, 0);
	if (slot == nslot)
		return UINT_MAX;

	clear_bit_le(slot, free);

	return slot;
}

bool nd_label_free_slot(struct nvdimm_drvdata *ndd, u32 slot)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot;

	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return false;

	WARN_ON(!is_nvdimm_bus_locked(ndd->dev));

	if (slot < nslot)
		return !test_and_set_bit_le(slot, free);
	return false;
}

u32 nd_label_nfree(struct nvdimm_drvdata *ndd)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot;

	WARN_ON(!is_nvdimm_bus_locked(ndd->dev));

	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return nvdimm_num_label_slots(ndd);

	return bitmap_weight(free, nslot);
}

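/*
 * Publish an index block: fill in the block at 'index' with the given
 * sequence number, checksum it, and write it out to the dimm.
 * Outside of initialization the block being written is the 'next'
 * (staging) index; once the write succeeds it becomes the current
 * index and the previously current block becomes the new staging
 * area, effectively a double-buffered commit of the label updates.
 */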
static int nd_label_write_index(struct nvdimm_drvdata *ndd, int index, u32 seq,
		unsigned long flags)
{
	struct nd_namespace_index *nsindex;
	unsigned long offset;
	u64 checksum;
	u32 nslot;
	int rc;

	nsindex = to_namespace_index(ndd, index);
	if (flags & ND_NSINDEX_INIT)
		nslot = nvdimm_num_label_slots(ndd);
	else
		nslot = __le32_to_cpu(nsindex->nslot);

	memcpy(nsindex->sig, NSINDEX_SIGNATURE, NSINDEX_SIG_LEN);
	nsindex->flags = __cpu_to_le32(0);
	nsindex->seq = __cpu_to_le32(seq);
	offset = (unsigned long) nsindex
		- (unsigned long) to_namespace_index(ndd, 0);
	nsindex->myoff = __cpu_to_le64(offset);
	nsindex->mysize = __cpu_to_le64(sizeof_namespace_index(ndd));
	offset = (unsigned long) to_namespace_index(ndd,
			nd_label_next_nsindex(index))
		- (unsigned long) to_namespace_index(ndd, 0);
	nsindex->otheroff = __cpu_to_le64(offset);
	offset = (unsigned long) nd_label_base(ndd)
		- (unsigned long) to_namespace_index(ndd, 0);
	nsindex->labeloff = __cpu_to_le64(offset);
	nsindex->nslot = __cpu_to_le32(nslot);
	nsindex->major = __cpu_to_le16(1);
	nsindex->minor = __cpu_to_le16(1);
	nsindex->checksum = __cpu_to_le64(0);
	if (flags & ND_NSINDEX_INIT) {
		unsigned long *free = (unsigned long *) nsindex->free;
		u32 nfree = ALIGN(nslot, BITS_PER_LONG);
		int last_bits, i;

		/* mark all slots free, then trim the bits past nslot */
		memset(nsindex->free, 0xff, nfree / 8);
		for (i = 0, last_bits = nfree - nslot; i < last_bits; i++)
			clear_bit_le(nslot + i, free);
	}
	checksum = nd_fletcher64(nsindex, sizeof_namespace_index(ndd), 1);
	nsindex->checksum = __cpu_to_le64(checksum);
	rc = nvdimm_set_config_data(ndd, __le64_to_cpu(nsindex->myoff),
			nsindex, sizeof_namespace_index(ndd));
	if (rc < 0)
		return rc;

	if (flags & ND_NSINDEX_INIT)
		return 0;

	/* copy the index we just wrote to the new 'next' */
	WARN_ON(index != ndd->ns_next);
	nd_label_copy(ndd, to_current_namespace_index(ndd), nsindex);
	ndd->ns_current = nd_label_next_nsindex(ndd->ns_current);
	ndd->ns_next = nd_label_next_nsindex(ndd->ns_next);
	WARN_ON(ndd->ns_current == ndd->ns_next);

	return 0;
}

static unsigned long nd_label_offset(struct nvdimm_drvdata *ndd,
		struct nd_namespace_label *nd_label)
{
	return (unsigned long) nd_label
		- (unsigned long) to_namespace_index(ndd, 0);
}

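/*
 * Write the label for one dimm-mapping of a pmem namespace:
 * 1/ allocate a free slot and write the new label (flagged UPDATING)
 * 2/ mark the slot of the label being replaced, if any, for reuse
 * 3/ write the 'next' index with a bumped sequence number to commit
 */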
static int __pmem_label_update(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, struct nd_namespace_pmem *nspm,
		int pos)
{
	u64 cookie = nd_region_interleave_set_cookie(nd_region), rawsize;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nd_namespace_label *victim_label;
	struct nd_namespace_label *nd_label;
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot, slot;
	size_t offset;
	int rc;

	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return -ENXIO;

	/* allocate and write the label to the staging (next) index */
	slot = nd_label_alloc_slot(ndd);
	if (slot == UINT_MAX)
		return -ENXIO;
	dev_dbg(ndd->dev, "%s: allocated: %d\n", __func__, slot);

	nd_label = nd_label_base(ndd) + slot;
	memset(nd_label, 0, sizeof(struct nd_namespace_label));
	memcpy(nd_label->uuid, nspm->uuid, NSLABEL_UUID_LEN);
	if (nspm->alt_name)
		memcpy(nd_label->name, nspm->alt_name, NSLABEL_NAME_LEN);
	nd_label->flags = __cpu_to_le32(NSLABEL_FLAG_UPDATING);
	nd_label->nlabel = __cpu_to_le16(nd_region->ndr_mappings);
	nd_label->position = __cpu_to_le16(pos);
	nd_label->isetcookie = __cpu_to_le64(cookie);
	rawsize = div_u64(resource_size(&nspm->nsio.res),
			nd_region->ndr_mappings);
	nd_label->rawsize = __cpu_to_le64(rawsize);
	nd_label->dpa = __cpu_to_le64(nd_mapping->start);
	nd_label->slot = __cpu_to_le32(slot);

	/* update label */
	offset = nd_label_offset(ndd, nd_label);
	rc = nvdimm_set_config_data(ndd, offset, nd_label,
			sizeof(struct nd_namespace_label));
	if (rc < 0)
		return rc;

	/* Garbage collect the previous label */
	victim_label = nd_mapping->labels[0];
	if (victim_label) {
		slot = to_slot(ndd, victim_label);
		nd_label_free_slot(ndd, slot);
		dev_dbg(ndd->dev, "%s: free: %d\n", __func__, slot);
	}

	/* update index */
	rc = nd_label_write_index(ndd, ndd->ns_next,
			nd_inc_seq(__le32_to_cpu(nsindex->seq)), 0);
	if (rc < 0)
		return rc;

	nd_mapping->labels[0] = nd_label;

	return 0;
}

static void del_label(struct nd_mapping *nd_mapping, int l)
{
	struct nd_namespace_label *next_label, *nd_label;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	unsigned int slot;
	int j;

	nd_label = nd_mapping->labels[l];
	slot = to_slot(ndd, nd_label);
	dev_vdbg(ndd->dev, "%s: clear: %d\n", __func__, slot);

	for (j = l; (next_label = nd_mapping->labels[j + 1]); j++)
		nd_mapping->labels[j] = next_label;
	nd_mapping->labels[j] = NULL;
}

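/*
 * A resource is "old" if it already appears in the previous resource
 * list and has not been resized (DPA_RESOURCE_ADJUSTED); such
 * resources are carried over as-is rather than getting a new label
 * written.
 */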
static bool is_old_resource(struct resource *res, struct resource **list, int n)
{
	int i;

	if (res->flags & DPA_RESOURCE_ADJUSTED)
		return false;
	for (i = 0; i < n; i++)
		if (res == list[i])
			return true;
	return false;
}

static struct resource *to_resource(struct nvdimm_drvdata *ndd,
		struct nd_namespace_label *nd_label)
{
	struct resource *res;

	for_each_dpa_resource(ndd, res) {
		if (res->start != __le64_to_cpu(nd_label->dpa))
			continue;
		if (resource_size(res) != __le64_to_cpu(nd_label->rawsize))
			continue;
		return res;
	}

	return NULL;
}

/*
 * 1/ Account all the labels that can be freed after this update
 * 2/ Allocate and write the label to the staging (next) index
 * 3/ Record the resources in the namespace device
 */
static int __blk_label_update(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, struct nd_namespace_blk *nsblk,
		int num_labels)
{
	int i, l, alloc, victims, nfree, old_num_resources, nlabel, rc = -ENXIO;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nd_namespace_label *nd_label;
	struct nd_namespace_index *nsindex;
	unsigned long *free, *victim_map = NULL;
	struct resource *res, **old_res_list;
	struct nd_label_id label_id;
	u8 uuid[NSLABEL_UUID_LEN];
	u32 nslot, slot;

	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return -ENXIO;

	old_res_list = nsblk->res;
	nfree = nd_label_nfree(ndd);
	old_num_resources = nsblk->num_resources;
	nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL);

	/*
	 * We need to loop over the old resources a few times, which
	 * seems a bit inefficient, but we need to know that we have
	 * the label space before we start mutating the tracking
	 * structures.  Otherwise the recovery method of last resort
	 * for userspace is to disable and re-enable the parent region.
	 */
	alloc = 0;
	for_each_dpa_resource(ndd, res) {
		if (strcmp(res->name, label_id.id) != 0)
			continue;
		if (!is_old_resource(res, old_res_list, old_num_resources))
			alloc++;
	}

	victims = 0;
	if (old_num_resources) {
		/* convert old local-label-map to dimm-slot victim-map */
		victim_map = kcalloc(BITS_TO_LONGS(nslot), sizeof(long),
				GFP_KERNEL);
		if (!victim_map)
			return -ENOMEM;

		/* mark unused labels for garbage collection */
		for_each_clear_bit_le(slot, free, nslot) {
			nd_label = nd_label_base(ndd) + slot;
			memcpy(uuid, nd_label->uuid, NSLABEL_UUID_LEN);
			if (memcmp(uuid, nsblk->uuid, NSLABEL_UUID_LEN) != 0)
				continue;
			res = to_resource(ndd, nd_label);
			if (res && is_old_resource(res, old_res_list,
						old_num_resources))
				continue;
			slot = to_slot(ndd, nd_label);
			set_bit(slot, victim_map);
			victims++;
		}
	}

	/* don't allow updates that consume the last label */
	if (nfree - alloc < 0 || nfree - alloc + victims < 1) {
		dev_info(&nsblk->common.dev, "insufficient label space\n");
		kfree(victim_map);
		return -ENOSPC;
	}
	/* from here on we need to abort on error */

	/* assign all resources to the namespace before writing the labels */
	nsblk->res = NULL;
	nsblk->num_resources = 0;
	for_each_dpa_resource(ndd, res) {
		if (strcmp(res->name, label_id.id) != 0)
			continue;
		if (!nsblk_add_resource(nd_region, ndd, nsblk, res->start)) {
			rc = -ENOMEM;
			goto abort;
		}
	}

	for (i = 0; i < nsblk->num_resources; i++) {
		size_t offset;

		res = nsblk->res[i];
		if (is_old_resource(res, old_res_list, old_num_resources))
			continue; /* carry-over */
		slot = nd_label_alloc_slot(ndd);
		if (slot == UINT_MAX)
			goto abort;
		dev_dbg(ndd->dev, "%s: allocated: %d\n", __func__, slot);

		nd_label = nd_label_base(ndd) + slot;
		memset(nd_label, 0, sizeof(struct nd_namespace_label));
		memcpy(nd_label->uuid, nsblk->uuid, NSLABEL_UUID_LEN);
		if (nsblk->alt_name)
			memcpy(nd_label->name, nsblk->alt_name,
					NSLABEL_NAME_LEN);
		nd_label->flags = __cpu_to_le32(NSLABEL_FLAG_LOCAL);
		nd_label->nlabel = __cpu_to_le16(0); /* N/A */
		nd_label->position = __cpu_to_le16(0); /* N/A */
		nd_label->isetcookie = __cpu_to_le64(0); /* N/A */
		nd_label->dpa = __cpu_to_le64(res->start);
		nd_label->rawsize = __cpu_to_le64(resource_size(res));
		nd_label->lbasize = __cpu_to_le64(nsblk->lbasize);
		nd_label->slot = __cpu_to_le32(slot);

		/* update label */
		offset = nd_label_offset(ndd, nd_label);
		rc = nvdimm_set_config_data(ndd, offset, nd_label,
				sizeof(struct nd_namespace_label));
		if (rc < 0)
			goto abort;
	}

	/* free up now unused slots in the new index */
	for_each_set_bit(slot, victim_map, victim_map ? nslot : 0) {
		dev_dbg(ndd->dev, "%s: free: %d\n", __func__, slot);
		nd_label_free_slot(ndd, slot);
	}

	/* update index */
	rc = nd_label_write_index(ndd, ndd->ns_next,
			nd_inc_seq(__le32_to_cpu(nsindex->seq)), 0);
	if (rc)
		goto abort;

	/*
	 * Now that the on-dimm labels are up to date, fix up the tracking
	 * entries in nd_mapping->labels
	 */
	nlabel = 0;
	for_each_label(l, nd_label, nd_mapping->labels) {
		nlabel++;
		memcpy(uuid, nd_label->uuid, NSLABEL_UUID_LEN);
		if (memcmp(uuid, nsblk->uuid, NSLABEL_UUID_LEN) != 0)
			continue;
		nlabel--;
		del_label(nd_mapping, l);
		l--; /* retry with the new label at this index */
	}
	if (nlabel + nsblk->num_resources > num_labels) {
		/*
		 * Bug, we can't end up with more resources than
		 * available labels
		 */
		WARN_ON_ONCE(1);
		rc = -ENXIO;
		goto out;
	}

	/* 'l' now indexes just past the surviving labels; append new ones */
	for_each_clear_bit_le(slot, free, nslot) {
		nd_label = nd_label_base(ndd) + slot;
		memcpy(uuid, nd_label->uuid, NSLABEL_UUID_LEN);
		if (memcmp(uuid, nsblk->uuid, NSLABEL_UUID_LEN) != 0)
			continue;
		res = to_resource(ndd, nd_label);
		res->flags &= ~DPA_RESOURCE_ADJUSTED;
		dev_vdbg(&nsblk->common.dev, "assign label[%d] slot: %d\n",
				l, slot);
		nd_mapping->labels[l++] = nd_label;
	}
	nd_mapping->labels[l] = NULL;

 out:
	kfree(old_res_list);
	kfree(victim_map);
	return rc;

 abort:
	/*
	 * 1/ repair the allocated label bitmap in the index
	 * 2/ restore the resource list
	 */
	nd_label_copy(ndd, nsindex, to_current_namespace_index(ndd));
	kfree(nsblk->res);
	nsblk->res = old_res_list;
	nsblk->num_resources = old_num_resources;
	old_res_list = NULL;
	goto out;
}

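/*
 * Grow the in-memory label array to hold at least 'num_labels'
 * entries (plus a NULL terminator), and if the dimm has no valid
 * index blocks yet, write an initial index pair so that label
 * allocation can proceed.  Returns the usable label count, or a
 * negative error code.
 */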
static int init_labels(struct nd_mapping *nd_mapping, int num_labels)
{
	int i, l, old_num_labels = 0;
	struct nd_namespace_index *nsindex;
	struct nd_namespace_label *nd_label;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	size_t size = (num_labels + 1) * sizeof(struct nd_namespace_label *);

	for_each_label(l, nd_label, nd_mapping->labels)
		old_num_labels++;

	/*
	 * We need to preserve all the old labels for the mapping so
	 * they can be garbage collected after writing the new labels.
	 */
	if (num_labels > old_num_labels) {
		struct nd_namespace_label **labels;

		labels = krealloc(nd_mapping->labels, size, GFP_KERNEL);
		if (!labels)
			return -ENOMEM;
		nd_mapping->labels = labels;
	}
	if (!nd_mapping->labels)
		return -ENOMEM;

	for (i = old_num_labels; i <= num_labels; i++)
		nd_mapping->labels[i] = NULL;

	if (ndd->ns_current != -1 && ndd->ns_next != -1)
		return max(num_labels, old_num_labels);

	nsindex = to_namespace_index(ndd, 0);
	memset(nsindex, 0, ndd->nsarea.config_size);
	for (i = 0; i < 2; i++) {
		int rc = nd_label_write_index(ndd, i, i*2, ND_NSINDEX_INIT);

		if (rc)
			return rc;
	}
	ndd->ns_next = 1;
	ndd->ns_current = 0;

	return max(num_labels, old_num_labels);
}

static int del_labels(struct nd_mapping *nd_mapping, u8 *uuid)
{
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nd_namespace_label *nd_label;
	struct nd_namespace_index *nsindex;
	u8 label_uuid[NSLABEL_UUID_LEN];
	int l, num_freed = 0;
	unsigned long *free;
	u32 nslot, slot;

	if (!uuid)
		return 0;

	/* no index || no labels == nothing to delete */
	if (!preamble_next(ndd, &nsindex, &free, &nslot)
			|| !nd_mapping->labels)
		return 0;

	for_each_label(l, nd_label, nd_mapping->labels) {
		memcpy(label_uuid, nd_label->uuid, NSLABEL_UUID_LEN);
		if (memcmp(label_uuid, uuid, NSLABEL_UUID_LEN) != 0)
			continue;
		slot = to_slot(ndd, nd_label);
		nd_label_free_slot(ndd, slot);
		dev_dbg(ndd->dev, "%s: free: %d\n", __func__, slot);
		del_label(nd_mapping, l);
		num_freed++;
		l--; /* retry with new label at this index */
	}

	if (num_freed > l) {
		/*
		 * num_freed will only ever be > l when we delete the last
		 * label
		 */
		kfree(nd_mapping->labels);
		nd_mapping->labels = NULL;
		dev_dbg(ndd->dev, "%s: no more labels\n", __func__);
	}

	return nd_label_write_index(ndd, ndd->ns_next,
			nd_inc_seq(__le32_to_cpu(nsindex->seq)), 0);
}

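/*
 * Update the pmem namespace label set: a size of zero deletes the
 * namespace's labels from every dimm in the interleave set, otherwise
 * each dimm-mapping gets a single label recording its position in the
 * set.
 */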
int nd_pmem_namespace_label_update(struct nd_region *nd_region,
		struct nd_namespace_pmem *nspm, resource_size_t size)
{
	int i;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		int rc;

		if (size == 0) {
			rc = del_labels(nd_mapping, nspm->uuid);
			if (rc)
				return rc;
			continue;
		}

		rc = init_labels(nd_mapping, 1);
		if (rc < 0)
			return rc;

		rc = __pmem_label_update(nd_region, nd_mapping, nspm, i);
		if (rc)
			return rc;
	}

	return 0;
}

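/*
 * Update the blk namespace label set on its single dimm-mapping.  The
 * current DPA resource count bounds the number of labels that may
 * need to be written, so size the label array to that count before
 * delegating to __blk_label_update().
 */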
int nd_blk_namespace_label_update(struct nd_region *nd_region,
		struct nd_namespace_blk *nsblk, resource_size_t size)
{
	struct nd_mapping *nd_mapping = &nd_region->mapping[0];
	struct resource *res;
	int count = 0;

	if (size == 0)
		return del_labels(nd_mapping, nsblk->uuid);

	for_each_dpa_resource(to_ndd(nd_mapping), res)
		count++;

	count = init_labels(nd_mapping, count);
	if (count < 0)
		return count;

	return __blk_label_update(nd_region, nd_mapping, nsblk, count);
}