xref: /linux/drivers/nvdimm/btt.c (revision bf070bb0e6c62ba3075db0a666763ba52c677102)
1 /*
2  * Block Translation Table
3  * Copyright (c) 2014-2015, Intel Corporation.
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms and conditions of the GNU General Public License,
7  * version 2, as published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
12  * more details.
13  */
14 #include <linux/highmem.h>
15 #include <linux/debugfs.h>
16 #include <linux/blkdev.h>
17 #include <linux/module.h>
18 #include <linux/device.h>
19 #include <linux/mutex.h>
20 #include <linux/hdreg.h>
21 #include <linux/genhd.h>
22 #include <linux/sizes.h>
23 #include <linux/ndctl.h>
24 #include <linux/fs.h>
25 #include <linux/nd.h>
26 #include <linux/backing-dev.h>
27 #include "btt.h"
28 #include "nd.h"
29 
30 enum log_ent_request {
31 	LOG_NEW_ENT = 0,
32 	LOG_OLD_ENT
33 };
34 
35 static struct device *to_dev(struct arena_info *arena)
36 {
37 	return &arena->nd_btt->dev;
38 }
39 
40 static u64 adjust_initial_offset(struct nd_btt *nd_btt, u64 offset)
41 {
42 	return offset + nd_btt->initial_offset;
43 }
44 
45 static int arena_read_bytes(struct arena_info *arena, resource_size_t offset,
46 		void *buf, size_t n, unsigned long flags)
47 {
48 	struct nd_btt *nd_btt = arena->nd_btt;
49 	struct nd_namespace_common *ndns = nd_btt->ndns;
50 
51 	/* arena offsets may be shifted from the base of the device */
52 	offset = adjust_initial_offset(nd_btt, offset);
53 	return nvdimm_read_bytes(ndns, offset, buf, n, flags);
54 }
55 
56 static int arena_write_bytes(struct arena_info *arena, resource_size_t offset,
57 		void *buf, size_t n, unsigned long flags)
58 {
59 	struct nd_btt *nd_btt = arena->nd_btt;
60 	struct nd_namespace_common *ndns = nd_btt->ndns;
61 
62 	/* arena offsets may be shifted from the base of the device */
63 	offset = adjust_initial_offset(nd_btt, offset);
64 	return nvdimm_write_bytes(ndns, offset, buf, n, flags);
65 }
66 
67 static int btt_info_write(struct arena_info *arena, struct btt_sb *super)
68 {
69 	int ret;
70 
71 	/*
72 	 * infooff and info2off should always be at least 512B aligned.
73 	 * We rely on that to make sure rw_bytes does error clearing
74 	 * correctly, so warn if that is not the case.
75 	 */
76 	dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->infooff, 512),
77 		"arena->infooff: %#llx is unaligned\n", arena->infooff);
78 	dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->info2off, 512),
79 		"arena->info2off: %#llx is unaligned\n", arena->info2off);
80 
81 	ret = arena_write_bytes(arena, arena->info2off, super,
82 			sizeof(struct btt_sb), 0);
83 	if (ret)
84 		return ret;
85 
86 	return arena_write_bytes(arena, arena->infooff, super,
87 			sizeof(struct btt_sb), 0);
88 }
89 
90 static int btt_info_read(struct arena_info *arena, struct btt_sb *super)
91 {
92 	return arena_read_bytes(arena, arena->infooff, super,
93 			sizeof(struct btt_sb), 0);
94 }
95 
96 /*
97  * 'raw' version of btt_map write
98  * Assumptions:
99  *   mapping is in little-endian
100  *   mapping contains 'E' and 'Z' flags as desired
101  */
102 static int __btt_map_write(struct arena_info *arena, u32 lba, __le32 mapping,
103 		unsigned long flags)
104 {
105 	u64 ns_off = arena->mapoff + (lba * MAP_ENT_SIZE);
106 
107 	if (unlikely(lba >= arena->external_nlba))
108 		dev_err_ratelimited(to_dev(arena),
109 			"%s: lba %#x out of range (max: %#x)\n",
110 			__func__, lba, arena->external_nlba);
111 	return arena_write_bytes(arena, ns_off, &mapping, MAP_ENT_SIZE, flags);
112 }
113 
114 static int btt_map_write(struct arena_info *arena, u32 lba, u32 mapping,
115 			u32 z_flag, u32 e_flag, unsigned long rwb_flags)
116 {
117 	u32 ze;
118 	__le32 mapping_le;
119 
120 	/*
121 	 * This 'mapping' is supposed to be just the LBA mapping, without
122 	 * any flags set, so strip the flag bits.
123 	 */
124 	mapping = ent_lba(mapping);
125 
126 	ze = (z_flag << 1) + e_flag;
127 	switch (ze) {
128 	case 0:
129 		/*
130 		 * We want to set neither of the Z or E flags, and
131 		 * in the actual layout, this means setting the bit
132 		 * positions of both to '1' to indicate a 'normal'
133 		 * map entry
134 		 */
135 		mapping |= MAP_ENT_NORMAL;
136 		break;
137 	case 1:
138 		mapping |= (1 << MAP_ERR_SHIFT);
139 		break;
140 	case 2:
141 		mapping |= (1 << MAP_TRIM_SHIFT);
142 		break;
143 	default:
144 		/*
145 		 * The case where Z and E are both passed in as '1' could be
146 		 * construed as a valid 'normal' entry, but we reject it to
147 		 * avoid ambiguity (see the encoding summary after this function)
148 		 */
149 		dev_err_ratelimited(to_dev(arena),
150 			"Invalid use of Z and E flags\n");
151 		return -EIO;
152 	}
153 
154 	mapping_le = cpu_to_le32(mapping);
155 	return __btt_map_write(arena, lba, mapping_le, rwb_flags);
156 }
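
/*
 * Summary of the on-media map entry encoding implemented by
 * btt_map_write() above and btt_map_read() below (the actual bit
 * positions come from the MAP_*_SHIFT definitions in btt.h):
 *
 *   Z E   meaning
 *   0 0   never written: the premap LBA maps to itself (identity)
 *   0 1   error: reads of this block fail with -EIO
 *   1 0   zeroed/trimmed: reads of this block return zeroes
 *   1 1   'normal' entry: the stored postmap LBA is valid
 *
 * A write that requests "no flags" is therefore stored as the 1,1
 * pattern (MAP_ENT_NORMAL) so it can be told apart from an entry that
 * has never been written at all.
 */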
157 
158 static int btt_map_read(struct arena_info *arena, u32 lba, u32 *mapping,
159 			int *trim, int *error, unsigned long rwb_flags)
160 {
161 	int ret;
162 	__le32 in;
163 	u32 raw_mapping, postmap, ze, z_flag, e_flag;
164 	u64 ns_off = arena->mapoff + (lba * MAP_ENT_SIZE);
165 
166 	if (unlikely(lba >= arena->external_nlba))
167 		dev_err_ratelimited(to_dev(arena),
168 			"%s: lba %#x out of range (max: %#x)\n",
169 			__func__, lba, arena->external_nlba);
170 
171 	ret = arena_read_bytes(arena, ns_off, &in, MAP_ENT_SIZE, rwb_flags);
172 	if (ret)
173 		return ret;
174 
175 	raw_mapping = le32_to_cpu(in);
176 
177 	z_flag = ent_z_flag(raw_mapping);
178 	e_flag = ent_e_flag(raw_mapping);
179 	ze = (z_flag << 1) + e_flag;
180 	postmap = ent_lba(raw_mapping);
181 
182 	/* Reuse the {z,e}_flag variables for *trim and *error */
183 	z_flag = 0;
184 	e_flag = 0;
185 
186 	switch (ze) {
187 	case 0:
188 		/* Initial state. Return postmap = premap */
189 		*mapping = lba;
190 		break;
191 	case 1:
192 		*mapping = postmap;
193 		e_flag = 1;
194 		break;
195 	case 2:
196 		*mapping = postmap;
197 		z_flag = 1;
198 		break;
199 	case 3:
200 		*mapping = postmap;
201 		break;
202 	default:
203 		return -EIO;
204 	}
205 
206 	if (trim)
207 		*trim = z_flag;
208 	if (error)
209 		*error = e_flag;
210 
211 	return ret;
212 }
213 
214 static int btt_log_read_pair(struct arena_info *arena, u32 lane,
215 			struct log_entry *ent)
216 {
217 	return arena_read_bytes(arena,
218 			arena->logoff + (2 * lane * LOG_ENT_SIZE), ent,
219 			2 * LOG_ENT_SIZE, 0);
220 }
221 
222 static struct dentry *debugfs_root;
223 
224 static void arena_debugfs_init(struct arena_info *a, struct dentry *parent,
225 				int idx)
226 {
227 	char dirname[32];
228 	struct dentry *d;
229 
230 	/* If, for some reason, the parent bttN dir was not created, exit */
231 	if (!parent)
232 		return;
233 
234 	snprintf(dirname, 32, "arena%d", idx);
235 	d = debugfs_create_dir(dirname, parent);
236 	if (IS_ERR_OR_NULL(d))
237 		return;
238 	a->debugfs_dir = d;
239 
240 	debugfs_create_x64("size", S_IRUGO, d, &a->size);
241 	debugfs_create_x64("external_lba_start", S_IRUGO, d,
242 				&a->external_lba_start);
243 	debugfs_create_x32("internal_nlba", S_IRUGO, d, &a->internal_nlba);
244 	debugfs_create_u32("internal_lbasize", S_IRUGO, d,
245 				&a->internal_lbasize);
246 	debugfs_create_x32("external_nlba", S_IRUGO, d, &a->external_nlba);
247 	debugfs_create_u32("external_lbasize", S_IRUGO, d,
248 				&a->external_lbasize);
249 	debugfs_create_u32("nfree", S_IRUGO, d, &a->nfree);
250 	debugfs_create_u16("version_major", S_IRUGO, d, &a->version_major);
251 	debugfs_create_u16("version_minor", S_IRUGO, d, &a->version_minor);
252 	debugfs_create_x64("nextoff", S_IRUGO, d, &a->nextoff);
253 	debugfs_create_x64("infooff", S_IRUGO, d, &a->infooff);
254 	debugfs_create_x64("dataoff", S_IRUGO, d, &a->dataoff);
255 	debugfs_create_x64("mapoff", S_IRUGO, d, &a->mapoff);
256 	debugfs_create_x64("logoff", S_IRUGO, d, &a->logoff);
257 	debugfs_create_x64("info2off", S_IRUGO, d, &a->info2off);
258 	debugfs_create_x32("flags", S_IRUGO, d, &a->flags);
259 }
260 
261 static void btt_debugfs_init(struct btt *btt)
262 {
263 	int i = 0;
264 	struct arena_info *arena;
265 
266 	btt->debugfs_dir = debugfs_create_dir(dev_name(&btt->nd_btt->dev),
267 						debugfs_root);
268 	if (IS_ERR_OR_NULL(btt->debugfs_dir))
269 		return;
270 
271 	list_for_each_entry(arena, &btt->arena_list, list) {
272 		arena_debugfs_init(arena, btt->debugfs_dir, i);
273 		i++;
274 	}
275 }
276 
277 /*
278  * This function accepts two log entries and uses the sequence numbers
279  * to find the 'older' entry. If the first slot has never been used
280  * (seq == 0), it is initialized and slot 0 is treated as the older one.
281  * It returns the index (0 or 1) of the older entry, or a negative
282  * error for invalid sequence numbers. A worked example follows the function.
283  *
284  * TODO The logic feels a bit kludge-y. Make it better.
285  */
286 static int btt_log_get_old(struct log_entry *ent)
287 {
288 	int old;
289 
290 	/*
291 	 * The first time this is ever seen, the entry goes into [0];
292 	 * the next time, the logic below works out to put the next
293 	 * entry into [1]
294 	 */
295 	if (ent[0].seq == 0) {
296 		ent[0].seq = cpu_to_le32(1);
297 		return 0;
298 	}
299 
300 	if (ent[0].seq == ent[1].seq)
301 		return -EINVAL;
302 	if (le32_to_cpu(ent[0].seq) + le32_to_cpu(ent[1].seq) > 5)
303 		return -EINVAL;
304 
305 	if (le32_to_cpu(ent[0].seq) < le32_to_cpu(ent[1].seq)) {
306 		if (le32_to_cpu(ent[1].seq) - le32_to_cpu(ent[0].seq) == 1)
307 			old = 0;
308 		else
309 			old = 1;
310 	} else {
311 		if (le32_to_cpu(ent[0].seq) - le32_to_cpu(ent[1].seq) == 1)
312 			old = 1;
313 		else
314 			old = 0;
315 	}
316 
317 	return old;
318 }
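
/*
 * Worked example of the sequence rules above: valid sequence numbers
 * cycle 1 -> 2 -> 3 -> 1 (0 means 'unused'), so the newer of two entries
 * is the one whose seq is one step ahead in that cycle:
 *
 *   ent[0].seq   ent[1].seq   older slot
 *       1            2            0
 *       2            1            1
 *       2            3            0
 *       3            1            0     (wrap-around: 1 follows 3)
 *       1            3            1
 *
 * Equal sequence numbers, or a pair summing to more than 5 (which cannot
 * happen for two distinct values from {1, 2, 3}), are rejected above
 * with -EINVAL.
 */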
319 
320 /*
321  * This function copies the desired (old/new) log entry into ent if
322  * it is not NULL. It returns the sub-slot number (0 or 1)
323  * where the desired log entry was found. Negative return values
324  * indicate errors.
325  */
326 static int btt_log_read(struct arena_info *arena, u32 lane,
327 			struct log_entry *ent, int old_flag)
328 {
329 	int ret;
330 	int old_ent, ret_ent;
331 	struct log_entry log[2];
332 
333 	ret = btt_log_read_pair(arena, lane, log);
334 	if (ret)
335 		return -EIO;
336 
337 	old_ent = btt_log_get_old(log);
338 	if (old_ent < 0 || old_ent > 1) {
339 		dev_err(to_dev(arena),
340 				"log corruption (%d): lane %d seq [%d, %d]\n",
341 			old_ent, lane, le32_to_cpu(log[0].seq), le32_to_cpu(log[1].seq));
342 		/* TODO set error state? */
343 		return -EIO;
344 	}
345 
346 	ret_ent = (old_flag ? old_ent : (1 - old_ent));
347 
348 	if (ent != NULL)
349 		memcpy(ent, &log[ret_ent], LOG_ENT_SIZE);
350 
351 	return ret_ent;
352 }
353 
354 /*
355  * This function commits a log entry to media.
356  * It does _not_ prepare the freelist entry for the next write;
357  * btt_flog_write() is the wrapper that also updates the freelist elements.
358  */
359 static int __btt_log_write(struct arena_info *arena, u32 lane,
360 			u32 sub, struct log_entry *ent, unsigned long flags)
361 {
362 	int ret;
363 	/*
364 	 * Ignore the padding in log_entry for calculating log_half.
365 	 * The entry is 'committed' when we write the sequence number,
366 	 * and we want to ensure that that is the last thing written.
367 	 * We don't bother writing the padding as that would be extra
368 	 * media wear and write amplification
369 	 */
370 	unsigned int log_half = (LOG_ENT_SIZE - 2 * sizeof(u64)) / 2;
371 	u64 ns_off = arena->logoff + (((2 * lane) + sub) * LOG_ENT_SIZE);
372 	void *src = ent;
373 
374 	/* split the 16B write into atomic, durable halves */
375 	ret = arena_write_bytes(arena, ns_off, src, log_half, flags);
376 	if (ret)
377 		return ret;
378 
379 	ns_off += log_half;
380 	src += log_half;
381 	return arena_write_bytes(arena, ns_off, src, log_half, flags);
382 }
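
/*
 * Concretely, with the log_entry layout in this tree (four __le32 fields
 * -- lba, old_map, new_map, seq -- followed by two u64 of padding),
 * log_half works out to 8 bytes: the first write above covers
 * { lba, old_map } and the second covers { new_map, seq }. Because seq
 * lands in the second half, a torn update leaves the stale sequence
 * number in place and the slot is still picked out as the 'old' one on
 * the next recovery pass rather than being misread as committed.
 */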
383 
384 static int btt_flog_write(struct arena_info *arena, u32 lane, u32 sub,
385 			struct log_entry *ent)
386 {
387 	int ret;
388 
389 	ret = __btt_log_write(arena, lane, sub, ent, NVDIMM_IO_ATOMIC);
390 	if (ret)
391 		return ret;
392 
393 	/* prepare the next free entry */
394 	arena->freelist[lane].sub = 1 - arena->freelist[lane].sub;
395 	if (++(arena->freelist[lane].seq) == 4)
396 		arena->freelist[lane].seq = 1;
397 	if (ent_e_flag(le32_to_cpu(ent->old_map)))
398 		arena->freelist[lane].has_err = 1;
399 	arena->freelist[lane].block = ent_lba(le32_to_cpu(ent->old_map));
400 
401 	return ret;
402 }
403 
404 /*
405  * This function initializes the BTT map to the initial state, which is
406  * all zeroes, which indicates an identity mapping
407  */
408 static int btt_map_init(struct arena_info *arena)
409 {
410 	int ret = -EINVAL;
411 	void *zerobuf;
412 	size_t offset = 0;
413 	size_t chunk_size = SZ_2M;
414 	size_t mapsize = arena->logoff - arena->mapoff;
415 
416 	zerobuf = kzalloc(chunk_size, GFP_KERNEL);
417 	if (!zerobuf)
418 		return -ENOMEM;
419 
420 	/*
421 	 * mapoff should always be at least 512B aligned. We rely on that to
422 	 * make sure rw_bytes does error clearing correctly, so warn if that
423 	 * is not the case.
424 	 */
425 	dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->mapoff, 512),
426 		"arena->mapoff: %#llx is unaligned\n", arena->mapoff);
427 
428 	while (mapsize) {
429 		size_t size = min(mapsize, chunk_size);
430 
431 		dev_WARN_ONCE(to_dev(arena), size < 512,
432 			"chunk size: %#zx is unaligned\n", size);
433 		ret = arena_write_bytes(arena, arena->mapoff + offset, zerobuf,
434 				size, 0);
435 		if (ret)
436 			goto free;
437 
438 		offset += size;
439 		mapsize -= size;
440 		cond_resched();
441 	}
442 
443  free:
444 	kfree(zerobuf);
445 	return ret;
446 }
447 
448 /*
449  * This function initializes the BTT log with 'fake' entries that mark
450  * the initially reserved set of blocks as free
451  */
452 static int btt_log_init(struct arena_info *arena)
453 {
454 	size_t logsize = arena->info2off - arena->logoff;
455 	size_t chunk_size = SZ_4K, offset = 0;
456 	struct log_entry log;
457 	void *zerobuf;
458 	int ret;
459 	u32 i;
460 
461 	zerobuf = kzalloc(chunk_size, GFP_KERNEL);
462 	if (!zerobuf)
463 		return -ENOMEM;
464 	/*
465 	 * logoff should always be at least 512B aligned. We rely on that to
466 	 * make sure rw_bytes does error clearing correctly, so warn if that
467 	 * is not the case.
468 	 */
469 	dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->logoff, 512),
470 		"arena->logoff: %#llx is unaligned\n", arena->logoff);
471 
472 	while (logsize) {
473 		size_t size = min(logsize, chunk_size);
474 
475 		dev_WARN_ONCE(to_dev(arena), size < 512,
476 			"chunk size: %#zx is unaligned\n", size);
477 		ret = arena_write_bytes(arena, arena->logoff + offset, zerobuf,
478 				size, 0);
479 		if (ret)
480 			goto free;
481 
482 		offset += size;
483 		logsize -= size;
484 		cond_resched();
485 	}
486 
487 	for (i = 0; i < arena->nfree; i++) {
488 		log.lba = cpu_to_le32(i);
489 		log.old_map = cpu_to_le32(arena->external_nlba + i);
490 		log.new_map = cpu_to_le32(arena->external_nlba + i);
491 		log.seq = cpu_to_le32(LOG_SEQ_INIT);
492 		ret = __btt_log_write(arena, i, 0, &log, 0);
493 		if (ret)
494 			goto free;
495 	}
496 
497  free:
498 	kfree(zerobuf);
499 	return ret;
500 }
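
/*
 * After this loop, lane i's flog slot 0 records { lba = i,
 * old_map = new_map = external_nlba + i, seq = LOG_SEQ_INIT }, i.e.
 * internal block (external_nlba + i) is on lane i's free list, and the
 * old_map == new_map equality marks the entry as one that has never
 * carried a real transaction -- exactly what btt_freelist_init() checks
 * for when deciding whether map recovery is needed.
 */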
501 
502 static u64 to_namespace_offset(struct arena_info *arena, u64 lba)
503 {
504 	return arena->dataoff + ((u64)lba * arena->internal_lbasize);
505 }
506 
507 static int arena_clear_freelist_error(struct arena_info *arena, u32 lane)
508 {
509 	int ret = 0;
510 
511 	if (arena->freelist[lane].has_err) {
512 		void *zero_page = page_address(ZERO_PAGE(0));
513 		u32 lba = arena->freelist[lane].block;
514 		u64 nsoff = to_namespace_offset(arena, lba);
515 		unsigned long len = arena->sector_size;
516 
517 		mutex_lock(&arena->err_lock);
518 
519 		while (len) {
520 			unsigned long chunk = min(len, PAGE_SIZE);
521 
522 			ret = arena_write_bytes(arena, nsoff, zero_page,
523 				chunk, 0);
524 			if (ret)
525 				break;
526 			len -= chunk;
527 			nsoff += chunk;
528 			if (len == 0)
529 				arena->freelist[lane].has_err = 0;
530 		}
531 		mutex_unlock(&arena->err_lock);
532 	}
533 	return ret;
534 }
535 
536 static int btt_freelist_init(struct arena_info *arena)
537 {
538 	int old, new, ret;
539 	u32 i, map_entry;
540 	struct log_entry log_new, log_old;
541 
542 	arena->freelist = kcalloc(arena->nfree, sizeof(struct free_entry),
543 					GFP_KERNEL);
544 	if (!arena->freelist)
545 		return -ENOMEM;
546 
547 	for (i = 0; i < arena->nfree; i++) {
548 		old = btt_log_read(arena, i, &log_old, LOG_OLD_ENT);
549 		if (old < 0)
550 			return old;
551 
552 		new = btt_log_read(arena, i, &log_new, LOG_NEW_ENT);
553 		if (new < 0)
554 			return new;
555 
556 		/* sub points to the next one to be overwritten */
557 		arena->freelist[i].sub = 1 - new;
558 		arena->freelist[i].seq = nd_inc_seq(le32_to_cpu(log_new.seq));
559 		arena->freelist[i].block = ent_lba(le32_to_cpu(log_new.old_map));
560 
561 		/*
562 		 * FIXME: if error clearing fails during init, we want to make
563 		 * the BTT read-only
564 		 */
565 		if (ent_e_flag(le32_to_cpu(log_new.old_map))) {
566 			ret = arena_clear_freelist_error(arena, i);
567 			if (ret)
568 				dev_err_ratelimited(to_dev(arena),
569 					"Unable to clear known errors\n");
570 		}
571 
572 		/* This implies a newly created or untouched flog entry */
573 		if (log_new.old_map == log_new.new_map)
574 			continue;
575 
576 		/* Check if map recovery is needed */
577 		ret = btt_map_read(arena, le32_to_cpu(log_new.lba), &map_entry,
578 				NULL, NULL, 0);
579 		if (ret)
580 			return ret;
581 		if ((le32_to_cpu(log_new.new_map) != map_entry) &&
582 				(le32_to_cpu(log_new.old_map) == map_entry)) {
583 			/*
584 			 * Last transaction wrote the flog, but wasn't able
585 			 * to complete the map write. So fix up the map.
586 			 */
587 			ret = btt_map_write(arena, le32_to_cpu(log_new.lba),
588 					le32_to_cpu(log_new.new_map), 0, 0, 0);
589 			if (ret)
590 				return ret;
591 		}
592 	}
593 
594 	return 0;
595 }
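
/*
 * Recovery example for the map fix-up above: suppose the last write to
 * premap LBA L committed the flog entry { lba = L, old_map = O,
 * new_map = N } but the machine went down before the map update. On the
 * next init the map still returns O for L, which matches old_map but not
 * new_map, so the map is rewritten to point at N and the interrupted
 * transaction is effectively completed. If the map already returns N,
 * the crash happened after the map write and nothing needs fixing.
 */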
596 
597 static int btt_rtt_init(struct arena_info *arena)
598 {
599 	arena->rtt = kcalloc(arena->nfree, sizeof(u32), GFP_KERNEL);
600 	if (arena->rtt == NULL)
601 		return -ENOMEM;
602 
603 	return 0;
604 }
605 
606 static int btt_maplocks_init(struct arena_info *arena)
607 {
608 	u32 i;
609 
610 	arena->map_locks = kcalloc(arena->nfree, sizeof(struct aligned_lock),
611 				GFP_KERNEL);
612 	if (!arena->map_locks)
613 		return -ENOMEM;
614 
615 	for (i = 0; i < arena->nfree; i++)
616 		spin_lock_init(&arena->map_locks[i].lock);
617 
618 	return 0;
619 }
620 
621 static struct arena_info *alloc_arena(struct btt *btt, size_t size,
622 				size_t start, size_t arena_off)
623 {
624 	struct arena_info *arena;
625 	u64 logsize, mapsize, datasize;
626 	u64 available = size;
627 
628 	arena = kzalloc(sizeof(struct arena_info), GFP_KERNEL);
629 	if (!arena)
630 		return NULL;
631 	arena->nd_btt = btt->nd_btt;
632 	arena->sector_size = btt->sector_size;
633 
634 	if (!size)
635 		return arena;
636 
637 	arena->size = size;
638 	arena->external_lba_start = start;
639 	arena->external_lbasize = btt->lbasize;
640 	arena->internal_lbasize = roundup(arena->external_lbasize,
641 					INT_LBASIZE_ALIGNMENT);
642 	arena->nfree = BTT_DEFAULT_NFREE;
643 	arena->version_major = btt->nd_btt->version_major;
644 	arena->version_minor = btt->nd_btt->version_minor;
645 
646 	if (available % BTT_PG_SIZE)
647 		available -= (available % BTT_PG_SIZE);
648 
649 	/* Two pages are reserved for the super block and its copy */
650 	available -= 2 * BTT_PG_SIZE;
651 
652 	/* The log takes a fixed amount of space based on nfree */
653 	logsize = roundup(2 * arena->nfree * sizeof(struct log_entry),
654 				BTT_PG_SIZE);
655 	available -= logsize;
656 
657 	/* Calculate optimal split between map and data area */
658 	arena->internal_nlba = div_u64(available - BTT_PG_SIZE,
659 			arena->internal_lbasize + MAP_ENT_SIZE);
660 	arena->external_nlba = arena->internal_nlba - arena->nfree;
661 
662 	mapsize = roundup((arena->external_nlba * MAP_ENT_SIZE), BTT_PG_SIZE);
663 	datasize = available - mapsize;
664 
665 	/* 'Absolute' values, relative to start of storage space */
666 	arena->infooff = arena_off;
667 	arena->dataoff = arena->infooff + BTT_PG_SIZE;
668 	arena->mapoff = arena->dataoff + datasize;
669 	arena->logoff = arena->mapoff + mapsize;
670 	arena->info2off = arena->logoff + logsize;
671 	return arena;
672 }
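
/*
 * Resulting on-media layout for a non-zero 'size' (regions listed in
 * increasing offset order, as computed above):
 *
 *   infooff    info block          (BTT_PG_SIZE)
 *   dataoff    data blocks         (internal_lbasize bytes each)
 *   mapoff     map                 (MAP_ENT_SIZE bytes per external LBA)
 *   logoff     flog                (2 log entries per free-list lane)
 *   info2off   backup info block   (BTT_PG_SIZE)
 *
 * The data/map split comes from
 *   internal_nlba = (available - BTT_PG_SIZE) /
 *                       (internal_lbasize + MAP_ENT_SIZE)
 * and external_nlba = internal_nlba - nfree, with the nfree extra blocks
 * held back to seed the free list.
 */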
673 
674 static void free_arenas(struct btt *btt)
675 {
676 	struct arena_info *arena, *next;
677 
678 	list_for_each_entry_safe(arena, next, &btt->arena_list, list) {
679 		list_del(&arena->list);
680 		kfree(arena->rtt);
681 		kfree(arena->map_locks);
682 		kfree(arena->freelist);
683 		debugfs_remove_recursive(arena->debugfs_dir);
684 		kfree(arena);
685 	}
686 }
687 
688 /*
689  * This function parses an existing valid btt superblock and
690  * populates the corresponding arena_info struct
691  */
692 static void parse_arena_meta(struct arena_info *arena, struct btt_sb *super,
693 				u64 arena_off)
694 {
695 	arena->internal_nlba = le32_to_cpu(super->internal_nlba);
696 	arena->internal_lbasize = le32_to_cpu(super->internal_lbasize);
697 	arena->external_nlba = le32_to_cpu(super->external_nlba);
698 	arena->external_lbasize = le32_to_cpu(super->external_lbasize);
699 	arena->nfree = le32_to_cpu(super->nfree);
700 	arena->version_major = le16_to_cpu(super->version_major);
701 	arena->version_minor = le16_to_cpu(super->version_minor);
702 
703 	arena->nextoff = (super->nextoff == 0) ? 0 : (arena_off +
704 			le64_to_cpu(super->nextoff));
705 	arena->infooff = arena_off;
706 	arena->dataoff = arena_off + le64_to_cpu(super->dataoff);
707 	arena->mapoff = arena_off + le64_to_cpu(super->mapoff);
708 	arena->logoff = arena_off + le64_to_cpu(super->logoff);
709 	arena->info2off = arena_off + le64_to_cpu(super->info2off);
710 
711 	arena->size = (le64_to_cpu(super->nextoff) > 0)
712 		? (le64_to_cpu(super->nextoff))
713 		: (arena->info2off - arena->infooff + BTT_PG_SIZE);
714 
715 	arena->flags = le32_to_cpu(super->flags);
716 }
717 
718 static int discover_arenas(struct btt *btt)
719 {
720 	int ret = 0;
721 	struct arena_info *arena;
722 	struct btt_sb *super;
723 	size_t remaining = btt->rawsize;
724 	u64 cur_nlba = 0;
725 	size_t cur_off = 0;
726 	int num_arenas = 0;
727 
728 	super = kzalloc(sizeof(*super), GFP_KERNEL);
729 	if (!super)
730 		return -ENOMEM;
731 
732 	while (remaining) {
733 		/* Alloc memory for arena */
734 		arena = alloc_arena(btt, 0, 0, 0);
735 		if (!arena) {
736 			ret = -ENOMEM;
737 			goto out_super;
738 		}
739 
740 		arena->infooff = cur_off;
741 		ret = btt_info_read(arena, super);
742 		if (ret)
743 			goto out;
744 
745 		if (!nd_btt_arena_is_valid(btt->nd_btt, super)) {
746 			if (remaining == btt->rawsize) {
747 				btt->init_state = INIT_NOTFOUND;
748 				dev_info(to_dev(arena), "No existing arenas\n");
749 				goto out;
750 			} else {
751 				dev_err(to_dev(arena),
752 						"Found corrupted metadata!\n");
753 				ret = -ENODEV;
754 				goto out;
755 			}
756 		}
757 
758 		arena->external_lba_start = cur_nlba;
759 		parse_arena_meta(arena, super, cur_off);
760 
761 		mutex_init(&arena->err_lock);
762 		ret = btt_freelist_init(arena);
763 		if (ret)
764 			goto out;
765 
766 		ret = btt_rtt_init(arena);
767 		if (ret)
768 			goto out;
769 
770 		ret = btt_maplocks_init(arena);
771 		if (ret)
772 			goto out;
773 
774 		list_add_tail(&arena->list, &btt->arena_list);
775 
776 		remaining -= arena->size;
777 		cur_off += arena->size;
778 		cur_nlba += arena->external_nlba;
779 		num_arenas++;
780 
781 		if (arena->nextoff == 0)
782 			break;
783 	}
784 	btt->num_arenas = num_arenas;
785 	btt->nlba = cur_nlba;
786 	btt->init_state = INIT_READY;
787 
788 	kfree(super);
789 	return ret;
790 
791  out:
792 	kfree(arena);
793 	free_arenas(btt);
794  out_super:
795 	kfree(super);
796 	return ret;
797 }
798 
799 static int create_arenas(struct btt *btt)
800 {
801 	size_t remaining = btt->rawsize;
802 	size_t cur_off = 0;
803 
804 	while (remaining) {
805 		struct arena_info *arena;
806 		size_t arena_size = min_t(u64, ARENA_MAX_SIZE, remaining);
807 
808 		remaining -= arena_size;
809 		if (arena_size < ARENA_MIN_SIZE)
810 			break;
811 
812 		arena = alloc_arena(btt, arena_size, btt->nlba, cur_off);
813 		if (!arena) {
814 			free_arenas(btt);
815 			return -ENOMEM;
816 		}
817 		btt->nlba += arena->external_nlba;
818 		if (remaining >= ARENA_MIN_SIZE)
819 			arena->nextoff = arena->size;
820 		else
821 			arena->nextoff = 0;
822 		cur_off += arena_size;
823 		list_add_tail(&arena->list, &btt->arena_list);
824 	}
825 
826 	return 0;
827 }
828 
829 /*
830  * This function completes arena initialization by writing
831  * all the metadata.
832  * It is only called, via btt_meta_init(), for an arena that has not
833  * yet been initialized on media.
834  */
835 static int btt_arena_write_layout(struct arena_info *arena)
836 {
837 	int ret;
838 	u64 sum;
839 	struct btt_sb *super;
840 	struct nd_btt *nd_btt = arena->nd_btt;
841 	const u8 *parent_uuid = nd_dev_to_uuid(&nd_btt->ndns->dev);
842 
843 	ret = btt_map_init(arena);
844 	if (ret)
845 		return ret;
846 
847 	ret = btt_log_init(arena);
848 	if (ret)
849 		return ret;
850 
851 	super = kzalloc(sizeof(struct btt_sb), GFP_NOIO);
852 	if (!super)
853 		return -ENOMEM;
854 
855 	strncpy(super->signature, BTT_SIG, BTT_SIG_LEN);
856 	memcpy(super->uuid, nd_btt->uuid, 16);
857 	memcpy(super->parent_uuid, parent_uuid, 16);
858 	super->flags = cpu_to_le32(arena->flags);
859 	super->version_major = cpu_to_le16(arena->version_major);
860 	super->version_minor = cpu_to_le16(arena->version_minor);
861 	super->external_lbasize = cpu_to_le32(arena->external_lbasize);
862 	super->external_nlba = cpu_to_le32(arena->external_nlba);
863 	super->internal_lbasize = cpu_to_le32(arena->internal_lbasize);
864 	super->internal_nlba = cpu_to_le32(arena->internal_nlba);
865 	super->nfree = cpu_to_le32(arena->nfree);
866 	super->infosize = cpu_to_le32(sizeof(struct btt_sb));
867 	super->nextoff = cpu_to_le64(arena->nextoff);
868 	/*
869 	 * Subtract arena->infooff (arena start) so numbers are relative
870 	 * to 'this' arena
871 	 */
872 	super->dataoff = cpu_to_le64(arena->dataoff - arena->infooff);
873 	super->mapoff = cpu_to_le64(arena->mapoff - arena->infooff);
874 	super->logoff = cpu_to_le64(arena->logoff - arena->infooff);
875 	super->info2off = cpu_to_le64(arena->info2off - arena->infooff);
876 
877 	super->flags = 0;
878 	sum = nd_sb_checksum((struct nd_gen_sb *) super);
879 	super->checksum = cpu_to_le64(sum);
880 
881 	ret = btt_info_write(arena, super);
882 
883 	kfree(super);
884 	return ret;
885 }
886 
887 /*
888  * This function completes the initialization for the BTT namespace
889  * such that it is ready to accept IOs
890  */
891 static int btt_meta_init(struct btt *btt)
892 {
893 	int ret = 0;
894 	struct arena_info *arena;
895 
896 	mutex_lock(&btt->init_lock);
897 	list_for_each_entry(arena, &btt->arena_list, list) {
898 		ret = btt_arena_write_layout(arena);
899 		if (ret)
900 			goto unlock;
901 
902 		ret = btt_freelist_init(arena);
903 		if (ret)
904 			goto unlock;
905 
906 		ret = btt_rtt_init(arena);
907 		if (ret)
908 			goto unlock;
909 
910 		ret = btt_maplocks_init(arena);
911 		if (ret)
912 			goto unlock;
913 	}
914 
915 	btt->init_state = INIT_READY;
916 
917  unlock:
918 	mutex_unlock(&btt->init_lock);
919 	return ret;
920 }
921 
922 static u32 btt_meta_size(struct btt *btt)
923 {
924 	return btt->lbasize - btt->sector_size;
925 }
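
/*
 * For example, an lbasize of 4104 (assuming the namespace advertises
 * such a protection-information geometry) yields a 4096 byte BTT sector
 * size in btt_init() and hence 8 bytes of per-sector metadata here;
 * btt_rw_integrity() below is what moves that metadata between the
 * bio_integrity_payload and the space trailing each data block. When
 * lbasize equals the sector size this is simply zero and no integrity
 * metadata is handled.
 */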
926 
927 /*
928  * This function calculates the arena in which the given LBA lies
929  * by doing a linear walk. This is acceptable since we expect only
930  * a few arenas. If we have backing devices that get much larger,
931  * we can construct a balanced binary tree of arenas at init time
932  * so that this range search becomes faster.
933  */
934 static int lba_to_arena(struct btt *btt, sector_t sector, __u32 *premap,
935 				struct arena_info **arena)
936 {
937 	struct arena_info *arena_list;
938 	__u64 lba = div_u64(sector << SECTOR_SHIFT, btt->sector_size);
939 
940 	list_for_each_entry(arena_list, &btt->arena_list, list) {
941 		if (lba < arena_list->external_nlba) {
942 			*arena = arena_list;
943 			*premap = lba;
944 			return 0;
945 		}
946 		lba -= arena_list->external_nlba;
947 	}
948 
949 	return -EIO;
950 }
951 
952 /*
953  * The following (lock_map, unlock_map) are mostly just to improve
954  * readability, since they index into an array of locks
955  */
956 static void lock_map(struct arena_info *arena, u32 premap)
957 		__acquires(&arena->map_locks[idx].lock)
958 {
959 	u32 idx = (premap * MAP_ENT_SIZE / L1_CACHE_BYTES) % arena->nfree;
960 
961 	spin_lock(&arena->map_locks[idx].lock);
962 }
963 
964 static void unlock_map(struct arena_info *arena, u32 premap)
965 		__releases(&arena->map_locks[idx].lock)
966 {
967 	u32 idx = (premap * MAP_ENT_SIZE / L1_CACHE_BYTES) % arena->nfree;
968 
969 	spin_unlock(&arena->map_locks[idx].lock);
970 }
971 
972 static int btt_data_read(struct arena_info *arena, struct page *page,
973 			unsigned int off, u32 lba, u32 len)
974 {
975 	int ret;
976 	u64 nsoff = to_namespace_offset(arena, lba);
977 	void *mem = kmap_atomic(page);
978 
979 	ret = arena_read_bytes(arena, nsoff, mem + off, len, NVDIMM_IO_ATOMIC);
980 	kunmap_atomic(mem);
981 
982 	return ret;
983 }
984 
985 static int btt_data_write(struct arena_info *arena, u32 lba,
986 			struct page *page, unsigned int off, u32 len)
987 {
988 	int ret;
989 	u64 nsoff = to_namespace_offset(arena, lba);
990 	void *mem = kmap_atomic(page);
991 
992 	ret = arena_write_bytes(arena, nsoff, mem + off, len, NVDIMM_IO_ATOMIC);
993 	kunmap_atomic(mem);
994 
995 	return ret;
996 }
997 
998 static void zero_fill_data(struct page *page, unsigned int off, u32 len)
999 {
1000 	void *mem = kmap_atomic(page);
1001 
1002 	memset(mem + off, 0, len);
1003 	kunmap_atomic(mem);
1004 }
1005 
1006 #ifdef CONFIG_BLK_DEV_INTEGRITY
1007 static int btt_rw_integrity(struct btt *btt, struct bio_integrity_payload *bip,
1008 			struct arena_info *arena, u32 postmap, int rw)
1009 {
1010 	unsigned int len = btt_meta_size(btt);
1011 	u64 meta_nsoff;
1012 	int ret = 0;
1013 
1014 	if (bip == NULL)
1015 		return 0;
1016 
1017 	meta_nsoff = to_namespace_offset(arena, postmap) + btt->sector_size;
1018 
1019 	while (len) {
1020 		unsigned int cur_len;
1021 		struct bio_vec bv;
1022 		void *mem;
1023 
1024 		bv = bvec_iter_bvec(bip->bip_vec, bip->bip_iter);
1025 		/*
1026 		 * The 'bv' obtained from bvec_iter_bvec has its .bv_len and
1027 		 * .bv_offset already adjusted for iter->bi_bvec_done, and we
1028 		 * can use those directly
1029 		 */
1030 
1031 		cur_len = min(len, bv.bv_len);
1032 		mem = kmap_atomic(bv.bv_page);
1033 		if (rw)
1034 			ret = arena_write_bytes(arena, meta_nsoff,
1035 					mem + bv.bv_offset, cur_len,
1036 					NVDIMM_IO_ATOMIC);
1037 		else
1038 			ret = arena_read_bytes(arena, meta_nsoff,
1039 					mem + bv.bv_offset, cur_len,
1040 					NVDIMM_IO_ATOMIC);
1041 
1042 		kunmap_atomic(mem);
1043 		if (ret)
1044 			return ret;
1045 
1046 		len -= cur_len;
1047 		meta_nsoff += cur_len;
1048 		if (!bvec_iter_advance(bip->bip_vec, &bip->bip_iter, cur_len))
1049 			return -EIO;
1050 	}
1051 
1052 	return ret;
1053 }
1054 
1055 #else /* CONFIG_BLK_DEV_INTEGRITY */
1056 static int btt_rw_integrity(struct btt *btt, struct bio_integrity_payload *bip,
1057 			struct arena_info *arena, u32 postmap, int rw)
1058 {
1059 	return 0;
1060 }
1061 #endif
1062 
1063 static int btt_read_pg(struct btt *btt, struct bio_integrity_payload *bip,
1064 			struct page *page, unsigned int off, sector_t sector,
1065 			unsigned int len)
1066 {
1067 	int ret = 0;
1068 	int t_flag, e_flag;
1069 	struct arena_info *arena = NULL;
1070 	u32 lane = 0, premap, postmap;
1071 
1072 	while (len) {
1073 		u32 cur_len;
1074 
1075 		lane = nd_region_acquire_lane(btt->nd_region);
1076 
1077 		ret = lba_to_arena(btt, sector, &premap, &arena);
1078 		if (ret)
1079 			goto out_lane;
1080 
1081 		cur_len = min(btt->sector_size, len);
1082 
1083 		ret = btt_map_read(arena, premap, &postmap, &t_flag, &e_flag,
1084 				NVDIMM_IO_ATOMIC);
1085 		if (ret)
1086 			goto out_lane;
1087 
1088 		/*
1089 		 * We loop to make sure that the post map LBA didn't change
1090 		 * from under us between writing the RTT and doing the actual
1091 		 * read.
1092 		 */
1093 		while (1) {
1094 			u32 new_map;
1095 			int new_t, new_e;
1096 
1097 			if (t_flag) {
1098 				zero_fill_data(page, off, cur_len);
1099 				goto out_lane;
1100 			}
1101 
1102 			if (e_flag) {
1103 				ret = -EIO;
1104 				goto out_lane;
1105 			}
1106 
1107 			arena->rtt[lane] = RTT_VALID | postmap;
1108 			/*
1109 			 * Barrier to make sure this write is not reordered
1110 			 * to do the verification map_read before the RTT store
1111 			 */
1112 			barrier();
1113 
1114 			ret = btt_map_read(arena, premap, &new_map, &new_t,
1115 						&new_e, NVDIMM_IO_ATOMIC);
1116 			if (ret)
1117 				goto out_rtt;
1118 
1119 			if ((postmap == new_map) && (t_flag == new_t) &&
1120 					(e_flag == new_e))
1121 				break;
1122 
1123 			postmap = new_map;
1124 			t_flag = new_t;
1125 			e_flag = new_e;
1126 		}
1127 
1128 		ret = btt_data_read(arena, page, off, postmap, cur_len);
1129 		if (ret) {
1130 			int rc;
1131 
1132 			/* Media error - set the e_flag */
1133 			rc = btt_map_write(arena, premap, postmap, 0, 1,
1134 				NVDIMM_IO_ATOMIC);
1135 			goto out_rtt;
1136 		}
1137 
1138 		if (bip) {
1139 			ret = btt_rw_integrity(btt, bip, arena, postmap, READ);
1140 			if (ret)
1141 				goto out_rtt;
1142 		}
1143 
1144 		arena->rtt[lane] = RTT_INVALID;
1145 		nd_region_release_lane(btt->nd_region, lane);
1146 
1147 		len -= cur_len;
1148 		off += cur_len;
1149 		sector += btt->sector_size >> SECTOR_SHIFT;
1150 	}
1151 
1152 	return 0;
1153 
1154  out_rtt:
1155 	arena->rtt[lane] = RTT_INVALID;
1156  out_lane:
1157 	nd_region_release_lane(btt->nd_region, lane);
1158 	return ret;
1159 }
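
/*
 * Read-path summary (a restatement of the loop above): for each BTT
 * sector the premap LBA is translated via the map, the chosen postmap
 * block is published in this lane's RTT slot, and the map is read a
 * second time to make sure a concurrent writer did not retire that block
 * in the meantime. Only then are the data (and any integrity metadata)
 * read, after which the RTT slot is invalidated and the lane released.
 */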
1160 
1161 /*
1162  * Normally, arena_{read,write}_bytes will take care of the initial offset
1163  * adjustment, but in the case of btt_is_badblock, where we query is_bad_pmem,
1164  * we need the final, raw namespace offset here
1165  */
1166 static bool btt_is_badblock(struct btt *btt, struct arena_info *arena,
1167 		u32 postmap)
1168 {
1169 	u64 nsoff = adjust_initial_offset(arena->nd_btt,
1170 			to_namespace_offset(arena, postmap));
1171 	sector_t phys_sector = nsoff >> 9;
1172 
1173 	return is_bad_pmem(btt->phys_bb, phys_sector, arena->internal_lbasize);
1174 }
1175 
1176 static int btt_write_pg(struct btt *btt, struct bio_integrity_payload *bip,
1177 			sector_t sector, struct page *page, unsigned int off,
1178 			unsigned int len)
1179 {
1180 	int ret = 0;
1181 	struct arena_info *arena = NULL;
1182 	u32 premap = 0, old_postmap, new_postmap, lane = 0, i;
1183 	struct log_entry log;
1184 	int sub;
1185 
1186 	while (len) {
1187 		u32 cur_len;
1188 		int e_flag;
1189 
1190  retry:
1191 		lane = nd_region_acquire_lane(btt->nd_region);
1192 
1193 		ret = lba_to_arena(btt, sector, &premap, &arena);
1194 		if (ret)
1195 			goto out_lane;
1196 		cur_len = min(btt->sector_size, len);
1197 
1198 		if ((arena->flags & IB_FLAG_ERROR_MASK) != 0) {
1199 			ret = -EIO;
1200 			goto out_lane;
1201 		}
1202 
1203 		if (btt_is_badblock(btt, arena, arena->freelist[lane].block))
1204 			arena->freelist[lane].has_err = 1;
1205 
1206 		if (mutex_is_locked(&arena->err_lock)
1207 				|| arena->freelist[lane].has_err) {
1208 			nd_region_release_lane(btt->nd_region, lane);
1209 
1210 			ret = arena_clear_freelist_error(arena, lane);
1211 			if (ret)
1212 				return ret;
1213 
1214 			/* OK to acquire a different lane/free block */
1215 			goto retry;
1216 		}
1217 
1218 		new_postmap = arena->freelist[lane].block;
1219 
1220 		/* Wait if the new block is being read from */
1221 		for (i = 0; i < arena->nfree; i++)
1222 			while (arena->rtt[i] == (RTT_VALID | new_postmap))
1223 				cpu_relax();
1224 
1225 
1226 		if (new_postmap >= arena->internal_nlba) {
1227 			ret = -EIO;
1228 			goto out_lane;
1229 		}
1230 
1231 		ret = btt_data_write(arena, new_postmap, page, off, cur_len);
1232 		if (ret)
1233 			goto out_lane;
1234 
1235 		if (bip) {
1236 			ret = btt_rw_integrity(btt, bip, arena, new_postmap,
1237 						WRITE);
1238 			if (ret)
1239 				goto out_lane;
1240 		}
1241 
1242 		lock_map(arena, premap);
1243 		ret = btt_map_read(arena, premap, &old_postmap, NULL, &e_flag,
1244 				NVDIMM_IO_ATOMIC);
1245 		if (ret)
1246 			goto out_map;
1247 		if (old_postmap >= arena->internal_nlba) {
1248 			ret = -EIO;
1249 			goto out_map;
1250 		}
1251 		if (e_flag)
1252 			set_e_flag(old_postmap);
1253 
1254 		log.lba = cpu_to_le32(premap);
1255 		log.old_map = cpu_to_le32(old_postmap);
1256 		log.new_map = cpu_to_le32(new_postmap);
1257 		log.seq = cpu_to_le32(arena->freelist[lane].seq);
1258 		sub = arena->freelist[lane].sub;
1259 		ret = btt_flog_write(arena, lane, sub, &log);
1260 		if (ret)
1261 			goto out_map;
1262 
1263 		ret = btt_map_write(arena, premap, new_postmap, 0, 0,
1264 			NVDIMM_IO_ATOMIC);
1265 		if (ret)
1266 			goto out_map;
1267 
1268 		unlock_map(arena, premap);
1269 		nd_region_release_lane(btt->nd_region, lane);
1270 
1271 		if (e_flag) {
1272 			ret = arena_clear_freelist_error(arena, lane);
1273 			if (ret)
1274 				return ret;
1275 		}
1276 
1277 		len -= cur_len;
1278 		off += cur_len;
1279 		sector += btt->sector_size >> SECTOR_SHIFT;
1280 	}
1281 
1282 	return 0;
1283 
1284  out_map:
1285 	unlock_map(arena, premap);
1286  out_lane:
1287 	nd_region_release_lane(btt->nd_region, lane);
1288 	return ret;
1289 }
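
/*
 * Write-path summary (a restatement of the loop above): a free postmap
 * block is taken from this lane's free list (clearing any latent media
 * error on it first), the new data and integrity metadata are written
 * there, and only then, under the map lock, is the transaction made
 * visible by writing the flog entry followed by the map update. The spin
 * on the RTT before the data write keeps us from overwriting a block
 * that a concurrent reader has just looked up.
 */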
1290 
1291 static int btt_do_bvec(struct btt *btt, struct bio_integrity_payload *bip,
1292 			struct page *page, unsigned int len, unsigned int off,
1293 			bool is_write, sector_t sector)
1294 {
1295 	int ret;
1296 
1297 	if (!is_write) {
1298 		ret = btt_read_pg(btt, bip, page, off, sector, len);
1299 		flush_dcache_page(page);
1300 	} else {
1301 		flush_dcache_page(page);
1302 		ret = btt_write_pg(btt, bip, sector, page, off, len);
1303 	}
1304 
1305 	return ret;
1306 }
1307 
1308 static blk_qc_t btt_make_request(struct request_queue *q, struct bio *bio)
1309 {
1310 	struct bio_integrity_payload *bip = bio_integrity(bio);
1311 	struct btt *btt = q->queuedata;
1312 	struct bvec_iter iter;
1313 	unsigned long start;
1314 	struct bio_vec bvec;
1315 	int err = 0;
1316 	bool do_acct;
1317 
1318 	if (!bio_integrity_prep(bio))
1319 		return BLK_QC_T_NONE;
1320 
1321 	do_acct = nd_iostat_start(bio, &start);
1322 	bio_for_each_segment(bvec, bio, iter) {
1323 		unsigned int len = bvec.bv_len;
1324 
1325 		if (len > PAGE_SIZE || len < btt->sector_size ||
1326 				len % btt->sector_size) {
1327 			dev_err_ratelimited(&btt->nd_btt->dev,
1328 				"unaligned bio segment (len: %d)\n", len);
1329 			bio->bi_status = BLK_STS_IOERR;
1330 			break;
1331 		}
1332 
1333 		err = btt_do_bvec(btt, bip, bvec.bv_page, len, bvec.bv_offset,
1334 				  op_is_write(bio_op(bio)), iter.bi_sector);
1335 		if (err) {
1336 			dev_err(&btt->nd_btt->dev,
1337 					"io error in %s sector %lld, len %d\n",
1338 					(op_is_write(bio_op(bio))) ? "WRITE" :
1339 					"READ",
1340 					(unsigned long long) iter.bi_sector, len);
1341 			bio->bi_status = errno_to_blk_status(err);
1342 			break;
1343 		}
1344 	}
1345 	if (do_acct)
1346 		nd_iostat_end(bio, start);
1347 
1348 	bio_endio(bio);
1349 	return BLK_QC_T_NONE;
1350 }
1351 
1352 static int btt_rw_page(struct block_device *bdev, sector_t sector,
1353 		struct page *page, bool is_write)
1354 {
1355 	struct btt *btt = bdev->bd_disk->private_data;
1356 	int rc;
1357 	unsigned int len;
1358 
1359 	len = hpage_nr_pages(page) * PAGE_SIZE;
1360 	rc = btt_do_bvec(btt, NULL, page, len, 0, is_write, sector);
1361 	if (rc == 0)
1362 		page_endio(page, is_write, 0);
1363 
1364 	return rc;
1365 }
1366 
1367 
1368 static int btt_getgeo(struct block_device *bd, struct hd_geometry *geo)
1369 {
1370 	/* some standard values */
1371 	geo->heads = 1 << 6;
1372 	geo->sectors = 1 << 5;
1373 	geo->cylinders = get_capacity(bd->bd_disk) >> 11;
1374 	return 0;
1375 }
1376 
1377 static const struct block_device_operations btt_fops = {
1378 	.owner =		THIS_MODULE,
1379 	.rw_page =		btt_rw_page,
1380 	.getgeo =		btt_getgeo,
1381 	.revalidate_disk =	nvdimm_revalidate_disk,
1382 };
1383 
1384 static int btt_blk_init(struct btt *btt)
1385 {
1386 	struct nd_btt *nd_btt = btt->nd_btt;
1387 	struct nd_namespace_common *ndns = nd_btt->ndns;
1388 
1389 	/* create a new disk and request queue for btt */
1390 	btt->btt_queue = blk_alloc_queue(GFP_KERNEL);
1391 	if (!btt->btt_queue)
1392 		return -ENOMEM;
1393 
1394 	btt->btt_disk = alloc_disk(0);
1395 	if (!btt->btt_disk) {
1396 		blk_cleanup_queue(btt->btt_queue);
1397 		return -ENOMEM;
1398 	}
1399 
1400 	nvdimm_namespace_disk_name(ndns, btt->btt_disk->disk_name);
1401 	btt->btt_disk->first_minor = 0;
1402 	btt->btt_disk->fops = &btt_fops;
1403 	btt->btt_disk->private_data = btt;
1404 	btt->btt_disk->queue = btt->btt_queue;
1405 	btt->btt_disk->flags = GENHD_FL_EXT_DEVT;
1406 	btt->btt_disk->queue->backing_dev_info->capabilities |=
1407 			BDI_CAP_SYNCHRONOUS_IO;
1408 
1409 	blk_queue_make_request(btt->btt_queue, btt_make_request);
1410 	blk_queue_logical_block_size(btt->btt_queue, btt->sector_size);
1411 	blk_queue_max_hw_sectors(btt->btt_queue, UINT_MAX);
1412 	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, btt->btt_queue);
1413 	btt->btt_queue->queuedata = btt;
1414 
1415 	set_capacity(btt->btt_disk, 0);
1416 	device_add_disk(&btt->nd_btt->dev, btt->btt_disk);
1417 	if (btt_meta_size(btt)) {
1418 		int rc = nd_integrity_init(btt->btt_disk, btt_meta_size(btt));
1419 
1420 		if (rc) {
1421 			del_gendisk(btt->btt_disk);
1422 			put_disk(btt->btt_disk);
1423 			blk_cleanup_queue(btt->btt_queue);
1424 			return rc;
1425 		}
1426 	}
1427 	set_capacity(btt->btt_disk, btt->nlba * btt->sector_size >> 9);
1428 	btt->nd_btt->size = btt->nlba * (u64)btt->sector_size;
1429 	revalidate_disk(btt->btt_disk);
1430 
1431 	return 0;
1432 }
1433 
1434 static void btt_blk_cleanup(struct btt *btt)
1435 {
1436 	del_gendisk(btt->btt_disk);
1437 	put_disk(btt->btt_disk);
1438 	blk_cleanup_queue(btt->btt_queue);
1439 }
1440 
1441 /**
1442  * btt_init - initialize a block translation table for the given device
1443  * @nd_btt:	device with BTT geometry and backing device info
1444  * @rawsize:	raw size in bytes of the backing device
1445  * @lbasize:	lba size of the backing device
1446  * @uuid:	A uuid for the backing device - this is stored on media
1447  * @nd_region:	parent region of the backing namespace, used for lane locking
1448  *
1449  * Initialize a Block Translation Table on a backing device to provide
1450  * single sector power fail atomicity.
1451  *
1452  * Context:
1453  * Might sleep.
1454  *
1455  * Returns:
1456  * Pointer to a new struct btt on success, NULL on failure.
1457  */
1458 static struct btt *btt_init(struct nd_btt *nd_btt, unsigned long long rawsize,
1459 		u32 lbasize, u8 *uuid, struct nd_region *nd_region)
1460 {
1461 	int ret;
1462 	struct btt *btt;
1463 	struct nd_namespace_io *nsio;
1464 	struct device *dev = &nd_btt->dev;
1465 
1466 	btt = devm_kzalloc(dev, sizeof(struct btt), GFP_KERNEL);
1467 	if (!btt)
1468 		return NULL;
1469 
1470 	btt->nd_btt = nd_btt;
1471 	btt->rawsize = rawsize;
1472 	btt->lbasize = lbasize;
1473 	btt->sector_size = ((lbasize >= 4096) ? 4096 : 512);
1474 	INIT_LIST_HEAD(&btt->arena_list);
1475 	mutex_init(&btt->init_lock);
1476 	btt->nd_region = nd_region;
1477 	nsio = to_nd_namespace_io(&nd_btt->ndns->dev);
1478 	btt->phys_bb = &nsio->bb;
1479 
1480 	ret = discover_arenas(btt);
1481 	if (ret) {
1482 		dev_err(dev, "init: error in arena_discover: %d\n", ret);
1483 		return NULL;
1484 	}
1485 
1486 	if (btt->init_state != INIT_READY && nd_region->ro) {
1487 		dev_warn(dev, "%s is read-only, unable to init btt metadata\n",
1488 				dev_name(&nd_region->dev));
1489 		return NULL;
1490 	} else if (btt->init_state != INIT_READY) {
1491 		btt->num_arenas = (rawsize / ARENA_MAX_SIZE) +
1492 			((rawsize % ARENA_MAX_SIZE) ? 1 : 0);
1493 		dev_dbg(dev, "init: %d arenas for %llu rawsize\n",
1494 				btt->num_arenas, rawsize);
1495 
1496 		ret = create_arenas(btt);
1497 		if (ret) {
1498 			dev_info(dev, "init: create_arenas: %d\n", ret);
1499 			return NULL;
1500 		}
1501 
1502 		ret = btt_meta_init(btt);
1503 		if (ret) {
1504 			dev_err(dev, "init: error in meta_init: %d\n", ret);
1505 			return NULL;
1506 		}
1507 	}
1508 
1509 	ret = btt_blk_init(btt);
1510 	if (ret) {
1511 		dev_err(dev, "init: error in blk_init: %d\n", ret);
1512 		return NULL;
1513 	}
1514 
1515 	btt_debugfs_init(btt);
1516 
1517 	return btt;
1518 }
1519 
1520 /**
1521  * btt_fini - de-initialize a BTT
1522  * @btt:	the BTT handle that was generated by btt_init
1523  *
1524  * De-initialize a Block Translation Table on device removal
1525  *
1526  * Context:
1527  * Might sleep.
1528  */
1529 static void btt_fini(struct btt *btt)
1530 {
1531 	if (btt) {
1532 		btt_blk_cleanup(btt);
1533 		free_arenas(btt);
1534 		debugfs_remove_recursive(btt->debugfs_dir);
1535 	}
1536 }
1537 
1538 int nvdimm_namespace_attach_btt(struct nd_namespace_common *ndns)
1539 {
1540 	struct nd_btt *nd_btt = to_nd_btt(ndns->claim);
1541 	struct nd_region *nd_region;
1542 	struct btt_sb *btt_sb;
1543 	struct btt *btt;
1544 	size_t rawsize;
1545 
1546 	if (!nd_btt->uuid || !nd_btt->ndns || !nd_btt->lbasize) {
1547 		dev_dbg(&nd_btt->dev, "incomplete btt configuration\n");
1548 		return -ENODEV;
1549 	}
1550 
1551 	btt_sb = devm_kzalloc(&nd_btt->dev, sizeof(*btt_sb), GFP_KERNEL);
1552 	if (!btt_sb)
1553 		return -ENOMEM;
1554 
1555 	/*
1556 	 * If this returns < 0, that is ok as it just means there wasn't
1557 	 * an existing BTT, and we're creating a new one. We still need to
1558 	 * call this as we need the version-dependent fields in nd_btt to be
1559 	 * set correctly based on the holder class.
1560 	 */
1561 	nd_btt_version(nd_btt, ndns, btt_sb);
1562 
1563 	rawsize = nvdimm_namespace_capacity(ndns) - nd_btt->initial_offset;
1564 	if (rawsize < ARENA_MIN_SIZE) {
1565 		dev_dbg(&nd_btt->dev, "%s must be at least %ld bytes\n",
1566 				dev_name(&ndns->dev),
1567 				ARENA_MIN_SIZE + nd_btt->initial_offset);
1568 		return -ENXIO;
1569 	}
1570 	nd_region = to_nd_region(nd_btt->dev.parent);
1571 	btt = btt_init(nd_btt, rawsize, nd_btt->lbasize, nd_btt->uuid,
1572 			nd_region);
1573 	if (!btt)
1574 		return -ENOMEM;
1575 	nd_btt->btt = btt;
1576 
1577 	return 0;
1578 }
1579 EXPORT_SYMBOL(nvdimm_namespace_attach_btt);
1580 
1581 int nvdimm_namespace_detach_btt(struct nd_btt *nd_btt)
1582 {
1583 	struct btt *btt = nd_btt->btt;
1584 
1585 	btt_fini(btt);
1586 	nd_btt->btt = NULL;
1587 
1588 	return 0;
1589 }
1590 EXPORT_SYMBOL(nvdimm_namespace_detach_btt);
1591 
1592 static int __init nd_btt_init(void)
1593 {
1594 	int rc = 0;
1595 
1596 	debugfs_root = debugfs_create_dir("btt", NULL);
1597 	if (IS_ERR_OR_NULL(debugfs_root))
1598 		rc = -ENXIO;
1599 
1600 	return rc;
1601 }
1602 
1603 static void __exit nd_btt_exit(void)
1604 {
1605 	debugfs_remove_recursive(debugfs_root);
1606 }
1607 
1608 MODULE_ALIAS_ND_DEVICE(ND_DEVICE_BTT);
1609 MODULE_AUTHOR("Vishal Verma <vishal.l.verma@linux.intel.com>");
1610 MODULE_LICENSE("GPL v2");
1611 module_init(nd_btt_init);
1612 module_exit(nd_btt_exit);
1613