1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (c) 2014-2016, Intel Corporation.
4 */
5 #include "test/nfit_test.h"
6 #include <linux/blkdev.h>
7 #include <linux/dax.h>
8 #include <pmem.h>
9 #include <nd.h>
10
long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
		long nr_pages, enum dax_access_mode mode, void **kaddr,
		pfn_t *pfn)
{
	resource_size_t pmem_off = PFN_PHYS(pgoff) + pmem->data_offset;

	/* Refuse any access that overlaps a known-bad pmem range. */
	if (unlikely(is_bad_pmem(&pmem->bb, PFN_PHYS(pgoff) / 512,
					PFN_PHYS(nr_pages))))
		return -EIO;

	/* Both the nfit_test and real-resource paths map the same kva. */
	if (kaddr)
		*kaddr = pmem->virt_addr + pmem_off;

	/*
	 * Limit dax to a single page at a time given vmalloc()-backed
	 * in the nfit_test case: vmalloc() memory is not physically
	 * contiguous, so each pfn must be resolved page-by-page.
	 */
	if (get_nfit_res(pmem->phys_addr + pmem_off)) {
		struct page *page = vmalloc_to_page(pmem->virt_addr + pmem_off);

		if (pfn)
			*pfn = page_to_pfn_t(page);
		pr_debug_ratelimited("%s: pmem: %p pgoff: %#lx pfn: %#lx\n",
				__func__, pmem, pgoff, page_to_pfn(page));

		return 1;
	}

	if (pfn)
		*pfn = phys_to_pfn_t(pmem->phys_addr + pmem_off,
				pmem->pfn_flags);

	/*
	 * If badblocks are present, limit known good range to the
	 * requested range.
	 */
	if (unlikely(pmem->bb.count))
		return nr_pages;
	return PHYS_PFN(pmem->size - pmem->pfn_pad - pmem_off);
}
52