// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright © 2008 Ingo Molnar
 */

#include <asm/iomap.h>
#include <asm/memtype.h>
#include <linux/export.h>
#include <linux/highmem.h>

static int is_io_mapping_possible(resource_size_t base, unsigned long size)
{
#if !defined(CONFIG_X86_PAE) && defined(CONFIG_PHYS_ADDR_T_64BIT)
	/* Without PAE there is no way to map addresses above 1 << 32 */
	if (base + size > 0x100000000ULL)
		return 0;
#endif
	return 1;
}

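/*
 * Hypothetical sketch, not part of the original file: exercising the
 * 4 GiB limit above.  The base/size values are made-up examples;
 * pr_info() is assumed to be reachable via the existing includes.
 */
static __maybe_unused void iomap_32_limit_demo(void)
{
	/* Ends exactly at the 4 GiB boundary: always mappable */
	pr_info("ends at 4G:  %d\n",
		is_io_mapping_possible(0xe0000000ULL, 0x20000000));
	/* Crosses 4 GiB: rejected (0) on non-PAE 32-bit, 1 otherwise */
	pr_info("crosses 4G: %d\n",
		is_io_mapping_possible(0xf0000000ULL, 0x20000000));
}
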
int iomap_create_wc(resource_size_t base, unsigned long size, pgprot_t *prot)
{
	enum page_cache_mode pcm = _PAGE_CACHE_MODE_WC;
	int ret;

	if (!is_io_mapping_possible(base, size))
		return -EINVAL;

	ret = memtype_reserve_io(base, base + size, &pcm);
	if (ret)
		return ret;

	*prot = __pgprot(__PAGE_KERNEL | cachemode2protval(pcm));
	/* Filter out unsupported __PAGE_KERNEL* bits: */
	pgprot_val(*prot) &= __default_kernel_pte_mask;

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_create_wc);

void iomap_free(resource_size_t base, unsigned long size)
{
	memtype_free_io(base, base + size);
}
EXPORT_SYMBOL_GPL(iomap_free);

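/*
 * Hypothetical usage sketch, not part of the original file: a driver
 * reserving a write-combined memtype for a framebuffer BAR and
 * releasing it again.  'bar_base' and 'bar_size' are made-up values.
 */
static __maybe_unused int iomap_create_wc_demo(void)
{
	resource_size_t bar_base = 0xd0000000;	/* example BAR address */
	unsigned long bar_size = 0x01000000;	/* example size: 16 MiB */
	pgprot_t prot;
	int ret;

	ret = iomap_create_wc(bar_base, bar_size, &prot);
	if (ret)
		return ret;	/* unmappable range or memtype conflict */

	/* ... map pages with 'prot', e.g. via iomap_atomic_prot_pfn() ... */

	iomap_free(bar_base, bar_size);
	return 0;
}
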
/*
 * Map the page at 'pfn' into a per-CPU fixmap slot with protection
 * 'prot'.  Preemption and pagefaults remain disabled until the
 * matching iounmap_atomic().
 */
void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
{
	unsigned long vaddr;
	int idx, type;

	preempt_disable();
	pagefault_disable();

	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
	arch_flush_lazy_mmu_mode();

	return (void *)vaddr;
}

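/*
 * Worked example of the slot arithmetic above, illustrative only: if
 * KM_TYPE_NR is, say, 20, then the second nested mapping (type == 1)
 * on CPU 2 uses idx = 1 + 20 * 2 = 41, i.e. fixmap slot
 * FIX_KMAP_BEGIN + 41 and the pte at kmap_pte - 41.  Each CPU thus
 * owns a disjoint window of KM_TYPE_NR slots.
 */
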
/*
 * Map 'pfn' using protections 'prot'
 */
void __iomem *
iomap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
{
	/*
	 * For non-PAT systems, translate any non-WB request to UC- just
	 * in case the caller set the PWT bit in prot directly without
	 * using pgprot_writecombine().  UC- lets the MTRR have the final
	 * say: the effective type is WC if the MTRR is WC and uncached
	 * otherwise, which captures the user's real intention of "WC if
	 * the MTRR is WC, UC if you can't do that."
	 */
	if (!pat_enabled() && pgprot2cachemode(prot) != _PAGE_CACHE_MODE_WB)
		prot = __pgprot(__PAGE_KERNEL |
				cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS));

	/* Filter out unsupported __PAGE_KERNEL* bits: */
	pgprot_val(prot) &= __default_kernel_pte_mask;

	return (void __force __iomem *) kmap_atomic_prot_pfn(pfn, prot);
}
EXPORT_SYMBOL_GPL(iomap_atomic_prot_pfn);
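
/*
 * Hypothetical sketch, not part of the original file: roughly how a
 * higher-level helper (in the spirit of io_mapping_map_atomic_wc() in
 * <linux/io-mapping.h>) builds on iomap_atomic_prot_pfn().  'base' and
 * 'wc_prot' would come from a prior iomap_create_wc() reservation;
 * PHYS_PFN() is assumed from <linux/pfn.h>.
 */
static __maybe_unused void __iomem *
iomap_wc_map_demo(resource_size_t base, pgprot_t wc_prot,
		  unsigned long offset)
{
	/* Physical address of the requested page within the range */
	resource_size_t phys = base + offset;

	return iomap_atomic_prot_pfn(PHYS_PFN(phys), wc_prot);
}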

void
iounmap_atomic(void __iomem *kvaddr)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;

	/* Fixmap addresses grow downward, so FIX_KMAP_END is the low end */
	if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
	    vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
		int idx, type;

		type = kmap_atomic_idx();
		idx = type + KM_TYPE_NR * smp_processor_id();

#ifdef CONFIG_DEBUG_HIGHMEM
		WARN_ON_ONCE(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
#endif
		/*
		 * Force other mappings to Oops if they try to access this
		 * pte without first remapping it.  Keeping stale mappings
		 * around is also a bad idea, in case the page changes
		 * cacheability attributes or becomes a protected page in
		 * a hypervisor.
		 */
		kpte_clear_flush(kmap_pte - idx, vaddr);
		kmap_atomic_idx_pop();
	}

	pagefault_enable();
	preempt_enable();
}
EXPORT_SYMBOL_GPL(iounmap_atomic);
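
/*
 * Hypothetical end-to-end sketch, not part of the original file: map
 * one page of a previously reserved WC range, poke it, and unmap.
 * Assumes writel() from <asm/io.h>; 'pfn' and 'prot' would come from
 * an iomap_create_wc() reservation.  No sleeping is allowed between
 * the map and the unmap.
 */
static __maybe_unused void iomap_atomic_demo(unsigned long pfn, pgprot_t prot)
{
	void __iomem *va = iomap_atomic_prot_pfn(pfn, prot);

	/* Example MMIO write; the value is meaningless */
	writel(0xdeadbeef, va);

	iounmap_atomic(va);
}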