xref: /linux/arch/x86/mm/iomap_32.c (revision f3539c12d8196ce0a1993364d30b3a18908470d1)
/*
 * Copyright © 2008 Ingo Molnar
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 */

#include <asm/iomap.h>
#include <asm/pat.h>
#include <linux/export.h>
#include <linux/highmem.h>

static int is_io_mapping_possible(resource_size_t base, unsigned long size)
{
#if !defined(CONFIG_X86_PAE) && defined(CONFIG_PHYS_ADDR_T_64BIT)
	/* Addresses above 1 << 32 (4 GiB) cannot be mapped without PAE */
	if (base + size > 0x100000000ULL)
		return 0;
#endif
	return 1;
}
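
/*
 * Worked example for the check above (illustrative, not from the
 * original source): on a non-PAE build with a 64-bit resource_size_t,
 * base = 0xf0000000 and size = 0x20000000 give
 * base + size = 0x110000000, which exceeds 1 << 32, so
 * is_io_mapping_possible() returns 0 and the mapping is refused.
 */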

int iomap_create_wc(resource_size_t base, unsigned long size, pgprot_t *prot)
{
	enum page_cache_mode pcm = _PAGE_CACHE_MODE_WC;
	int ret;

	if (!is_io_mapping_possible(base, size))
		return -EINVAL;

	ret = io_reserve_memtype(base, base + size, &pcm);
	if (ret)
		return ret;

	*prot = __pgprot(__PAGE_KERNEL | cachemode2protval(pcm));
	return 0;
}
EXPORT_SYMBOL_GPL(iomap_create_wc);

void iomap_free(resource_size_t base, unsigned long size)
{
	io_free_memtype(base, base + size);
}
EXPORT_SYMBOL_GPL(iomap_free);
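
/*
 * Usage sketch (illustrative, not part of the original file): a driver
 * would typically reserve a BAR as write-combining with
 * iomap_create_wc() and release it with iomap_free(). The function and
 * variable names below are hypothetical.
 */
static pgprot_t example_wc_prot;

static int __maybe_unused example_reserve_wc(resource_size_t base,
					     unsigned long size)
{
	/* Reserve [base, base + size) as WC and remember the pgprot to
	 * use for later iomap_atomic_prot_pfn() calls. */
	return iomap_create_wc(base, size, &example_wc_prot);
}

static void __maybe_unused example_release_wc(resource_size_t base,
					      unsigned long size)
{
	/* Drop the memtype reservation taken in example_reserve_wc(). */
	iomap_free(base, size);
}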

void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
{
	unsigned long vaddr;
	int idx, type;

	preempt_disable();
	pagefault_disable();

	/* Claim a per-CPU atomic kmap slot and turn it into a fixmap index. */
	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	/*
	 * Install the pte for this slot. No TLB flush is needed here:
	 * iounmap_atomic() flushed the slot when it was last released.
	 */
	set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
	arch_flush_lazy_mmu_mode();

	return (void *)vaddr;
}
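
/*
 * Worked example of the slot arithmetic above (illustrative): with
 * KM_TYPE_NR slots per CPU, the second nested push on CPU 1 gives
 * type = 1 and idx = 1 + KM_TYPE_NR * 1, a fixmap slot private to that
 * CPU, so concurrent atomic kmaps on different CPUs never collide.
 */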

/*
 * Map 'pfn' using protections 'prot'
 */
void __iomem *
iomap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
{
	/*
	 * For non-PAT systems, translate a non-WB request to UC- just in
	 * case the caller set the PWT bit in prot directly without using
	 * pgprot_writecombine(). UC- translates to uncached if the MTRR
	 * is UC or WC. UC- captures the real intention of the user, which
	 * is "WC if the MTRR is WC, UC if you can't do that."
	 */
	if (!pat_enabled() && pgprot2cachemode(prot) != _PAGE_CACHE_MODE_WB)
		prot = __pgprot(__PAGE_KERNEL |
				cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS));

	return (void __force __iomem *) kmap_atomic_prot_pfn(pfn, prot);
}
EXPORT_SYMBOL_GPL(iomap_atomic_prot_pfn);
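
/*
 * Usage sketch (illustrative, not part of the original file): map one
 * page of a previously reserved WC range, write a value, and unmap.
 * Assumes <linux/io.h> for writel(); the function name and parameters
 * are hypothetical.
 */
static void __maybe_unused example_write_u32(unsigned long pfn,
					     pgprot_t prot, u32 val)
{
	void __iomem *vaddr;

	/* Preemption and pagefaults stay disabled until the unmap. */
	vaddr = iomap_atomic_prot_pfn(pfn, prot);
	writel(val, vaddr);
	iounmap_atomic(vaddr);
}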

void
iounmap_atomic(void __iomem *kvaddr)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;

	/* Fixmap addresses grow downwards, so FIX_KMAP_END has the
	 * lowest virtual address of the atomic kmap range. */
	if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
	    vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
		int idx, type;

		type = kmap_atomic_idx();
		idx = type + KM_TYPE_NR * smp_processor_id();

#ifdef CONFIG_DEBUG_HIGHMEM
		WARN_ON_ONCE(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
#endif
		/*
		 * Force other mappings to Oops if they try to access this
		 * pte without first remapping it.  Keeping stale mappings
		 * around is also a bad idea, in case the page changes
		 * cacheability attributes or becomes a protected page in a
		 * hypervisor.
		 */
		kpte_clear_flush(kmap_pte - idx, vaddr);
		kmap_atomic_idx_pop();
	}

	pagefault_enable();
	preempt_enable();
}
EXPORT_SYMBOL_GPL(iounmap_atomic);