xref: /linux/arch/x86/mm/iomap_32.c (revision 71fd68e7d234f6b7d8407c8f486764d24f8411f4)
/*
 * Copyright © 2008 Ingo Molnar
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 */

#include <asm/iomap.h>
#include <asm/pat.h>
#include <linux/module.h>
#include <linux/highmem.h>

static int is_io_mapping_possible(resource_size_t base, unsigned long size)
{
#if !defined(CONFIG_X86_PAE) && defined(CONFIG_PHYS_ADDR_T_64BIT)
	/* There is no way to map an address greater than 1 << 32 without PAE */
	if (base + size > 0x100000000ULL)
		return 0;
#endif
	return 1;
}

int iomap_create_wc(resource_size_t base, unsigned long size, pgprot_t *prot)
{
	unsigned long flag = _PAGE_CACHE_WC;
	int ret;

	if (!is_io_mapping_possible(base, size))
		return -EINVAL;

	ret = io_reserve_memtype(base, base + size, &flag);
	if (ret)
		return ret;

	*prot = __pgprot(__PAGE_KERNEL | flag);
	return 0;
}
EXPORT_SYMBOL_GPL(iomap_create_wc);

void
iomap_free(resource_size_t base, unsigned long size)
{
	io_free_memtype(base, base + size);
}
EXPORT_SYMBOL_GPL(iomap_free);
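
#if 0
/*
 * Illustrative sketch, not part of this file: a caller would typically
 * reserve write-combining protections for a whole BAR once at init time
 * and drop the reservation again at teardown.  The example_dev structure
 * and its bar_base/bar_size/bar_prot members are hypothetical.
 */
static int example_dev_init(struct example_dev *dev)
{
	/*
	 * May fail with -EINVAL for ranges above 4G on non-PAE kernels,
	 * or with the io_reserve_memtype() error on a memtype conflict.
	 */
	return iomap_create_wc(dev->bar_base, dev->bar_size, &dev->bar_prot);
}

static void example_dev_fini(struct example_dev *dev)
{
	iomap_free(dev->bar_base, dev->bar_size);
}
#endif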

/*
 * Map 'pfn' into the per-CPU fixmap slot selected by 'type', using the
 * page protections 'prot'.  Pagefaults stay disabled until the slot is
 * released again, e.g. via iounmap_atomic() below.
 */
void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	pagefault_disable();

	debug_kmap_atomic(type);
	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
	arch_flush_lazy_mmu_mode();

	return (void *)vaddr;
}

/*
 * Map 'pfn' using fixed map 'type' and protections 'prot'
 */
void *
iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
{
	/*
	 * For non-PAT systems, promote PAGE_KERNEL_WC to PAGE_KERNEL_UC_MINUS.
	 * PAGE_KERNEL_WC maps to PWT, which translates to uncached if the
	 * MTRR is UC or WC.  UC_MINUS gets the real intention of the user,
	 * which is "WC if the MTRR is WC, UC if you can't do that."
	 */
	if (!pat_enabled && pgprot_val(prot) == pgprot_val(PAGE_KERNEL_WC))
		prot = PAGE_KERNEL_UC_MINUS;

	return kmap_atomic_prot_pfn(pfn, type, prot);
}
EXPORT_SYMBOL_GPL(iomap_atomic_prot_pfn);

void
iounmap_atomic(void *kvaddr, enum km_type type)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();

	/*
	 * Force other mappings to Oops if they try to access this pte without
	 * first remapping it.  Keeping stale mappings around is also a bad
	 * idea, in case the page changes cacheability attributes or becomes
	 * a protected page in a hypervisor.
	 */
	if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx))
		kpte_clear_flush(kmap_pte-idx, vaddr);

	pagefault_enable();
}
EXPORT_SYMBOL_GPL(iounmap_atomic);
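
#if 0
/*
 * Illustrative sketch, not part of this file: the usual pattern is to map a
 * single page of a previously reserved WC range, poke it inside the short
 * atomic section, and unmap it again.  The function name, its
 * pfn/offset/value parameters and the KM_USER0 slot choice are illustrative
 * assumptions, not taken from this file.
 */
static void example_poke_page(unsigned long pfn, unsigned long offset,
			      u32 value, pgprot_t prot)
{
	void *vaddr;

	/* Disables pagefaults and installs a per-CPU fixmap pte. */
	vaddr = iomap_atomic_prot_pfn(pfn, KM_USER0, prot);
	writel(value, (void __iomem *)vaddr + offset);
	/* Clears the pte and re-enables pagefaults. */
	iounmap_atomic(vaddr, KM_USER0);
}
#endif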