/*
 * arch/x86/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PCs.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmiotrace.h>

#include <asm/set_memory.h>
#include <asm/e820/api.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/pat.h>

#include "physaddr.h"

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
			enum page_cache_mode pcm)
{
	unsigned long nrpages = size >> PAGE_SHIFT;
	int err;

	switch (pcm) {
	case _PAGE_CACHE_MODE_UC:
	default:
		err = _set_memory_uc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WC:
		err = _set_memory_wc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WT:
		err = _set_memory_wt(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WB:
		err = _set_memory_wb(vaddr, nrpages);
		break;
	}

	return err;
}
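
/*
 * Illustrative call, not taken from this file: when an ioremap() alias of
 * a range needs matching attributes in the direct map, that alias can be
 * switched to, e.g., write-combining (vaddr/size here are assumed to
 * describe a direct-map region):
 *
 *	err = ioremap_change_attr(vaddr, size, _PAGE_CACHE_MODE_WC);
 */
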
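/*
 * Helper for walk_system_ram_range() below: returns 1 as soon as any page
 * in [start_pfn, start_pfn + nr_pages) is usable (non-reserved) RAM, so
 * that __ioremap_caller() can refuse to remap it.
 */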
static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
			       void *arg)
{
	unsigned long i;

	for (i = 0; i < nr_pages; ++i)
		if (pfn_valid(start_pfn + i) &&
		    !PageReserved(pfn_to_page(start_pfn + i)))
			return 1;

	return 0;
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. It transparently creates kernel huge I/O mappings when
 * the physical address is aligned to a huge page size (1GB or 2MB) and
 * the requested size is at least the huge page size.
 *
 * NOTE: MTRRs can override PAT memory types with a 4KB granularity.
 * Therefore, the mapping code falls back to smaller pages, down to 4KB,
 * when a mapping range is covered by non-WB MTRRs.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap_caller(resource_size_t phys_addr,
		unsigned long size, enum page_cache_mode pcm, void *caller)
{
	unsigned long offset, vaddr;
	resource_size_t pfn, last_pfn, last_addr;
	const resource_size_t unaligned_phys_addr = phys_addr;
	const unsigned long unaligned_size = size;
	struct vm_struct *area;
	enum page_cache_mode new_pcm;
	pgprot_t prot;
	int retval;
	void __iomem *ret_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	if (!phys_addr_valid(phys_addr)) {
		printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
		       (unsigned long long)phys_addr);
		WARN_ON_ONCE(1);
		return NULL;
	}

	/*
	 * Don't allow anybody to remap normal RAM that we're using.
	 */
	pfn      = phys_addr >> PAGE_SHIFT;
	last_pfn = last_addr >> PAGE_SHIFT;
	if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
					  __ioremap_check_ram) == 1) {
		WARN_ONCE(1, "ioremap on RAM at %pa - %pa\n",
			  &phys_addr, &last_addr);
		return NULL;
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PHYSICAL_PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
						pcm, &new_pcm);
	if (retval) {
		printk(KERN_ERR "ioremap reserve_memtype failed %d\n", retval);
		return NULL;
	}

	if (pcm != new_pcm) {
		if (!is_new_memtype_allowed(phys_addr, size, pcm, new_pcm)) {
			printk(KERN_ERR
		"ioremap error for 0x%llx-0x%llx, requested 0x%x, got 0x%x\n",
				(unsigned long long)phys_addr,
				(unsigned long long)(phys_addr + size),
				pcm, new_pcm);
			goto err_free_memtype;
		}
		pcm = new_pcm;
	}

	prot = PAGE_KERNEL_IO;
	switch (pcm) {
	case _PAGE_CACHE_MODE_UC:
	default:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_UC));
		break;
	case _PAGE_CACHE_MODE_UC_MINUS:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS));
		break;
	case _PAGE_CACHE_MODE_WC:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_WC));
		break;
	case _PAGE_CACHE_MODE_WT:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_WT));
		break;
	case _PAGE_CACHE_MODE_WB:
		break;
	}

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		goto err_free_memtype;
	area->phys_addr = phys_addr;
	vaddr = (unsigned long) area->addr;

	if (kernel_map_sync_memtype(phys_addr, size, pcm))
		goto err_free_area;

	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot))
		goto err_free_area;

	ret_addr = (void __iomem *) (vaddr + offset);
	mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);

	/*
	 * Check whether the request spans more than one BAR in the iomem
	 * resource tree.
	 */
	if (iomem_map_sanity_check(unaligned_phys_addr, unaligned_size))
		pr_warn("caller %pS mapping multiple BARs\n", caller);

	return ret_addr;
err_free_area:
	free_vm_area(area);
err_free_memtype:
	free_memtype(phys_addr, phys_addr + size);
	return NULL;
}
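
/*
 * Worked example of the alignment handling above (illustrative values):
 * a request for phys_addr 0xfebc0010 with size 0x20 is page-aligned to
 * phys 0xfebc0000, offset 0x10 and size PAGE_SIZE, and the caller gets
 * back vaddr + 0x10 -- the "small detail" mentioned in the comment above.
 */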

/**
 * ioremap_nocache     -   map bus memory into CPU space
 * @phys_addr:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular, driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
	/*
	 * Ideally, this should be:
	 *	pat_enabled() ? _PAGE_CACHE_MODE_UC : _PAGE_CACHE_MODE_UC_MINUS;
	 *
	 * Till we fix all X drivers to use ioremap_wc(), we will use
	 * UC MINUS. Drivers that are certain they need or can already
	 * be converted over to strong UC can use ioremap_uc().
	 */
	enum page_cache_mode pcm = _PAGE_CACHE_MODE_UC_MINUS;

	return __ioremap_caller(phys_addr, size, pcm,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_nocache);
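
/*
 * Minimal usage sketch (hypothetical driver code, not from this file;
 * bar_start/bar_len would come from pci_resource_start()/_len() and
 * CTRL_REG is an assumed register offset):
 *
 *	void __iomem *regs = ioremap_nocache(bar_start, bar_len);
 *
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + CTRL_REG);
 *	iounmap(regs);
 */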

/**
 * ioremap_uc     -   map bus memory into CPU space as strongly uncachable
 * @phys_addr:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap_uc performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked with a strong
 * preference as completely uncachable on the CPU when possible. For non-PAT
 * systems this ends up setting page-attribute flags PCD=1, PWT=1. For PAT
 * systems this will set the PAT entry for the pages as strong UC. This call
 * will honor existing caching rules from things like the PCI bus. Note that
 * there are other caches and buffers on many busses. In particular, driver
 * authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_uc(resource_size_t phys_addr, unsigned long size)
{
	enum page_cache_mode pcm = _PAGE_CACHE_MODE_UC;

	return __ioremap_caller(phys_addr, size, pcm,
				__builtin_return_address(0));
}
EXPORT_SYMBOL_GPL(ioremap_uc);
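
/*
 * Usage mirrors ioremap_nocache(); a sketch of when strong UC matters
 * (assumed scenario): a caller that must not have its UC- mapping
 * effectively turned into WC by an overlapping MTRR would use
 *
 *	void __iomem *regs = ioremap_uc(bar_start, bar_len);
 *
 * instead of ioremap_nocache().
 */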

/**
 * ioremap_wc	-	map memory into CPU space write combined
 * @phys_addr:	bus address of the memory
 * @size:	size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WC,
					__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_wc);
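
/*
 * Illustrative sketch (hypothetical framebuffer; fb_base/fb_len are
 * assumed to describe the aperture): WC suits streaming stores such as
 * clearing a framebuffer:
 *
 *	void __iomem *fb = ioremap_wc(fb_base, fb_len);
 *
 *	if (fb)
 *		memset_io(fb, 0, fb_len);
 */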

/**
 * ioremap_wt	-	map memory into CPU space write through
 * @phys_addr:	bus address of the memory
 * @size:	size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write through.
 * Write through stores data into memory while keeping the cache up-to-date.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wt(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WT,
					__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_wt);
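
/*
 * Sketch (an assumed device with battery-backed SRAM at sram_base): with
 * WT, reads can be served from the CPU cache while every store also
 * reaches the device immediately:
 *
 *	void __iomem *sram = ioremap_wt(sram_base, sram_len);
 */
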
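/**
 * ioremap_cache	-	map memory into CPU space as cacheable (WB)
 * @phys_addr:	bus address of the memory
 * @size:	size of the resource to map
 *
 * This version of ioremap maps the memory write-back cacheable; it is
 * what xlate_dev_mem_ptr() below uses for non-RAM pages.
 *
 * Must be freed with iounmap.
 */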
void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WB,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);

void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
				unsigned long prot_val)
{
	return __ioremap_caller(phys_addr, size,
				pgprot2cachemode(__pgprot(prot_val)),
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_prot);
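
/*
 * Sketch: ioremap_prot() takes raw page-protection bits, of which only
 * the cache-mode bits are honoured here, via pgprot2cachemode(). A
 * (hypothetical) caller holding a pgprot_t might do:
 *
 *	void __iomem *p = ioremap_prot(phys, len, pgprot_val(prot));
 */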

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * The PCI/ISA range special-casing was removed from __ioremap()
	 * so this check, in theory, can be removed. However, there are
	 * cases where iounmap() is called for addresses not obtained via
	 * ioremap() (vga16fb for example). Add a warning so that these
	 * cases can be caught and fixed.
	 */
	if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    (void __force *)addr < phys_to_virt(ISA_END_ADDRESS)) {
		WARN(1, "iounmap() called for ISA range not obtained using ioremap()\n");
		return;
	}

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	mmiotrace_iounmap(addr);

	/*
	 * Use the vm area unlocked, assuming the caller ensures there isn't
	 * another iounmap for the same address in parallel. Reuse of the
	 * virtual address is prevented by leaving it in the global lists
	 * until we're done with it. cpa takes care of the direct mappings.
	 */
	p = find_vm_area((void __force *)addr);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));

	/* Finally remove it */
	o = remove_vm_area((void __force *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);
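
/*
 * Note for callers (illustrative): pass back exactly the pointer returned
 * by ioremap_*(), including any sub-page offset -- the PAGE_MASK above
 * strips it internally:
 *
 *	void __iomem *p = ioremap_nocache(0xfebc0010, 0x20);
 *
 *	if (p)
 *		iounmap(p);
 */
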
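/*
 * These report whether huge-page I/O mappings may be used by the generic
 * ioremap code: 1GB (pud) mappings need X86_FEATURE_GBPAGES and are
 * 64-bit only; 2MB (pmd) mappings need X86_FEATURE_PSE.
 */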
int __init arch_ioremap_pud_supported(void)
{
#ifdef CONFIG_X86_64
	return boot_cpu_has(X86_FEATURE_GBPAGES);
#else
	return 0;
#endif
}

int __init arch_ioremap_pmd_supported(void)
{
	return boot_cpu_has(X86_FEATURE_PSE);
}

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access.
 */
void *xlate_dev_mem_ptr(phys_addr_t phys)
{
	unsigned long start  = phys &  PAGE_MASK;
	unsigned long offset = phys & ~PAGE_MASK;
	void *vaddr;

	/* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
	if (page_is_ram(start >> PAGE_SHIFT))
		return __va(phys);

	vaddr = ioremap_cache(start, PAGE_SIZE);
	/* Only add the offset on success and return NULL if the ioremap() failed: */
	if (vaddr)
		vaddr += offset;

	return vaddr;
}

void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
{
	if (page_is_ram(phys >> PAGE_SHIFT))
		return;

	iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
}
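
/*
 * Pairing sketch for the two helpers above (illustrative; buf/count are
 * assumed to describe the caller's destination):
 *
 *	void *p = xlate_dev_mem_ptr(phys);
 *
 *	if (p) {
 *		memcpy(buf, p, count);
 *		unxlate_dev_mem_ptr(phys, p);
 *	}
 */
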
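/* The single, statically allocated page table backing the early fixmap: */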
static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
	/* Don't assume we're using swapper_pg_dir at this point */
	pgd_t *base = __va(read_cr3_pa());
	pgd_t *pgd = &base[pgd_index(addr)];
	p4d_t *p4d = p4d_offset(pgd, addr);
	pud_t *pud = pud_offset(p4d, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}

bool __init is_early_ioremap_ptep(pte_t *ptep)
{
	return ptep >= &bm_pte[0] && ptep < &bm_pte[PAGE_SIZE/sizeof(pte_t)];
}

void __init early_ioremap_init(void)
{
	pmd_t *pmd;

#ifdef CONFIG_X86_64
	BUILD_BUG_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
#else
	WARN_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
#endif

	early_ioremap_setup();

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	memset(bm_pte, 0, sizeof(bm_pte));
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
#define __FIXADDR_TOP (-PAGE_SIZE)
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
#undef __FIXADDR_TOP
	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pmd %p != %p\n",
		       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
			fix_to_virt(FIX_BTMAP_END));

		printk(KERN_WARNING "FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN:     %d\n",
		       FIX_BTMAP_BEGIN);
	}
}

void __init __early_set_fixmap(enum fixed_addresses idx,
			       phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);

	if (pgprot_val(flags))
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	else
		pte_clear(&init_mm, addr, pte);
	__flush_tlb_one(addr);
}
501