/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PCs.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmiotrace.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/pat.h>

#include "physaddr.h"

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
			enum page_cache_mode pcm)
{
	unsigned long nrpages = size >> PAGE_SHIFT;
	int err;

	switch (pcm) {
	case _PAGE_CACHE_MODE_UC:
	default:
		err = _set_memory_uc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WC:
		err = _set_memory_wc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WB:
		err = _set_memory_wb(vaddr, nrpages);
		break;
	}

	return err;
}

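/*
 * Illustrative note: the main user of ioremap_change_attr() is
 * kernel_map_sync_memtype() (in arch/x86/mm/pat.c), which keeps the cache
 * attribute of the kernel's direct mapping in sync with a freshly created
 * ioremap mapping of the same physical range, so one page is never mapped
 * with conflicting memory types (see __ioremap_caller() below).
 */
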
/*
 * Callback for walk_system_ram_range(): returns 1 if the range contains
 * ordinary (valid, non-reserved) RAM pages, in which case the caller must
 * refuse the mapping. Reserved RAM pages are tolerated, but warned about
 * once.
 */
static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
			       void *arg)
{
	unsigned long i;

	for (i = 0; i < nr_pages; ++i)
		if (pfn_valid(start_pfn + i) &&
		    !PageReserved(pfn_to_page(start_pfn + i)))
			return 1;

	WARN_ONCE(1, "ioremap on RAM pfn 0x%lx\n", start_pfn);

	return 0;
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. It transparently creates kernel huge I/O mapping when
 * the physical address is aligned by a huge page size (1GB or 2MB) and
 * the requested size is at least the huge page size.
 *
 * NOTE: MTRRs can override PAT memory types with a 4KB granularity.
 * Therefore, the mapping code falls back to use a smaller page toward 4KB
 * when a mapping range is covered by non-WB type of MTRRs.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap_caller(resource_size_t phys_addr,
		unsigned long size, enum page_cache_mode pcm, void *caller)
{
	unsigned long offset, vaddr;
	resource_size_t pfn, last_pfn, last_addr;
	const resource_size_t unaligned_phys_addr = phys_addr;
	const unsigned long unaligned_size = size;
	struct vm_struct *area;
	enum page_cache_mode new_pcm;
	pgprot_t prot;
	int retval;
	void __iomem *ret_addr;
	int ram_region;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	if (!phys_addr_valid(phys_addr)) {
		printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
		       (unsigned long long)phys_addr);
		WARN_ON_ONCE(1);
		return NULL;
	}

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped.
	 */
	if (is_ISA_range(phys_addr, last_addr))
		return (__force void __iomem *)phys_to_virt(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using.
	 */
	/* First check whether the whole region can be identified as RAM */
	ram_region = region_is_ram(phys_addr, size);
	if (ram_region > 0) {
		WARN_ONCE(1, "ioremap on RAM at 0x%lx - 0x%lx\n",
				(unsigned long int)phys_addr,
				(unsigned long int)last_addr);
		return NULL;
	}

	/* If it could not be identified (-1), check page by page */
	if (ram_region < 0) {
		pfn      = phys_addr >> PAGE_SHIFT;
		last_pfn = last_addr >> PAGE_SHIFT;
		if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
					  __ioremap_check_ram) == 1)
			return NULL;
	}
	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PHYSICAL_PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;
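	/*
	 * Worked example: a request for 8 bytes at 0xfed000f4 gives
	 * last_addr = 0xfed000fb, so offset = 0xf4, phys_addr is rounded
	 * down to 0xfed00000 and size = PAGE_ALIGN(0xfed000fc) - 0xfed00000
	 * = 0x1000; the caller ultimately gets vaddr + 0xf4 back.
	 */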

	retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
						pcm, &new_pcm);
	if (retval) {
		printk(KERN_ERR "ioremap reserve_memtype failed %d\n", retval);
		return NULL;
	}

	if (pcm != new_pcm) {
		if (!is_new_memtype_allowed(phys_addr, size, pcm, new_pcm)) {
			printk(KERN_ERR
		"ioremap error for 0x%llx-0x%llx, requested 0x%x, got 0x%x\n",
				(unsigned long long)phys_addr,
				(unsigned long long)(phys_addr + size),
				pcm, new_pcm);
			goto err_free_memtype;
		}
		pcm = new_pcm;
	}

	prot = PAGE_KERNEL_IO;
	switch (pcm) {
	case _PAGE_CACHE_MODE_UC:
	default:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_UC));
		break;
	case _PAGE_CACHE_MODE_UC_MINUS:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS));
		break;
	case _PAGE_CACHE_MODE_WC:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_WC));
		break;
	case _PAGE_CACHE_MODE_WB:
		break;
	}

	/*
	 * Ok, go for it.
	 */
	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		goto err_free_memtype;
	area->phys_addr = phys_addr;
	vaddr = (unsigned long) area->addr;

	if (kernel_map_sync_memtype(phys_addr, size, pcm))
		goto err_free_area;

	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot))
		goto err_free_area;

	ret_addr = (void __iomem *) (vaddr + offset);
	mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);

	/*
	 * Check if the request spans more than any BAR in the iomem resource
	 * tree.
	 */
	WARN_ONCE(iomem_map_sanity_check(unaligned_phys_addr, unaligned_size),
		  KERN_INFO "Info: mapping multiple BARs. Your kernel is fine.");

	return ret_addr;
err_free_area:
	free_vm_area(area);
err_free_memtype:
	free_memtype(phys_addr, phys_addr + size);
	return NULL;
}

/**
 * ioremap_nocache     -   map bus memory into CPU space
 * @phys_addr:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap().
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
	/*
	 * Ideally, this should be:
	 *	pat_enabled ? _PAGE_CACHE_MODE_UC : _PAGE_CACHE_MODE_UC_MINUS;
	 *
	 * Till we fix all X drivers to use ioremap_wc(), we will use
	 * UC MINUS. Drivers that are certain they need or can already
	 * be converted over to strong UC can use ioremap_uc().
	 */
	enum page_cache_mode pcm = _PAGE_CACHE_MODE_UC_MINUS;

	return __ioremap_caller(phys_addr, size, pcm,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_nocache);
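
/*
 * Illustrative usage sketch for a PCI driver whose registers live in BAR 0
 * ("pdev" and REG_STATUS are hypothetical names, error paths trimmed):
 *
 *	void __iomem *regs;
 *	u32 status;
 *
 *	regs = ioremap_nocache(pci_resource_start(pdev, 0),
 *			       pci_resource_len(pdev, 0));
 *	if (!regs)
 *		return -ENOMEM;
 *	status = readl(regs + REG_STATUS);
 *	...
 *	iounmap(regs);
 */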

/**
 * ioremap_uc     -   map bus memory into CPU space as strongly uncachable
 * @phys_addr:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap_uc performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked with a strong
 * preference as completely uncachable on the CPU when possible. For non-PAT
 * systems this ends up setting page-attribute flags PCD=1, PWT=1. For PAT
 * systems this will set the PAT entry for the pages as strong UC. This call
 * will honor existing caching rules from things like the PCI bus. Note that
 * there are other caches and buffers on many busses. In particular driver
 * authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap().
 */
void __iomem *ioremap_uc(resource_size_t phys_addr, unsigned long size)
{
	enum page_cache_mode pcm = _PAGE_CACHE_MODE_UC;

	return __ioremap_caller(phys_addr, size, pcm,
				__builtin_return_address(0));
}
EXPORT_SYMBOL_GPL(ioremap_uc);
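
/*
 * Note: unlike the UC- mapping returned by ioremap_nocache(), a strong UC
 * PAT entry cannot be overridden to WC by an MTRR covering the same range.
 * The kind of caller that wants ioremap_uc() is, for example, a framebuffer
 * driver whose MMIO registers share a BAR with a WC MTRR over the aperture.
 */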

/**
 * ioremap_wc	-	map memory into CPU space write combined
 * @phys_addr:	bus address of the memory
 * @size:	size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap().
 */
void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
{
	if (pat_enabled)
		return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WC,
					__builtin_return_address(0));
	else
		return ioremap_nocache(phys_addr, size);
}
EXPORT_SYMBOL(ioremap_wc);
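
/*
 * Illustrative usage sketch: a typical WC user is an fbdev driver mapping
 * its framebuffer aperture, where posted, combined writes are safe
 * ("info" is a hypothetical struct fb_info pointer):
 *
 *	info->screen_base = ioremap_wc(info->fix.smem_start,
 *				       info->fix.smem_len);
 */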

void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WB,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);

void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
				unsigned long prot_val)
{
	return __ioremap_caller(phys_addr, size,
				pgprot2cachemode(__pgprot(prot_val)),
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_prot);

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space. So handle that here.
	 */
	if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    (void __force *)addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	mmiotrace_iounmap(addr);

	/*
	 * Use the vm area unlocked, assuming the caller ensures there isn't
	 * another iounmap for the same address in parallel. Reuse of the
	 * virtual address is prevented by leaving it in the global lists
	 * until we're done with it. cpa takes care of the direct mappings.
	 */
	p = find_vm_area((void __force *)addr);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));

	/* Finally remove it */
	o = remove_vm_area((void __force *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);

int arch_ioremap_pud_supported(void)
{
#ifdef CONFIG_X86_64
	return cpu_has_gbpages;
#else
	return 0;
#endif
}

int arch_ioremap_pmd_supported(void)
{
	return cpu_has_pse;
}

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access.
 */
void *xlate_dev_mem_ptr(phys_addr_t phys)
{
	unsigned long start  = phys &  PAGE_MASK;
	unsigned long offset = phys & ~PAGE_MASK;
	unsigned long vaddr;

	/* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
	if (page_is_ram(start >> PAGE_SHIFT))
		return __va(phys);

	vaddr = (unsigned long)ioremap_cache(start, PAGE_SIZE);
	/* Only add the offset on success and return NULL if the ioremap() failed: */
	if (vaddr)
		vaddr += offset;

	return (void *)vaddr;
}

void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
{
	if (page_is_ram(phys >> PAGE_SHIFT))
		return;

	iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
	return;
}

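/*
 * Illustrative pairing, along the lines of the /dev/mem read path in
 * drivers/char/mem.c (sketch only, error handling trimmed):
 *
 *	ptr = xlate_dev_mem_ptr(p);
 *	if (!ptr)
 *		return -EFAULT;
 *	if (copy_to_user(buf, ptr, sz))
 *		...;
 *	unxlate_dev_mem_ptr(p, ptr);
 */
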
static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
	/* Don't assume we're using swapper_pg_dir at this point */
	pgd_t *base = __va(read_cr3());
	pgd_t *pgd = &base[pgd_index(addr)];
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}

bool __init is_early_ioremap_ptep(pte_t *ptep)
{
	return ptep >= &bm_pte[0] && ptep < &bm_pte[PAGE_SIZE/sizeof(pte_t)];
}

void __init early_ioremap_init(void)
{
	pmd_t *pmd;

#ifdef CONFIG_X86_64
	BUILD_BUG_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
#else
	WARN_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
#endif

	early_ioremap_setup();

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	memset(bm_pte, 0, sizeof(bm_pte));
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
#define __FIXADDR_TOP (-PAGE_SIZE)
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
#undef __FIXADDR_TOP
	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pmd %p != %p\n",
		       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
			fix_to_virt(FIX_BTMAP_END));

		printk(KERN_WARNING "FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN:     %d\n",
		       FIX_BTMAP_BEGIN);
	}
}

void __init __early_set_fixmap(enum fixed_addresses idx,
			       phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);

	if (pgprot_val(flags))
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	else
		pte_clear(&init_mm, addr, pte);
	__flush_tlb_one(addr);
}
493