/* arch/x86/mm/ioremap.c (revision 623dffb2a2e059e1ace45b59b3ff21c66c419614) */
/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PCs.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmiotrace.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/pat.h>

#include "physaddr.h"

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
			enum page_cache_mode pcm)
{
	unsigned long nrpages = size >> PAGE_SHIFT;
	int err;

	switch (pcm) {
	case _PAGE_CACHE_MODE_UC:
	default:
		err = _set_memory_uc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WC:
		err = _set_memory_wc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WT:
		err = _set_memory_wt(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WB:
		err = _set_memory_wb(vaddr, nrpages);
		break;
	}

	return err;
}
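
/*
 * Usage sketch (illustration only, not from the original file): callers
 * such as kernel_map_sync_memtype() pass the linear-map alias of the
 * physical range that was just ioremapped, e.g. for one page mapped UC-:
 *
 *	err = ioremap_change_attr((unsigned long)__va(phys_addr), PAGE_SIZE,
 *				  _PAGE_CACHE_MODE_UC_MINUS);
 *
 * phys_addr here is an assumed page-aligned physical address covered by
 * the direct mapping.
 */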

static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
			       void *arg)
{
	unsigned long i;

	for (i = 0; i < nr_pages; ++i)
		if (pfn_valid(start_pfn + i) &&
		    !PageReserved(pfn_to_page(start_pfn + i)))
			return 1;

	WARN_ONCE(1, "ioremap on RAM pfn 0x%lx\n", start_pfn);

	return 0;
}
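
/*
 * Note (added for clarity): walk_system_ram_range() only invokes the
 * callback above for chunks of the pfn range that are marked as System
 * RAM, and it stops as soon as the callback returns non-zero. A result
 * of 1 therefore means "the range contains RAM pages in use", while the
 * WARN above fires only when every RAM page in a chunk is reserved:
 *
 *	walk_system_ram_range(pfn, nr_pages, NULL, __ioremap_check_ram);
 */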

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. It transparently creates kernel huge I/O mappings when
 * the physical address is aligned to a huge page size (1GB or 2MB) and
 * the requested size is at least the huge page size.
 *
 * NOTE: MTRRs can override PAT memory types with 4KB granularity.
 * Therefore, the mapping code falls back to using smaller pages, down to
 * 4KB, when the mapping range is covered by a non-WB MTRR type.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap_caller(resource_size_t phys_addr,
		unsigned long size, enum page_cache_mode pcm, void *caller)
{
	unsigned long offset, vaddr;
	resource_size_t pfn, last_pfn, last_addr;
	const resource_size_t unaligned_phys_addr = phys_addr;
	const unsigned long unaligned_size = size;
	struct vm_struct *area;
	enum page_cache_mode new_pcm;
	pgprot_t prot;
	int retval;
	void __iomem *ret_addr;
	int ram_region;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	if (!phys_addr_valid(phys_addr)) {
		printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
		       (unsigned long long)phys_addr);
		WARN_ON_ONCE(1);
		return NULL;
	}

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped.
	 */
	if (is_ISA_range(phys_addr, last_addr))
		return (__force void __iomem *)phys_to_virt(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using.
	 */
	/* First check if the whole region can be identified as RAM or not */
	ram_region = region_is_ram(phys_addr, size);
	if (ram_region > 0) {
		WARN_ONCE(1, "ioremap on RAM at 0x%llx - 0x%llx\n",
				(unsigned long long)phys_addr,
				(unsigned long long)last_addr);
		return NULL;
	}

	/* If the region could not be identified (-1), check it page by page */
	if (ram_region < 0) {
		pfn      = phys_addr >> PAGE_SHIFT;
		last_pfn = last_addr >> PAGE_SHIFT;
		if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
					  __ioremap_check_ram) == 1)
			return NULL;
	}
	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PHYSICAL_PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
						pcm, &new_pcm);
	if (retval) {
		printk(KERN_ERR "ioremap reserve_memtype failed %d\n", retval);
		return NULL;
	}

	if (pcm != new_pcm) {
		if (!is_new_memtype_allowed(phys_addr, size, pcm, new_pcm)) {
			printk(KERN_ERR
		"ioremap error for 0x%llx-0x%llx, requested 0x%x, got 0x%x\n",
				(unsigned long long)phys_addr,
				(unsigned long long)(phys_addr + size),
				pcm, new_pcm);
			goto err_free_memtype;
		}
		pcm = new_pcm;
	}

	prot = PAGE_KERNEL_IO;
	switch (pcm) {
	case _PAGE_CACHE_MODE_UC:
	default:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_UC));
		break;
	case _PAGE_CACHE_MODE_UC_MINUS:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS));
		break;
	case _PAGE_CACHE_MODE_WC:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_WC));
		break;
	case _PAGE_CACHE_MODE_WT:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_WT));
		break;
	case _PAGE_CACHE_MODE_WB:
		break;
	}

	/*
	 * Ok, go for it.
	 */
	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		goto err_free_memtype;
	area->phys_addr = phys_addr;
	vaddr = (unsigned long) area->addr;

	if (kernel_map_sync_memtype(phys_addr, size, pcm))
		goto err_free_area;

	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot))
		goto err_free_area;

	ret_addr = (void __iomem *) (vaddr + offset);
	mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);

	/*
	 * Check if the request spans more than one BAR in the iomem
	 * resource tree.
	 */
	WARN_ONCE(iomem_map_sanity_check(unaligned_phys_addr, unaligned_size),
		  "Info: mapping multiple BARs. Your kernel is fine.");

	return ret_addr;
err_free_area:
	free_vm_area(area);
err_free_memtype:
	free_memtype(phys_addr, phys_addr + size);
	return NULL;
}
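
/*
 * Worked example (illustration only): a request for 0x10 bytes at
 * physical 0xfed00004 yields offset = 0x4, an aligned phys_addr of
 * 0xfed00000 and size = PAGE_SIZE; the caller gets the start of the new
 * mapping plus 0x4, so the unaligned request "just works".
 */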

/**
 * ioremap_nocache     -   map bus memory into CPU space
 * @phys_addr:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncacheable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular, driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
	/*
	 * Ideally, this should be:
	 *	pat_enabled() ? _PAGE_CACHE_MODE_UC : _PAGE_CACHE_MODE_UC_MINUS;
	 *
	 * Till we fix all X drivers to use ioremap_wc(), we will use
	 * UC MINUS. Drivers that are certain they need or can already
	 * be converted over to strong UC can use ioremap_uc().
	 */
	enum page_cache_mode pcm = _PAGE_CACHE_MODE_UC_MINUS;

	return __ioremap_caller(phys_addr, size, pcm,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_nocache);
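
/*
 * Typical usage (hypothetical sketch, not from the original file); the
 * BAR address 0xfed00000 and the 0x10 register offset are assumptions
 * made purely for illustration:
 *
 *	void __iomem *regs = ioremap_nocache(0xfed00000, PAGE_SIZE);
 *
 *	if (regs) {
 *		u32 val = readl(regs + 0x10);
 *		...
 *		iounmap(regs);
 *	}
 */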

/**
 * ioremap_uc     -   map bus memory into CPU space as strongly uncacheable
 * @phys_addr:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap_uc performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked with a strong
 * preference as completely uncacheable on the CPU when possible. For non-PAT
 * systems this ends up setting page-attribute flags PCD=1, PWT=1. For PAT
 * systems this will set the PAT entry for the pages as strong UC. This call
 * will honor existing caching rules from things like the PCI bus. Note that
 * there are other caches and buffers on many busses. In particular, driver
 * authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_uc(resource_size_t phys_addr, unsigned long size)
{
	enum page_cache_mode pcm = _PAGE_CACHE_MODE_UC;

	return __ioremap_caller(phys_addr, size, pcm,
				__builtin_return_address(0));
}
EXPORT_SYMBOL_GPL(ioremap_uc);
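
/*
 * Illustration (assumption, not from the original file): a PCI driver
 * that must rule out any write combining on its register BAR would pick
 * the strong-UC variant:
 *
 *	regs = ioremap_uc(pci_resource_start(pdev, 0),
 *			  pci_resource_len(pdev, 0));
 */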

/**
 * ioremap_wc	-	map memory into CPU space write combined
 * @phys_addr:	bus address of the memory
 * @size:	size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WC,
					__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_wc);
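
/*
 * Hypothetical sketch (not from the original file): write combining suits
 * streaming stores such as clearing a framebuffer; fb_base and fb_len are
 * assumed values from a driver's probe path:
 *
 *	void __iomem *fb = ioremap_wc(fb_base, fb_len);
 *
 *	if (fb) {
 *		memset_io(fb, 0, fb_len);
 *		...
 *		iounmap(fb);
 *	}
 */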

/**
 * ioremap_wt	-	map memory into CPU space write through
 * @phys_addr:	bus address of the memory
 * @size:	size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write through.
 * Write through stores data into memory while keeping the cache up-to-date.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wt(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WT,
					__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_wt);
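
/*
 * Illustration (assumption, not from the original file): write-through
 * suits buffers the CPU mostly reads, since reads can hit the cache while
 * every store still goes out to the device:
 *
 *	void __iomem *p = ioremap_wt(phys_addr, size);
 */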

void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WB,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);

void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
				unsigned long prot_val)
{
	return __ioremap_caller(phys_addr, size,
				pgprot2cachemode(__pgprot(prot_val)),
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_prot);
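
/*
 * Illustration (assumption, not from the original file): ioremap_prot()
 * derives the cache mode from raw page-protection bits, so e.g.
 *
 *	p = ioremap_prot(phys_addr, size,
 *			 pgprot_val(pgprot_noncached(PAGE_KERNEL_IO)));
 *
 * would behave like ioremap_nocache(), since pgprot_noncached() sets the
 * UC- cache-mode bits on x86.
 */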

/**
 * iounmap - Free an I/O remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space. So handle that here.
	 */
	if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    (void __force *)addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	mmiotrace_iounmap(addr);

	/*
	 * Use the vm area unlocked, assuming the caller ensures there isn't
	 * another iounmap for the same address in parallel. Reuse of the
	 * virtual address is prevented by leaving it in the global lists
	 * until we're done with it. cpa takes care of the direct mappings.
	 */
	p = find_vm_area((void __force *)addr);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));

	/* Finally remove it */
	o = remove_vm_area((void __force *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);

int __init arch_ioremap_pud_supported(void)
{
#ifdef CONFIG_X86_64
	return cpu_has_gbpages;
#else
	return 0;
#endif
}

int __init arch_ioremap_pmd_supported(void)
{
	return cpu_has_pse;
}

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
void *xlate_dev_mem_ptr(phys_addr_t phys)
{
	unsigned long start  = phys &  PAGE_MASK;
	unsigned long offset = phys & ~PAGE_MASK;
	unsigned long vaddr;

	/* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
	if (page_is_ram(start >> PAGE_SHIFT))
		return __va(phys);

	vaddr = (unsigned long)ioremap_cache(start, PAGE_SIZE);
	/* Only add the offset on success; return NULL if the ioremap() failed. */
	if (vaddr)
		vaddr += offset;

	return (void *)vaddr;
}

void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
{
	if (page_is_ram(phys >> PAGE_SHIFT))
		return;

	iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
}
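
/*
 * Usage sketch (illustration only): the /dev/mem read path pairs the two
 * helpers above around the actual copy, roughly:
 *
 *	void *ptr = xlate_dev_mem_ptr(p);
 *
 *	if (!ptr)
 *		return -EFAULT;
 *	if (copy_to_user(buf, ptr, sz))
 *		err = -EFAULT;
 *	unxlate_dev_mem_ptr(p, ptr);
 */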

static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
	/* Don't assume we're using swapper_pg_dir at this point */
	pgd_t *base = __va(read_cr3());
	pgd_t *pgd = &base[pgd_index(addr)];
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}

bool __init is_early_ioremap_ptep(pte_t *ptep)
{
	return ptep >= &bm_pte[0] && ptep < &bm_pte[PAGE_SIZE/sizeof(pte_t)];
}

void __init early_ioremap_init(void)
{
	pmd_t *pmd;

#ifdef CONFIG_X86_64
	BUILD_BUG_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
#else
	WARN_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
#endif

	early_ioremap_setup();

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	memset(bm_pte, 0, sizeof(bm_pte));
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range must not span multiple pmds, for which
	 * we are not prepared:
	 */
#define __FIXADDR_TOP (-PAGE_SIZE)
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
#undef __FIXADDR_TOP
	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pmd %p != %p\n",
		       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
			fix_to_virt(FIX_BTMAP_END));

		printk(KERN_WARNING "FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN:     %d\n",
		       FIX_BTMAP_BEGIN);
	}
}
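
/*
 * Illustration (assumption, not from the original file): with the fixmap
 * slots above wired up, boot code can temporarily map firmware tables
 * long before the vmalloc-based ioremap() works, e.g.:
 *
 *	void *p = early_ioremap(phys, len);
 *	...
 *	early_iounmap(p, len);
 */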
495e64c8aa0SThomas Gleixner 
4965b7c73e0SMark Salter void __init __early_set_fixmap(enum fixed_addresses idx,
4979b987aebSMasami Hiramatsu 			       phys_addr_t phys, pgprot_t flags)
498e64c8aa0SThomas Gleixner {
499551889a6SIan Campbell 	unsigned long addr = __fix_to_virt(idx);
500551889a6SIan Campbell 	pte_t *pte;
501e64c8aa0SThomas Gleixner 
502e64c8aa0SThomas Gleixner 	if (idx >= __end_of_fixed_addresses) {
503e64c8aa0SThomas Gleixner 		BUG();
504e64c8aa0SThomas Gleixner 		return;
505e64c8aa0SThomas Gleixner 	}
506e64c8aa0SThomas Gleixner 	pte = early_ioremap_pte(addr);
5074583ed51SJeremy Fitzhardinge 
508e64c8aa0SThomas Gleixner 	if (pgprot_val(flags))
509551889a6SIan Campbell 		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
510e64c8aa0SThomas Gleixner 	else
5114f9c11ddSJeremy Fitzhardinge 		pte_clear(&init_mm, addr, pte);
512e64c8aa0SThomas Gleixner 	__flush_tlb_one(addr);
513e64c8aa0SThomas Gleixner }
514