xref: /linux/arch/x86/mm/ioremap.c (revision 8808500f26a61757cb414da76b271bbd09d5958c)
1e64c8aa0SThomas Gleixner /*
2e64c8aa0SThomas Gleixner  * Re-map IO memory to kernel address space so that we can access it.
3e64c8aa0SThomas Gleixner  * This is needed for high PCI addresses that aren't mapped in the
4e64c8aa0SThomas Gleixner  * 640K-1MB IO memory area on PCs.
5e64c8aa0SThomas Gleixner  *
6e64c8aa0SThomas Gleixner  * (C) Copyright 1995 1996 Linus Torvalds
7e64c8aa0SThomas Gleixner  */
8e64c8aa0SThomas Gleixner 
9e64c8aa0SThomas Gleixner #include <linux/bootmem.h>
10e64c8aa0SThomas Gleixner #include <linux/init.h>
11e64c8aa0SThomas Gleixner #include <linux/io.h>
12e64c8aa0SThomas Gleixner #include <linux/module.h>
13e64c8aa0SThomas Gleixner #include <linux/slab.h>
14e64c8aa0SThomas Gleixner #include <linux/vmalloc.h>
15d61fc448SPekka Paalanen #include <linux/mmiotrace.h>
16e64c8aa0SThomas Gleixner 
17e64c8aa0SThomas Gleixner #include <asm/cacheflush.h>
18e64c8aa0SThomas Gleixner #include <asm/e820.h>
19e64c8aa0SThomas Gleixner #include <asm/fixmap.h>
20e64c8aa0SThomas Gleixner #include <asm/pgtable.h>
21e64c8aa0SThomas Gleixner #include <asm/tlbflush.h>
22f6df72e7SJeremy Fitzhardinge #include <asm/pgalloc.h>
23d7677d40Svenkatesh.pallipadi@intel.com #include <asm/pat.h>
24e64c8aa0SThomas Gleixner 
25e64c8aa0SThomas Gleixner #ifdef CONFIG_X86_64
26e64c8aa0SThomas Gleixner 
27e3100c82SThomas Gleixner static inline int phys_addr_valid(unsigned long addr)
28e3100c82SThomas Gleixner {
29e3100c82SThomas Gleixner 	return addr < (1UL << boot_cpu_data.x86_phys_bits);
30e3100c82SThomas Gleixner }
31e3100c82SThomas Gleixner 
3259ea7463SJiri Slaby unsigned long __phys_addr(unsigned long x)
3359ea7463SJiri Slaby {
3459ea7463SJiri Slaby 	if (x >= __START_KERNEL_map) {
3559ea7463SJiri Slaby 		x -= __START_KERNEL_map;
3659ea7463SJiri Slaby 		VIRTUAL_BUG_ON(x >= KERNEL_IMAGE_SIZE);
3759ea7463SJiri Slaby 		x += phys_base;
3859ea7463SJiri Slaby 	} else {
3959ea7463SJiri Slaby 		VIRTUAL_BUG_ON(x < PAGE_OFFSET);
4059ea7463SJiri Slaby 		x -= PAGE_OFFSET;
4159ea7463SJiri Slaby 		VIRTUAL_BUG_ON(system_state == SYSTEM_BOOTING ? x > MAXMEM :
4259ea7463SJiri Slaby 					!phys_addr_valid(x));
4359ea7463SJiri Slaby 	}
4459ea7463SJiri Slaby 	return x;
4559ea7463SJiri Slaby }
4659ea7463SJiri Slaby EXPORT_SYMBOL(__phys_addr);
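
/*
 * Worked example (a sketch, with made-up addresses): a direct-map pointer
 * such as PAGE_OFFSET + 0x100000 takes the else-branch above and yields
 * physical address 0x100000, while an address in the __START_KERNEL_map
 * range is first rebased to the start of the kernel image and then offset
 * by phys_base, so a relocated kernel still resolves to the right page.
 */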
4759ea7463SJiri Slaby 
48af5c2bd1SVegard Nossum bool __virt_addr_valid(unsigned long x)
49af5c2bd1SVegard Nossum {
50af5c2bd1SVegard Nossum 	if (x >= __START_KERNEL_map) {
51af5c2bd1SVegard Nossum 		x -= __START_KERNEL_map;
52af5c2bd1SVegard Nossum 		if (x >= KERNEL_IMAGE_SIZE)
53af5c2bd1SVegard Nossum 			return false;
54af5c2bd1SVegard Nossum 		x += phys_base;
55af5c2bd1SVegard Nossum 	} else {
56af5c2bd1SVegard Nossum 		if (x < PAGE_OFFSET)
57af5c2bd1SVegard Nossum 			return false;
58af5c2bd1SVegard Nossum 		x -= PAGE_OFFSET;
59af5c2bd1SVegard Nossum 		if (system_state == SYSTEM_BOOTING ?
60af5c2bd1SVegard Nossum 				x > MAXMEM : !phys_addr_valid(x)) {
61af5c2bd1SVegard Nossum 			return false;
62af5c2bd1SVegard Nossum 		}
63af5c2bd1SVegard Nossum 	}
64af5c2bd1SVegard Nossum 
65af5c2bd1SVegard Nossum 	return pfn_valid(x >> PAGE_SHIFT);
66af5c2bd1SVegard Nossum }
67af5c2bd1SVegard Nossum EXPORT_SYMBOL(__virt_addr_valid);
68af5c2bd1SVegard Nossum 
69e3100c82SThomas Gleixner #else
70e3100c82SThomas Gleixner 
71e3100c82SThomas Gleixner static inline int phys_addr_valid(unsigned long addr)
72e3100c82SThomas Gleixner {
73e3100c82SThomas Gleixner 	return 1;
74e3100c82SThomas Gleixner }
75e3100c82SThomas Gleixner 
76a1bf9631SJiri Slaby #ifdef CONFIG_DEBUG_VIRTUAL
7759ea7463SJiri Slaby unsigned long __phys_addr(unsigned long x)
7859ea7463SJiri Slaby {
7959ea7463SJiri Slaby 	/* VMALLOC_* aren't constants; they are not available at boot time */
80af5c2bd1SVegard Nossum 	VIRTUAL_BUG_ON(x < PAGE_OFFSET);
81af5c2bd1SVegard Nossum 	VIRTUAL_BUG_ON(system_state != SYSTEM_BOOTING &&
82af5c2bd1SVegard Nossum 		is_vmalloc_addr((void *) x));
8359ea7463SJiri Slaby 	return x - PAGE_OFFSET;
8459ea7463SJiri Slaby }
8559ea7463SJiri Slaby EXPORT_SYMBOL(__phys_addr);
86a1bf9631SJiri Slaby #endif
8759ea7463SJiri Slaby 
88af5c2bd1SVegard Nossum bool __virt_addr_valid(unsigned long x)
89af5c2bd1SVegard Nossum {
90af5c2bd1SVegard Nossum 	if (x < PAGE_OFFSET)
91af5c2bd1SVegard Nossum 		return false;
92af5c2bd1SVegard Nossum 	if (system_state != SYSTEM_BOOTING && is_vmalloc_addr((void *) x))
93af5c2bd1SVegard Nossum 		return false;
94af5c2bd1SVegard Nossum 	return pfn_valid((x - PAGE_OFFSET) >> PAGE_SHIFT);
95af5c2bd1SVegard Nossum }
96af5c2bd1SVegard Nossum EXPORT_SYMBOL(__virt_addr_valid);
97af5c2bd1SVegard Nossum 
98e64c8aa0SThomas Gleixner #endif
99e64c8aa0SThomas Gleixner 
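/*
 * page_is_ram - report whether a page frame is usable system RAM
 * @pagenr: page frame number to check
 *
 * Returns 1 if the page is covered by an E820_RAM entry, 0 otherwise.
 * Page 0 and the legacy BIOS area are always treated as non-RAM; see
 * the special cases below.
 */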
1005f5192b9SThomas Gleixner int page_is_ram(unsigned long pagenr)
1015f5192b9SThomas Gleixner {
102756a6c68SIngo Molnar 	resource_size_t addr, end;
1035f5192b9SThomas Gleixner 	int i;
1045f5192b9SThomas Gleixner 
105d8a9e6a5SArjan van de Ven 	/*
106d8a9e6a5SArjan van de Ven 	 * A special case is the first 4KB of memory:
107d8a9e6a5SArjan van de Ven 	 * this is a BIOS-owned area, not kernel RAM, but it is generally
108d8a9e6a5SArjan van de Ven 	 * not listed as such in the E820 table.
109d8a9e6a5SArjan van de Ven 	 */
110d8a9e6a5SArjan van de Ven 	if (pagenr == 0)
111d8a9e6a5SArjan van de Ven 		return 0;
112d8a9e6a5SArjan van de Ven 
113156fbc3fSArjan van de Ven 	/*
114156fbc3fSArjan van de Ven 	 * Second special case: some BIOSes report the PC BIOS
115156fbc3fSArjan van de Ven 	 * area (640K->1MB) as RAM even though it is not.
116156fbc3fSArjan van de Ven 	 */
117156fbc3fSArjan van de Ven 	if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
118156fbc3fSArjan van de Ven 		    pagenr < (BIOS_END >> PAGE_SHIFT))
119156fbc3fSArjan van de Ven 		return 0;
120d8a9e6a5SArjan van de Ven 
1215f5192b9SThomas Gleixner 	for (i = 0; i < e820.nr_map; i++) {
1225f5192b9SThomas Gleixner 		/*
1235f5192b9SThomas Gleixner 		 * Not usable memory:
1245f5192b9SThomas Gleixner 		 */
1255f5192b9SThomas Gleixner 		if (e820.map[i].type != E820_RAM)
1265f5192b9SThomas Gleixner 			continue;
1275f5192b9SThomas Gleixner 		addr = (e820.map[i].addr + PAGE_SIZE-1) >> PAGE_SHIFT;
1285f5192b9SThomas Gleixner 		end = (e820.map[i].addr + e820.map[i].size) >> PAGE_SHIFT;
129950f9d95SThomas Gleixner 
1315f5192b9SThomas Gleixner 		if ((pagenr >= addr) && (pagenr < end))
1325f5192b9SThomas Gleixner 			return 1;
1335f5192b9SThomas Gleixner 	}
1345f5192b9SThomas Gleixner 	return 0;
1355f5192b9SThomas Gleixner }
1365f5192b9SThomas Gleixner 
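/*
 * pagerange_is_ram - check whether a page range is uniformly RAM
 *
 * Returns 1 if every page in the range is RAM, 0 if none of it is, and
 * -1 as soon as the range turns out to mix RAM and non-RAM pages.
 */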
1379542ada8SSuresh Siddha int pagerange_is_ram(unsigned long start, unsigned long end)
1389542ada8SSuresh Siddha {
1399542ada8SSuresh Siddha 	int ram_page = 0, not_ram_page = 0;
1409542ada8SSuresh Siddha 	unsigned long page_nr;
1419542ada8SSuresh Siddha 
1429542ada8SSuresh Siddha 	for (page_nr = (start >> PAGE_SHIFT); page_nr < (end >> PAGE_SHIFT);
1439542ada8SSuresh Siddha 	     ++page_nr) {
1449542ada8SSuresh Siddha 		if (page_is_ram(page_nr))
1459542ada8SSuresh Siddha 			ram_page = 1;
1469542ada8SSuresh Siddha 		else
1479542ada8SSuresh Siddha 			not_ram_page = 1;
1489542ada8SSuresh Siddha 
1499542ada8SSuresh Siddha 		if (ram_page == not_ram_page)
1509542ada8SSuresh Siddha 			return -1;
1519542ada8SSuresh Siddha 	}
1529542ada8SSuresh Siddha 
1539542ada8SSuresh Siddha 	return ram_page;
1549542ada8SSuresh Siddha }
1559542ada8SSuresh Siddha 
156e64c8aa0SThomas Gleixner /*
157e64c8aa0SThomas Gleixner  * Fix up the linear direct mapping of the kernel to avoid cache attribute
158e64c8aa0SThomas Gleixner  * conflicts.
159e64c8aa0SThomas Gleixner  */
1603a96ce8cSvenkatesh.pallipadi@intel.com int ioremap_change_attr(unsigned long vaddr, unsigned long size,
1613a96ce8cSvenkatesh.pallipadi@intel.com 			       unsigned long prot_val)
162e64c8aa0SThomas Gleixner {
163d806e5eeSThomas Gleixner 	unsigned long nrpages = size >> PAGE_SHIFT;
16493809be8SHarvey Harrison 	int err;
165e64c8aa0SThomas Gleixner 
1663a96ce8cSvenkatesh.pallipadi@intel.com 	switch (prot_val) {
1673a96ce8cSvenkatesh.pallipadi@intel.com 	case _PAGE_CACHE_UC:
168d806e5eeSThomas Gleixner 	default:
1691219333dSvenkatesh.pallipadi@intel.com 		err = _set_memory_uc(vaddr, nrpages);
170d806e5eeSThomas Gleixner 		break;
171b310f381Svenkatesh.pallipadi@intel.com 	case _PAGE_CACHE_WC:
172b310f381Svenkatesh.pallipadi@intel.com 		err = _set_memory_wc(vaddr, nrpages);
173b310f381Svenkatesh.pallipadi@intel.com 		break;
1743a96ce8cSvenkatesh.pallipadi@intel.com 	case _PAGE_CACHE_WB:
1751219333dSvenkatesh.pallipadi@intel.com 		err = _set_memory_wb(vaddr, nrpages);
176d806e5eeSThomas Gleixner 		break;
177d806e5eeSThomas Gleixner 	}
178e64c8aa0SThomas Gleixner 
179e64c8aa0SThomas Gleixner 	return err;
180e64c8aa0SThomas Gleixner }
181e64c8aa0SThomas Gleixner 
182e64c8aa0SThomas Gleixner /*
183e64c8aa0SThomas Gleixner  * Remap an arbitrary physical address space into the kernel virtual
184e64c8aa0SThomas Gleixner  * address space. Needed when the kernel wants to access high addresses
185e64c8aa0SThomas Gleixner  * directly.
186e64c8aa0SThomas Gleixner  *
187e64c8aa0SThomas Gleixner  * NOTE! We need to allow non-page-aligned mappings too: we will obviously
188e64c8aa0SThomas Gleixner  * have to convert them into an offset in a page-aligned mapping, but the
189e64c8aa0SThomas Gleixner  * caller shouldn't need to know that small detail.
190e64c8aa0SThomas Gleixner  */
19123016969SChristoph Lameter static void __iomem *__ioremap_caller(resource_size_t phys_addr,
19223016969SChristoph Lameter 		unsigned long size, unsigned long prot_val, void *caller)
193e64c8aa0SThomas Gleixner {
194756a6c68SIngo Molnar 	unsigned long pfn, offset, vaddr;
195756a6c68SIngo Molnar 	resource_size_t last_addr;
19687e547feSPekka Paalanen 	const resource_size_t unaligned_phys_addr = phys_addr;
19787e547feSPekka Paalanen 	const unsigned long unaligned_size = size;
198e64c8aa0SThomas Gleixner 	struct vm_struct *area;
199d7677d40Svenkatesh.pallipadi@intel.com 	unsigned long new_prot_val;
200d806e5eeSThomas Gleixner 	pgprot_t prot;
201dee7cbb2SVenki Pallipadi 	int retval;
202d61fc448SPekka Paalanen 	void __iomem *ret_addr;
203e64c8aa0SThomas Gleixner 
204e64c8aa0SThomas Gleixner 	/* Don't allow wraparound or zero size */
205e64c8aa0SThomas Gleixner 	last_addr = phys_addr + size - 1;
206e64c8aa0SThomas Gleixner 	if (!size || last_addr < phys_addr)
207e64c8aa0SThomas Gleixner 		return NULL;
208e64c8aa0SThomas Gleixner 
209e3100c82SThomas Gleixner 	if (!phys_addr_valid(phys_addr)) {
2106997ab49Svenkatesh.pallipadi@intel.com 		printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
2114c8337acSRandy Dunlap 		       (unsigned long long)phys_addr);
212e3100c82SThomas Gleixner 		WARN_ON_ONCE(1);
213e3100c82SThomas Gleixner 		return NULL;
214e3100c82SThomas Gleixner 	}
215e3100c82SThomas Gleixner 
216e64c8aa0SThomas Gleixner 	/*
217e64c8aa0SThomas Gleixner 	 * Don't remap the low PCI/ISA area; it's always mapped.
218e64c8aa0SThomas Gleixner 	 */
219bcc643dcSAndreas Herrmann 	if (is_ISA_range(phys_addr, last_addr))
220e64c8aa0SThomas Gleixner 		return (__force void __iomem *)phys_to_virt(phys_addr);
221e64c8aa0SThomas Gleixner 
222e64c8aa0SThomas Gleixner 	/*
223379daf62SSuresh Siddha 	 * Check whether the request spans more than one BAR in the iomem
224379daf62SSuresh Siddha 	 * resource tree.
225379daf62SSuresh Siddha 	 */
226*8808500fSIngo Molnar 	WARN_ONCE(iomem_map_sanity_check(phys_addr, size),
227*8808500fSIngo Molnar 		  KERN_INFO "Info: mapping multiple BARs. Your kernel is fine.");
228379daf62SSuresh Siddha 
229379daf62SSuresh Siddha 	/*
230e64c8aa0SThomas Gleixner 	 * Don't allow anybody to remap normal RAM that we're using.
231e64c8aa0SThomas Gleixner 	 */
232cb8ab687SAndres Salomon 	for (pfn = phys_addr >> PAGE_SHIFT;
233cb8ab687SAndres Salomon 				(pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK);
234cb8ab687SAndres Salomon 				pfn++) {
235bdd3cee2SIngo Molnar 
236ba748d22SIngo Molnar 		int is_ram = page_is_ram(pfn);
237ba748d22SIngo Molnar 
238ba748d22SIngo Molnar 		if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
239e64c8aa0SThomas Gleixner 			return NULL;
240ba748d22SIngo Molnar 		WARN_ON_ONCE(is_ram);
241e64c8aa0SThomas Gleixner 	}
242e64c8aa0SThomas Gleixner 
243d7677d40Svenkatesh.pallipadi@intel.com 	/*
244d7677d40Svenkatesh.pallipadi@intel.com 	 * Mappings have to be page-aligned
245d7677d40Svenkatesh.pallipadi@intel.com 	 */
246d7677d40Svenkatesh.pallipadi@intel.com 	offset = phys_addr & ~PAGE_MASK;
247d7677d40Svenkatesh.pallipadi@intel.com 	phys_addr &= PAGE_MASK;
248d7677d40Svenkatesh.pallipadi@intel.com 	size = PAGE_ALIGN(last_addr+1) - phys_addr;
249d7677d40Svenkatesh.pallipadi@intel.com 
250e213e877SAndi Kleen 	retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
251dee7cbb2SVenki Pallipadi 						prot_val, &new_prot_val);
252dee7cbb2SVenki Pallipadi 	if (retval) {
253b450e5e8SVenki Pallipadi 		pr_debug("Warning: reserve_memtype returned %d\n", retval);
254dee7cbb2SVenki Pallipadi 		return NULL;
255dee7cbb2SVenki Pallipadi 	}
256dee7cbb2SVenki Pallipadi 
257dee7cbb2SVenki Pallipadi 	if (prot_val != new_prot_val) {
258d7677d40Svenkatesh.pallipadi@intel.com 		/*
259d7677d40Svenkatesh.pallipadi@intel.com 		 * Do not fall back to certain memory types if a certain
260d7677d40Svenkatesh.pallipadi@intel.com 		 * type was requested:
261de33c442SSuresh Siddha 		 * - request is uc-, return cannot be write-back
262de33c442SSuresh Siddha 		 * - request is uc-, return cannot be write-combine
263b310f381Svenkatesh.pallipadi@intel.com 		 * - request is write-combine, return cannot be write-back
264d7677d40Svenkatesh.pallipadi@intel.com 		 */
265de33c442SSuresh Siddha 		if ((prot_val == _PAGE_CACHE_UC_MINUS &&
266b310f381Svenkatesh.pallipadi@intel.com 		     (new_prot_val == _PAGE_CACHE_WB ||
267b310f381Svenkatesh.pallipadi@intel.com 		      new_prot_val == _PAGE_CACHE_WC)) ||
268b310f381Svenkatesh.pallipadi@intel.com 		    (prot_val == _PAGE_CACHE_WC &&
269d7677d40Svenkatesh.pallipadi@intel.com 		     new_prot_val == _PAGE_CACHE_WB)) {
270b450e5e8SVenki Pallipadi 			pr_debug(
2716997ab49Svenkatesh.pallipadi@intel.com 		"ioremap error for 0x%llx-0x%llx, requested 0x%lx, got 0x%lx\n",
2724c8337acSRandy Dunlap 				(unsigned long long)phys_addr,
2734c8337acSRandy Dunlap 				(unsigned long long)(phys_addr + size),
2746997ab49Svenkatesh.pallipadi@intel.com 				prot_val, new_prot_val);
275d7677d40Svenkatesh.pallipadi@intel.com 			free_memtype(phys_addr, phys_addr + size);
276d7677d40Svenkatesh.pallipadi@intel.com 			return NULL;
277d7677d40Svenkatesh.pallipadi@intel.com 		}
278d7677d40Svenkatesh.pallipadi@intel.com 		prot_val = new_prot_val;
279d7677d40Svenkatesh.pallipadi@intel.com 	}
280d7677d40Svenkatesh.pallipadi@intel.com 
2813a96ce8cSvenkatesh.pallipadi@intel.com 	switch (prot_val) {
2823a96ce8cSvenkatesh.pallipadi@intel.com 	case _PAGE_CACHE_UC:
283d806e5eeSThomas Gleixner 	default:
284be43d728SJeremy Fitzhardinge 		prot = PAGE_KERNEL_IO_NOCACHE;
285d806e5eeSThomas Gleixner 		break;
286de33c442SSuresh Siddha 	case _PAGE_CACHE_UC_MINUS:
287be43d728SJeremy Fitzhardinge 		prot = PAGE_KERNEL_IO_UC_MINUS;
288de33c442SSuresh Siddha 		break;
289b310f381Svenkatesh.pallipadi@intel.com 	case _PAGE_CACHE_WC:
290be43d728SJeremy Fitzhardinge 		prot = PAGE_KERNEL_IO_WC;
291b310f381Svenkatesh.pallipadi@intel.com 		break;
2923a96ce8cSvenkatesh.pallipadi@intel.com 	case _PAGE_CACHE_WB:
293be43d728SJeremy Fitzhardinge 		prot = PAGE_KERNEL_IO;
294d806e5eeSThomas Gleixner 		break;
295d806e5eeSThomas Gleixner 	}
296e64c8aa0SThomas Gleixner 
297e64c8aa0SThomas Gleixner 	/*
298e64c8aa0SThomas Gleixner 	 * Ok, go for it..
299e64c8aa0SThomas Gleixner 	 */
30023016969SChristoph Lameter 	area = get_vm_area_caller(size, VM_IOREMAP, caller);
301e64c8aa0SThomas Gleixner 	if (!area)
302e64c8aa0SThomas Gleixner 		return NULL;
303e64c8aa0SThomas Gleixner 	area->phys_addr = phys_addr;
304e66aadbeSThomas Gleixner 	vaddr = (unsigned long) area->addr;
305e66aadbeSThomas Gleixner 	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
306d7677d40Svenkatesh.pallipadi@intel.com 		free_memtype(phys_addr, phys_addr + size);
307b16bf712SIngo Molnar 		free_vm_area(area);
308e64c8aa0SThomas Gleixner 		return NULL;
309e64c8aa0SThomas Gleixner 	}
310e64c8aa0SThomas Gleixner 
3113a96ce8cSvenkatesh.pallipadi@intel.com 	if (ioremap_change_attr(vaddr, size, prot_val) < 0) {
312d7677d40Svenkatesh.pallipadi@intel.com 		free_memtype(phys_addr, phys_addr + size);
313e66aadbeSThomas Gleixner 		vunmap(area->addr);
314e64c8aa0SThomas Gleixner 		return NULL;
315e64c8aa0SThomas Gleixner 	}
316e64c8aa0SThomas Gleixner 
317d61fc448SPekka Paalanen 	ret_addr = (void __iomem *) (vaddr + offset);
31887e547feSPekka Paalanen 	mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);
319d61fc448SPekka Paalanen 
320d61fc448SPekka Paalanen 	return ret_addr;
321e64c8aa0SThomas Gleixner }
322e64c8aa0SThomas Gleixner 
323e64c8aa0SThomas Gleixner /**
324e64c8aa0SThomas Gleixner  * ioremap_nocache     -   map bus memory into CPU space
325e64c8aa0SThomas Gleixner  * @offset:    bus address of the memory
326e64c8aa0SThomas Gleixner  * @size:      size of the resource to map
327e64c8aa0SThomas Gleixner  *
328e64c8aa0SThomas Gleixner  * ioremap_nocache performs a platform-specific sequence of operations to
329e64c8aa0SThomas Gleixner  * make bus memory CPU accessible via the readb/readw/readl/writeb/
330e64c8aa0SThomas Gleixner  * writew/writel functions and the other mmio helpers. The returned
331e64c8aa0SThomas Gleixner  * address is not guaranteed to be usable directly as a virtual
332e64c8aa0SThomas Gleixner  * address.
333e64c8aa0SThomas Gleixner  *
334e64c8aa0SThomas Gleixner  * This version of ioremap ensures that the memory is marked uncacheable
335e64c8aa0SThomas Gleixner  * on the CPU as well as honouring existing caching rules from things like
336e64c8aa0SThomas Gleixner  * the PCI bus. Note that there are other caches and buffers on many
337e64c8aa0SThomas Gleixner  * buses. In particular, driver authors should read up on PCI writes.
338e64c8aa0SThomas Gleixner  *
339e64c8aa0SThomas Gleixner  * This is useful when some control registers are in such an area and
340e64c8aa0SThomas Gleixner  * write combining or read caching is not desirable.
341e64c8aa0SThomas Gleixner  *
342e64c8aa0SThomas Gleixner  * Must be freed with iounmap.
343e64c8aa0SThomas Gleixner  */
344b9e76a00SLinus Torvalds void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
345e64c8aa0SThomas Gleixner {
346de33c442SSuresh Siddha 	/*
347de33c442SSuresh Siddha 	 * Ideally, this should be:
348499f8f84SAndreas Herrmann 	 *	pat_enabled ? _PAGE_CACHE_UC : _PAGE_CACHE_UC_MINUS;
349de33c442SSuresh Siddha 	 *
350de33c442SSuresh Siddha 	 * Until we fix all X drivers to use ioremap_wc(), we will use
351de33c442SSuresh Siddha 	 * UC MINUS.
352de33c442SSuresh Siddha 	 */
353de33c442SSuresh Siddha 	unsigned long val = _PAGE_CACHE_UC_MINUS;
354de33c442SSuresh Siddha 
355de33c442SSuresh Siddha 	return __ioremap_caller(phys_addr, size, val,
35623016969SChristoph Lameter 				__builtin_return_address(0));
357e64c8aa0SThomas Gleixner }
358e64c8aa0SThomas Gleixner EXPORT_SYMBOL(ioremap_nocache);
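
/*
 * Illustrative sketch (hypothetical driver code; pdev is an assumed
 * struct pci_dev * and REG_CTRL a made-up register offset): map a BAR
 * uncached, access it with the mmio helpers, then unmap it again:
 *
 *	void __iomem *regs = ioremap_nocache(pci_resource_start(pdev, 0),
 *					     pci_resource_len(pdev, 0));
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + REG_CTRL);
 *	...
 *	iounmap(regs);
 */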
359e64c8aa0SThomas Gleixner 
360b310f381Svenkatesh.pallipadi@intel.com /**
361b310f381Svenkatesh.pallipadi@intel.com  * ioremap_wc	-	map memory into CPU space write combined
362b310f381Svenkatesh.pallipadi@intel.com  * @offset:	bus address of the memory
363b310f381Svenkatesh.pallipadi@intel.com  * @size:	size of the resource to map
364b310f381Svenkatesh.pallipadi@intel.com  *
365b310f381Svenkatesh.pallipadi@intel.com  * This version of ioremap ensures that the memory is marked write combining.
366b310f381Svenkatesh.pallipadi@intel.com  * Write combining allows faster writes to some hardware devices.
367b310f381Svenkatesh.pallipadi@intel.com  *
368b310f381Svenkatesh.pallipadi@intel.com  * Must be freed with iounmap.
369b310f381Svenkatesh.pallipadi@intel.com  */
370b310f381Svenkatesh.pallipadi@intel.com void __iomem *ioremap_wc(unsigned long phys_addr, unsigned long size)
371b310f381Svenkatesh.pallipadi@intel.com {
372499f8f84SAndreas Herrmann 	if (pat_enabled)
37323016969SChristoph Lameter 		return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC,
37423016969SChristoph Lameter 					__builtin_return_address(0));
375b310f381Svenkatesh.pallipadi@intel.com 	else
376b310f381Svenkatesh.pallipadi@intel.com 		return ioremap_nocache(phys_addr, size);
377b310f381Svenkatesh.pallipadi@intel.com }
378b310f381Svenkatesh.pallipadi@intel.com EXPORT_SYMBOL(ioremap_wc);
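
/*
 * Hedged example: write-combining suits framebuffer-like regions where
 * streaming writes dominate (fb_phys and fb_len are assumed to come from
 * the device):
 *
 *	void __iomem *fb = ioremap_wc(fb_phys, fb_len);
 *	if (fb)
 *		memset_io(fb, 0, fb_len);
 */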
379b310f381Svenkatesh.pallipadi@intel.com 
380b9e76a00SLinus Torvalds void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
3815f868152SThomas Gleixner {
38223016969SChristoph Lameter 	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WB,
38323016969SChristoph Lameter 				__builtin_return_address(0));
3845f868152SThomas Gleixner }
3855f868152SThomas Gleixner EXPORT_SYMBOL(ioremap_cache);
3865f868152SThomas Gleixner 
387a361ee5cSVenkatesh Pallipadi static void __iomem *ioremap_default(resource_size_t phys_addr,
388a361ee5cSVenkatesh Pallipadi 					unsigned long size)
389a361ee5cSVenkatesh Pallipadi {
390a361ee5cSVenkatesh Pallipadi 	unsigned long flags;
3911d6cf1feSHarvey Harrison 	void __iomem *ret;
392a361ee5cSVenkatesh Pallipadi 	int err;
393a361ee5cSVenkatesh Pallipadi 
394a361ee5cSVenkatesh Pallipadi 	/*
395a361ee5cSVenkatesh Pallipadi 	 * - WB for WB-able memory and no other conflicting mappings
396a361ee5cSVenkatesh Pallipadi 	 * - UC_MINUS for non-WB-able memory with no other conflicting mappings
397a361ee5cSVenkatesh Pallipadi 	 * - Inherit from conflicting mappings otherwise
398a361ee5cSVenkatesh Pallipadi 	 */
399a361ee5cSVenkatesh Pallipadi 	err = reserve_memtype(phys_addr, phys_addr + size, -1, &flags);
400a361ee5cSVenkatesh Pallipadi 	if (err < 0)
401a361ee5cSVenkatesh Pallipadi 		return NULL;
402a361ee5cSVenkatesh Pallipadi 
4031d6cf1feSHarvey Harrison 	ret = __ioremap_caller(phys_addr, size, flags,
404a361ee5cSVenkatesh Pallipadi 			       __builtin_return_address(0));
405a361ee5cSVenkatesh Pallipadi 
406a361ee5cSVenkatesh Pallipadi 	free_memtype(phys_addr, phys_addr + size);
4071d6cf1feSHarvey Harrison 	return ret;
408a361ee5cSVenkatesh Pallipadi }
409a361ee5cSVenkatesh Pallipadi 
41028b2ee20SRik van Riel void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
41128b2ee20SRik van Riel 				unsigned long prot_val)
41228b2ee20SRik van Riel {
41328b2ee20SRik van Riel 	return __ioremap_caller(phys_addr, size, (prot_val & _PAGE_CACHE_MASK),
41428b2ee20SRik van Riel 				__builtin_return_address(0));
41528b2ee20SRik van Riel }
41628b2ee20SRik van Riel EXPORT_SYMBOL(ioremap_prot);
41728b2ee20SRik van Riel 
418e64c8aa0SThomas Gleixner /**
419e64c8aa0SThomas Gleixner  * iounmap - Free an IO remapping
420e64c8aa0SThomas Gleixner  * @addr: virtual address from ioremap_*
421e64c8aa0SThomas Gleixner  *
422e64c8aa0SThomas Gleixner  * Caller must ensure there is only one unmapping for the same pointer.
423e64c8aa0SThomas Gleixner  */
424e64c8aa0SThomas Gleixner void iounmap(volatile void __iomem *addr)
425e64c8aa0SThomas Gleixner {
426e64c8aa0SThomas Gleixner 	struct vm_struct *p, *o;
427e64c8aa0SThomas Gleixner 
428e64c8aa0SThomas Gleixner 	if ((void __force *)addr <= high_memory)
429e64c8aa0SThomas Gleixner 		return;
430e64c8aa0SThomas Gleixner 
431e64c8aa0SThomas Gleixner 	/*
432e64c8aa0SThomas Gleixner 	 * __ioremap special-cases the PCI/ISA range by not instantiating a
433e64c8aa0SThomas Gleixner 	 * vm_area and by simply returning an address into the kernel mapping
434e64c8aa0SThomas Gleixner 	 * of ISA space.   So handle that here.
435e64c8aa0SThomas Gleixner 	 */
4366e92a5a6SThomas Gleixner 	if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
4376e92a5a6SThomas Gleixner 	    (void __force *)addr < phys_to_virt(ISA_END_ADDRESS))
438e64c8aa0SThomas Gleixner 		return;
439e64c8aa0SThomas Gleixner 
440e64c8aa0SThomas Gleixner 	addr = (volatile void __iomem *)
441e64c8aa0SThomas Gleixner 		(PAGE_MASK & (unsigned long __force)addr);
442e64c8aa0SThomas Gleixner 
443d61fc448SPekka Paalanen 	mmiotrace_iounmap(addr);
444d61fc448SPekka Paalanen 
445e64c8aa0SThomas Gleixner 	/* Use the vm area unlocked, assuming the caller
446e64c8aa0SThomas Gleixner 	   ensures there isn't another iounmap for the same address
447e64c8aa0SThomas Gleixner 	   in parallel. Reuse of the virtual address is prevented by
448e64c8aa0SThomas Gleixner 	   leaving it in the global lists until we're done with it.
449e64c8aa0SThomas Gleixner 	   cpa takes care of the direct mappings. */
450e64c8aa0SThomas Gleixner 	read_lock(&vmlist_lock);
451e64c8aa0SThomas Gleixner 	for (p = vmlist; p; p = p->next) {
4526e92a5a6SThomas Gleixner 		if (p->addr == (void __force *)addr)
453e64c8aa0SThomas Gleixner 			break;
454e64c8aa0SThomas Gleixner 	}
455e64c8aa0SThomas Gleixner 	read_unlock(&vmlist_lock);
456e64c8aa0SThomas Gleixner 
457e64c8aa0SThomas Gleixner 	if (!p) {
458e64c8aa0SThomas Gleixner 		printk(KERN_ERR "iounmap: bad address %p\n", addr);
459e64c8aa0SThomas Gleixner 		dump_stack();
460e64c8aa0SThomas Gleixner 		return;
461e64c8aa0SThomas Gleixner 	}
462e64c8aa0SThomas Gleixner 
463d7677d40Svenkatesh.pallipadi@intel.com 	free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));
464d7677d40Svenkatesh.pallipadi@intel.com 
465e64c8aa0SThomas Gleixner 	/* Finally remove it */
4666e92a5a6SThomas Gleixner 	o = remove_vm_area((void __force *)addr);
467e64c8aa0SThomas Gleixner 	BUG_ON(p != o || o == NULL);
468e64c8aa0SThomas Gleixner 	kfree(p);
469e64c8aa0SThomas Gleixner }
470e64c8aa0SThomas Gleixner EXPORT_SYMBOL(iounmap);
471e64c8aa0SThomas Gleixner 
472e045fb2aSvenkatesh.pallipadi@intel.com /*
473e045fb2aSvenkatesh.pallipadi@intel.com  * Convert a physical pointer to a virtual kernel pointer for /dev/mem
474e045fb2aSvenkatesh.pallipadi@intel.com  * access
475e045fb2aSvenkatesh.pallipadi@intel.com  */
476e045fb2aSvenkatesh.pallipadi@intel.com void *xlate_dev_mem_ptr(unsigned long phys)
477e045fb2aSvenkatesh.pallipadi@intel.com {
478e045fb2aSvenkatesh.pallipadi@intel.com 	void *addr;
479e045fb2aSvenkatesh.pallipadi@intel.com 	unsigned long start = phys & PAGE_MASK;
480e045fb2aSvenkatesh.pallipadi@intel.com 
481e045fb2aSvenkatesh.pallipadi@intel.com 	/* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
482e045fb2aSvenkatesh.pallipadi@intel.com 	if (page_is_ram(start >> PAGE_SHIFT))
483e045fb2aSvenkatesh.pallipadi@intel.com 		return __va(phys);
484e045fb2aSvenkatesh.pallipadi@intel.com 
485ae94b807SIngo Molnar 	addr = (void __force *)ioremap_default(start, PAGE_SIZE);
486e045fb2aSvenkatesh.pallipadi@intel.com 	if (addr)
487e045fb2aSvenkatesh.pallipadi@intel.com 		addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));
488e045fb2aSvenkatesh.pallipadi@intel.com 
489e045fb2aSvenkatesh.pallipadi@intel.com 	return addr;
490e045fb2aSvenkatesh.pallipadi@intel.com }
491e045fb2aSvenkatesh.pallipadi@intel.com 
492e045fb2aSvenkatesh.pallipadi@intel.com void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
493e045fb2aSvenkatesh.pallipadi@intel.com {
494e045fb2aSvenkatesh.pallipadi@intel.com 	if (page_is_ram(phys >> PAGE_SHIFT))
495e045fb2aSvenkatesh.pallipadi@intel.com 		return;
496e045fb2aSvenkatesh.pallipadi@intel.com 
497e045fb2aSvenkatesh.pallipadi@intel.com 	iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
498e045fb2aSvenkatesh.pallipadi@intel.com 	return;
499e045fb2aSvenkatesh.pallipadi@intel.com }
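
/*
 * Illustrative pattern (a sketch of a /dev/mem style read path; p, buf,
 * count and rc are assumed caller-provided):
 *
 *	void *ptr = xlate_dev_mem_ptr(p);
 *	if (!ptr)
 *		return -EFAULT;
 *	if (copy_to_user(buf, ptr, count))
 *		rc = -EFAULT;
 *	unxlate_dev_mem_ptr(p, ptr);
 */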
500e045fb2aSvenkatesh.pallipadi@intel.com 
5014b6e9f27SJaswinder Singh static int __initdata early_ioremap_debug;
502e64c8aa0SThomas Gleixner 
503e64c8aa0SThomas Gleixner static int __init early_ioremap_debug_setup(char *str)
504e64c8aa0SThomas Gleixner {
505e64c8aa0SThomas Gleixner 	early_ioremap_debug = 1;
506e64c8aa0SThomas Gleixner 
507e64c8aa0SThomas Gleixner 	return 0;
508e64c8aa0SThomas Gleixner }
509e64c8aa0SThomas Gleixner early_param("early_ioremap_debug", early_ioremap_debug_setup);
510e64c8aa0SThomas Gleixner 
511e64c8aa0SThomas Gleixner static __initdata int after_paging_init;
512a7bf0bd5SJeremy Fitzhardinge static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
513e64c8aa0SThomas Gleixner 
514551889a6SIan Campbell static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
515e64c8aa0SThomas Gleixner {
51637cc8d7fSJeremy Fitzhardinge 	/* Don't assume we're using swapper_pg_dir at this point */
51737cc8d7fSJeremy Fitzhardinge 	pgd_t *base = __va(read_cr3());
51837cc8d7fSJeremy Fitzhardinge 	pgd_t *pgd = &base[pgd_index(addr)];
519551889a6SIan Campbell 	pud_t *pud = pud_offset(pgd, addr);
520551889a6SIan Campbell 	pmd_t *pmd = pmd_offset(pud, addr);
521551889a6SIan Campbell 
522551889a6SIan Campbell 	return pmd;
523e64c8aa0SThomas Gleixner }
524e64c8aa0SThomas Gleixner 
525551889a6SIan Campbell static inline pte_t * __init early_ioremap_pte(unsigned long addr)
526e64c8aa0SThomas Gleixner {
527551889a6SIan Campbell 	return &bm_pte[pte_index(addr)];
528e64c8aa0SThomas Gleixner }
529e64c8aa0SThomas Gleixner 
530e64c8aa0SThomas Gleixner void __init early_ioremap_init(void)
531e64c8aa0SThomas Gleixner {
532551889a6SIan Campbell 	pmd_t *pmd;
533e64c8aa0SThomas Gleixner 
534e64c8aa0SThomas Gleixner 	if (early_ioremap_debug)
535adafdf6aSIngo Molnar 		printk(KERN_INFO "early_ioremap_init()\n");
536e64c8aa0SThomas Gleixner 
537551889a6SIan Campbell 	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
538e64c8aa0SThomas Gleixner 	memset(bm_pte, 0, sizeof(bm_pte));
539b6fbb669SIan Campbell 	pmd_populate_kernel(&init_mm, pmd, bm_pte);
540551889a6SIan Campbell 
541e64c8aa0SThomas Gleixner 	/*
542551889a6SIan Campbell 	 * The boot-ioremap range spans multiple pmds, for which
543e64c8aa0SThomas Gleixner 	 * we are not prepared:
544e64c8aa0SThomas Gleixner 	 */
545551889a6SIan Campbell 	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
546e64c8aa0SThomas Gleixner 		WARN_ON(1);
547551889a6SIan Campbell 		printk(KERN_WARNING "pmd %p != %p\n",
548551889a6SIan Campbell 		       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
549e64c8aa0SThomas Gleixner 		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
550e64c8aa0SThomas Gleixner 			fix_to_virt(FIX_BTMAP_BEGIN));
551e64c8aa0SThomas Gleixner 		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
552e64c8aa0SThomas Gleixner 			fix_to_virt(FIX_BTMAP_END));
553e64c8aa0SThomas Gleixner 
554e64c8aa0SThomas Gleixner 		printk(KERN_WARNING "FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
555e64c8aa0SThomas Gleixner 		printk(KERN_WARNING "FIX_BTMAP_BEGIN:     %d\n",
556e64c8aa0SThomas Gleixner 		       FIX_BTMAP_BEGIN);
557e64c8aa0SThomas Gleixner 	}
558e64c8aa0SThomas Gleixner }
559e64c8aa0SThomas Gleixner 
560e64c8aa0SThomas Gleixner void __init early_ioremap_clear(void)
561e64c8aa0SThomas Gleixner {
562551889a6SIan Campbell 	pmd_t *pmd;
563e64c8aa0SThomas Gleixner 
564e64c8aa0SThomas Gleixner 	if (early_ioremap_debug)
565adafdf6aSIngo Molnar 		printk(KERN_INFO "early_ioremap_clear()\n");
566e64c8aa0SThomas Gleixner 
567551889a6SIan Campbell 	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
568551889a6SIan Campbell 	pmd_clear(pmd);
5696944a9c8SJeremy Fitzhardinge 	paravirt_release_pte(__pa(bm_pte) >> PAGE_SHIFT);
570e64c8aa0SThomas Gleixner 	__flush_tlb_all();
571e64c8aa0SThomas Gleixner }
572e64c8aa0SThomas Gleixner 
573e64c8aa0SThomas Gleixner void __init early_ioremap_reset(void)
574e64c8aa0SThomas Gleixner {
575e64c8aa0SThomas Gleixner 	enum fixed_addresses idx;
576551889a6SIan Campbell 	unsigned long addr, phys;
577551889a6SIan Campbell 	pte_t *pte;
578e64c8aa0SThomas Gleixner 
579e64c8aa0SThomas Gleixner 	after_paging_init = 1;
580e64c8aa0SThomas Gleixner 	for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) {
581e64c8aa0SThomas Gleixner 		addr = fix_to_virt(idx);
582e64c8aa0SThomas Gleixner 		pte = early_ioremap_pte(addr);
583551889a6SIan Campbell 		if (pte_present(*pte)) {
584551889a6SIan Campbell 			phys = pte_val(*pte) & PAGE_MASK;
585e64c8aa0SThomas Gleixner 			set_fixmap(idx, phys);
586e64c8aa0SThomas Gleixner 		}
587e64c8aa0SThomas Gleixner 	}
588e64c8aa0SThomas Gleixner }
589e64c8aa0SThomas Gleixner 
590e64c8aa0SThomas Gleixner static void __init __early_set_fixmap(enum fixed_addresses idx,
591e64c8aa0SThomas Gleixner 				   unsigned long phys, pgprot_t flags)
592e64c8aa0SThomas Gleixner {
593551889a6SIan Campbell 	unsigned long addr = __fix_to_virt(idx);
594551889a6SIan Campbell 	pte_t *pte;
595e64c8aa0SThomas Gleixner 
596e64c8aa0SThomas Gleixner 	if (idx >= __end_of_fixed_addresses) {
597e64c8aa0SThomas Gleixner 		BUG();
598e64c8aa0SThomas Gleixner 		return;
599e64c8aa0SThomas Gleixner 	}
600e64c8aa0SThomas Gleixner 	pte = early_ioremap_pte(addr);
6014583ed51SJeremy Fitzhardinge 
602e64c8aa0SThomas Gleixner 	if (pgprot_val(flags))
603551889a6SIan Campbell 		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
604e64c8aa0SThomas Gleixner 	else
6054f9c11ddSJeremy Fitzhardinge 		pte_clear(&init_mm, addr, pte);
606e64c8aa0SThomas Gleixner 	__flush_tlb_one(addr);
607e64c8aa0SThomas Gleixner }
608e64c8aa0SThomas Gleixner 
609e64c8aa0SThomas Gleixner static inline void __init early_set_fixmap(enum fixed_addresses idx,
61014941779SJeremy Fitzhardinge 					   unsigned long phys, pgprot_t prot)
611e64c8aa0SThomas Gleixner {
612e64c8aa0SThomas Gleixner 	if (after_paging_init)
61314941779SJeremy Fitzhardinge 		__set_fixmap(idx, phys, prot);
614e64c8aa0SThomas Gleixner 	else
61514941779SJeremy Fitzhardinge 		__early_set_fixmap(idx, phys, prot);
616e64c8aa0SThomas Gleixner }
617e64c8aa0SThomas Gleixner 
618e64c8aa0SThomas Gleixner static inline void __init early_clear_fixmap(enum fixed_addresses idx)
619e64c8aa0SThomas Gleixner {
620e64c8aa0SThomas Gleixner 	if (after_paging_init)
621e64c8aa0SThomas Gleixner 		clear_fixmap(idx);
622e64c8aa0SThomas Gleixner 	else
623e64c8aa0SThomas Gleixner 		__early_set_fixmap(idx, 0, __pgprot(0));
624e64c8aa0SThomas Gleixner }
625e64c8aa0SThomas Gleixner 
6261d6cf1feSHarvey Harrison static void __iomem *prev_map[FIX_BTMAPS_SLOTS] __initdata;
627c1a2f4b1SYinghai Lu static unsigned long prev_size[FIX_BTMAPS_SLOTS] __initdata;
628e64c8aa0SThomas Gleixner static int __init check_early_ioremap_leak(void)
629e64c8aa0SThomas Gleixner {
630c1a2f4b1SYinghai Lu 	int count = 0;
631c1a2f4b1SYinghai Lu 	int i;
632c1a2f4b1SYinghai Lu 
633c1a2f4b1SYinghai Lu 	for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
634c1a2f4b1SYinghai Lu 		if (prev_map[i])
635c1a2f4b1SYinghai Lu 			count++;
636c1a2f4b1SYinghai Lu 
637c1a2f4b1SYinghai Lu 	if (!count)
638e64c8aa0SThomas Gleixner 		return 0;
6390c072bb4SArjan van de Ven 	WARN(1, KERN_WARNING
640e64c8aa0SThomas Gleixner 	       "Debug warning: early ioremap leak of %d areas detected.\n",
641c1a2f4b1SYinghai Lu 		count);
642e64c8aa0SThomas Gleixner 	printk(KERN_WARNING
643e64c8aa0SThomas Gleixner 		"please boot with early_ioremap_debug and report the dmesg.\n");
644e64c8aa0SThomas Gleixner 
645e64c8aa0SThomas Gleixner 	return 1;
646e64c8aa0SThomas Gleixner }
647e64c8aa0SThomas Gleixner late_initcall(check_early_ioremap_leak);
648e64c8aa0SThomas Gleixner 
6491d6cf1feSHarvey Harrison static void __init __iomem *__early_ioremap(unsigned long phys_addr, unsigned long size, pgprot_t prot)
650e64c8aa0SThomas Gleixner {
651e64c8aa0SThomas Gleixner 	unsigned long offset, last_addr;
652c1a2f4b1SYinghai Lu 	unsigned int nrpages;
653e64c8aa0SThomas Gleixner 	enum fixed_addresses idx0, idx;
654c1a2f4b1SYinghai Lu 	int i, slot;
655e64c8aa0SThomas Gleixner 
656e64c8aa0SThomas Gleixner 	WARN_ON(system_state != SYSTEM_BOOTING);
657e64c8aa0SThomas Gleixner 
658c1a2f4b1SYinghai Lu 	slot = -1;
659c1a2f4b1SYinghai Lu 	for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
660c1a2f4b1SYinghai Lu 		if (!prev_map[i]) {
661c1a2f4b1SYinghai Lu 			slot = i;
662c1a2f4b1SYinghai Lu 			break;
663c1a2f4b1SYinghai Lu 		}
664c1a2f4b1SYinghai Lu 	}
665c1a2f4b1SYinghai Lu 
666c1a2f4b1SYinghai Lu 	if (slot < 0) {
667c1a2f4b1SYinghai Lu 		printk(KERN_INFO "early_ioremap(%08lx, %08lx): no free slot found\n",
668c1a2f4b1SYinghai Lu 			 phys_addr, size);
669c1a2f4b1SYinghai Lu 		WARN_ON(1);
670c1a2f4b1SYinghai Lu 		return NULL;
671c1a2f4b1SYinghai Lu 	}
672c1a2f4b1SYinghai Lu 
673e64c8aa0SThomas Gleixner 	if (early_ioremap_debug) {
674adafdf6aSIngo Molnar 		printk(KERN_INFO "early_ioremap(%08lx, %08lx) [%d] => ",
675c1a2f4b1SYinghai Lu 		       phys_addr, size, slot);
676e64c8aa0SThomas Gleixner 		dump_stack();
677e64c8aa0SThomas Gleixner 	}
678e64c8aa0SThomas Gleixner 
679e64c8aa0SThomas Gleixner 	/* Don't allow wraparound or zero size */
680e64c8aa0SThomas Gleixner 	last_addr = phys_addr + size - 1;
681e64c8aa0SThomas Gleixner 	if (!size || last_addr < phys_addr) {
682e64c8aa0SThomas Gleixner 		WARN_ON(1);
683e64c8aa0SThomas Gleixner 		return NULL;
684e64c8aa0SThomas Gleixner 	}
685e64c8aa0SThomas Gleixner 
686c1a2f4b1SYinghai Lu 	prev_size[slot] = size;
687e64c8aa0SThomas Gleixner 	/*
688e64c8aa0SThomas Gleixner 	 * Mappings have to be page-aligned
689e64c8aa0SThomas Gleixner 	 */
690e64c8aa0SThomas Gleixner 	offset = phys_addr & ~PAGE_MASK;
691e64c8aa0SThomas Gleixner 	phys_addr &= PAGE_MASK;
692c613ec1aSAlan Cox 	size = PAGE_ALIGN(last_addr + 1) - phys_addr;
693e64c8aa0SThomas Gleixner 
694e64c8aa0SThomas Gleixner 	/*
695e64c8aa0SThomas Gleixner 	 * Mappings have to fit in the FIX_BTMAP area.
696e64c8aa0SThomas Gleixner 	 */
697e64c8aa0SThomas Gleixner 	nrpages = size >> PAGE_SHIFT;
698e64c8aa0SThomas Gleixner 	if (nrpages > NR_FIX_BTMAPS) {
699e64c8aa0SThomas Gleixner 		WARN_ON(1);
700e64c8aa0SThomas Gleixner 		return NULL;
701e64c8aa0SThomas Gleixner 	}
702e64c8aa0SThomas Gleixner 
703e64c8aa0SThomas Gleixner 	/*
704e64c8aa0SThomas Gleixner 	 * Ok, go for it..
705e64c8aa0SThomas Gleixner 	 */
706c1a2f4b1SYinghai Lu 	idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
707e64c8aa0SThomas Gleixner 	idx = idx0;
708e64c8aa0SThomas Gleixner 	while (nrpages > 0) {
70914941779SJeremy Fitzhardinge 		early_set_fixmap(idx, phys_addr, prot);
710e64c8aa0SThomas Gleixner 		phys_addr += PAGE_SIZE;
711e64c8aa0SThomas Gleixner 		--idx;
712e64c8aa0SThomas Gleixner 		--nrpages;
713e64c8aa0SThomas Gleixner 	}
714e64c8aa0SThomas Gleixner 	if (early_ioremap_debug)
715e64c8aa0SThomas Gleixner 		printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));
716e64c8aa0SThomas Gleixner 
7171d6cf1feSHarvey Harrison 	prev_map[slot] = (void __iomem *)(offset + fix_to_virt(idx0));
718c1a2f4b1SYinghai Lu 	return prev_map[slot];
719e64c8aa0SThomas Gleixner }
720e64c8aa0SThomas Gleixner 
72114941779SJeremy Fitzhardinge /* Remap an IO device */
7221d6cf1feSHarvey Harrison void __init __iomem *early_ioremap(unsigned long phys_addr, unsigned long size)
72314941779SJeremy Fitzhardinge {
72414941779SJeremy Fitzhardinge 	return __early_ioremap(phys_addr, size, PAGE_KERNEL_IO);
72514941779SJeremy Fitzhardinge }
72614941779SJeremy Fitzhardinge 
72714941779SJeremy Fitzhardinge /* Remap memory */
7281d6cf1feSHarvey Harrison void __init __iomem *early_memremap(unsigned long phys_addr, unsigned long size)
72914941779SJeremy Fitzhardinge {
73014941779SJeremy Fitzhardinge 	return __early_ioremap(phys_addr, size, PAGE_KERNEL);
73114941779SJeremy Fitzhardinge }
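
/*
 * Hedged boot-time example (table_phys, len and local_copy are assumed;
 * e.g. copying a firmware table discovered during early boot):
 *
 *	void __iomem *map = early_memremap(table_phys, len);
 *	if (map) {
 *		memcpy(&local_copy, (__force void *)map, len);
 *		early_iounmap(map, len);
 *	}
 */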
73214941779SJeremy Fitzhardinge 
7331d6cf1feSHarvey Harrison void __init early_iounmap(void __iomem *addr, unsigned long size)
734e64c8aa0SThomas Gleixner {
735e64c8aa0SThomas Gleixner 	unsigned long virt_addr;
736e64c8aa0SThomas Gleixner 	unsigned long offset;
737e64c8aa0SThomas Gleixner 	unsigned int nrpages;
738e64c8aa0SThomas Gleixner 	enum fixed_addresses idx;
739c1a2f4b1SYinghai Lu 	int i, slot;
740e64c8aa0SThomas Gleixner 
741c1a2f4b1SYinghai Lu 	slot = -1;
742c1a2f4b1SYinghai Lu 	for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
743c1a2f4b1SYinghai Lu 		if (prev_map[i] == addr) {
744c1a2f4b1SYinghai Lu 			slot = i;
745c1a2f4b1SYinghai Lu 			break;
746c1a2f4b1SYinghai Lu 		}
747c1a2f4b1SYinghai Lu 	}
748c1a2f4b1SYinghai Lu 
749c1a2f4b1SYinghai Lu 	if (slot < 0) {
750c1a2f4b1SYinghai Lu 		printk(KERN_INFO "early_iounmap(%p, %08lx): slot not found\n",
751c1a2f4b1SYinghai Lu 			 addr, size);
752c1a2f4b1SYinghai Lu 		WARN_ON(1);
753226e9a93SIngo Molnar 		return;
754c1a2f4b1SYinghai Lu 	}
755c1a2f4b1SYinghai Lu 
756c1a2f4b1SYinghai Lu 	if (prev_size[slot] != size) {
757c1a2f4b1SYinghai Lu 		printk(KERN_INFO "early_iounmap(%p, %08lx) [%d] size does not match mapped size %08lx\n",
758c1a2f4b1SYinghai Lu 			 addr, size, slot, prev_size[slot]);
759c1a2f4b1SYinghai Lu 		WARN_ON(1);
760c1a2f4b1SYinghai Lu 		return;
761c1a2f4b1SYinghai Lu 	}
762e64c8aa0SThomas Gleixner 
763e64c8aa0SThomas Gleixner 	if (early_ioremap_debug) {
764adafdf6aSIngo Molnar 		printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]\n", addr,
765c1a2f4b1SYinghai Lu 		       size, slot);
766e64c8aa0SThomas Gleixner 		dump_stack();
767e64c8aa0SThomas Gleixner 	}
768e64c8aa0SThomas Gleixner 
769e64c8aa0SThomas Gleixner 	virt_addr = (unsigned long)addr;
770e64c8aa0SThomas Gleixner 	if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
771e64c8aa0SThomas Gleixner 		WARN_ON(1);
772e64c8aa0SThomas Gleixner 		return;
773e64c8aa0SThomas Gleixner 	}
774e64c8aa0SThomas Gleixner 	offset = virt_addr & ~PAGE_MASK;
775e64c8aa0SThomas Gleixner 	nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT;
776e64c8aa0SThomas Gleixner 
777c1a2f4b1SYinghai Lu 	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
778e64c8aa0SThomas Gleixner 	while (nrpages > 0) {
779e64c8aa0SThomas Gleixner 		early_clear_fixmap(idx);
780e64c8aa0SThomas Gleixner 		--idx;
781e64c8aa0SThomas Gleixner 		--nrpages;
782e64c8aa0SThomas Gleixner 	}
7831d6cf1feSHarvey Harrison 	prev_map[slot] = NULL;
784e64c8aa0SThomas Gleixner }
785e64c8aa0SThomas Gleixner 
786e64c8aa0SThomas Gleixner void __this_fixmap_does_not_exist(void)
787e64c8aa0SThomas Gleixner {
788e64c8aa0SThomas Gleixner 	WARN_ON(1);
789e64c8aa0SThomas Gleixner }
790