xref: /linux/arch/x86/mm/ioremap.c (revision e213e87785559eaf3107897226817aea9291b06f)
1e64c8aa0SThomas Gleixner /*
2e64c8aa0SThomas Gleixner  * Re-map IO memory to kernel address space so that we can access it.
3e64c8aa0SThomas Gleixner  * This is needed for high PCI addresses that aren't mapped in the
4e64c8aa0SThomas Gleixner  * 640k-1MB IO memory area on PCs
5e64c8aa0SThomas Gleixner  *
6e64c8aa0SThomas Gleixner  * (C) Copyright 1995 1996 Linus Torvalds
7e64c8aa0SThomas Gleixner  */
8e64c8aa0SThomas Gleixner 
9e64c8aa0SThomas Gleixner #include <linux/bootmem.h>
10e64c8aa0SThomas Gleixner #include <linux/init.h>
11e64c8aa0SThomas Gleixner #include <linux/io.h>
12e64c8aa0SThomas Gleixner #include <linux/module.h>
13e64c8aa0SThomas Gleixner #include <linux/slab.h>
14e64c8aa0SThomas Gleixner #include <linux/vmalloc.h>
15d61fc448SPekka Paalanen #include <linux/mmiotrace.h>
16e64c8aa0SThomas Gleixner 
17e64c8aa0SThomas Gleixner #include <asm/cacheflush.h>
18e64c8aa0SThomas Gleixner #include <asm/e820.h>
19e64c8aa0SThomas Gleixner #include <asm/fixmap.h>
20e64c8aa0SThomas Gleixner #include <asm/pgtable.h>
21e64c8aa0SThomas Gleixner #include <asm/tlbflush.h>
22f6df72e7SJeremy Fitzhardinge #include <asm/pgalloc.h>
23d7677d40Svenkatesh.pallipadi@intel.com #include <asm/pat.h>
24e64c8aa0SThomas Gleixner 
25e64c8aa0SThomas Gleixner #ifdef CONFIG_X86_64
26e64c8aa0SThomas Gleixner 
27e64c8aa0SThomas Gleixner unsigned long __phys_addr(unsigned long x)
28e64c8aa0SThomas Gleixner {
29e64c8aa0SThomas Gleixner 	if (x >= __START_KERNEL_map)
30e64c8aa0SThomas Gleixner 		return x - __START_KERNEL_map + phys_base;
31e64c8aa0SThomas Gleixner 	return x - PAGE_OFFSET;
32e64c8aa0SThomas Gleixner }
33e64c8aa0SThomas Gleixner EXPORT_SYMBOL(__phys_addr);
34e64c8aa0SThomas Gleixner 
35e3100c82SThomas Gleixner static inline int phys_addr_valid(unsigned long addr)
36e3100c82SThomas Gleixner {
37e3100c82SThomas Gleixner 	return addr < (1UL << boot_cpu_data.x86_phys_bits);
38e3100c82SThomas Gleixner }
39e3100c82SThomas Gleixner 
40e3100c82SThomas Gleixner #else
41e3100c82SThomas Gleixner 
42e3100c82SThomas Gleixner static inline int phys_addr_valid(unsigned long addr)
43e3100c82SThomas Gleixner {
44e3100c82SThomas Gleixner 	return 1;
45e3100c82SThomas Gleixner }
46e3100c82SThomas Gleixner 
47e64c8aa0SThomas Gleixner #endif
48e64c8aa0SThomas Gleixner 
495f5192b9SThomas Gleixner int page_is_ram(unsigned long pagenr)
505f5192b9SThomas Gleixner {
51756a6c68SIngo Molnar 	resource_size_t addr, end;
525f5192b9SThomas Gleixner 	int i;
535f5192b9SThomas Gleixner 
54d8a9e6a5SArjan van de Ven 	/*
55d8a9e6a5SArjan van de Ven 	 * A special case is the first 4KB of memory:
56d8a9e6a5SArjan van de Ven 	 * this is a BIOS-owned area, not kernel RAM, but generally
57d8a9e6a5SArjan van de Ven 	 * not listed as such in the E820 table.
58d8a9e6a5SArjan van de Ven 	 */
59d8a9e6a5SArjan van de Ven 	if (pagenr == 0)
60d8a9e6a5SArjan van de Ven 		return 0;
61d8a9e6a5SArjan van de Ven 
62156fbc3fSArjan van de Ven 	/*
63156fbc3fSArjan van de Ven 	 * Second special case: some BIOSes report the PC BIOS
64156fbc3fSArjan van de Ven 	 * area (640KB->1MB) as RAM even though it is not.
65156fbc3fSArjan van de Ven 	 */
66156fbc3fSArjan van de Ven 	if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
67156fbc3fSArjan van de Ven 		    pagenr < (BIOS_END >> PAGE_SHIFT))
68156fbc3fSArjan van de Ven 		return 0;
69d8a9e6a5SArjan van de Ven 
705f5192b9SThomas Gleixner 	for (i = 0; i < e820.nr_map; i++) {
715f5192b9SThomas Gleixner 		/*
725f5192b9SThomas Gleixner 		 * Not usable memory:
735f5192b9SThomas Gleixner 		 */
745f5192b9SThomas Gleixner 		if (e820.map[i].type != E820_RAM)
755f5192b9SThomas Gleixner 			continue;
765f5192b9SThomas Gleixner 		addr = (e820.map[i].addr + PAGE_SIZE-1) >> PAGE_SHIFT;
775f5192b9SThomas Gleixner 		end = (e820.map[i].addr + e820.map[i].size) >> PAGE_SHIFT;
78950f9d95SThomas Gleixner 
805f5192b9SThomas Gleixner 		if ((pagenr >= addr) && (pagenr < end))
815f5192b9SThomas Gleixner 			return 1;
825f5192b9SThomas Gleixner 	}
835f5192b9SThomas Gleixner 	return 0;
845f5192b9SThomas Gleixner }
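
/*
 * Usage sketch (illustrative, not part of this file): callers pass a page
 * frame number, so a physical address has to be shifted down first, e.g.
 *
 *	if (page_is_ram(phys >> PAGE_SHIFT))
 *		ptr = __va(phys);
 *
 * as xlate_dev_mem_ptr() does further down in this file.
 */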
855f5192b9SThomas Gleixner 
86e64c8aa0SThomas Gleixner /*
87e64c8aa0SThomas Gleixner  * Fix up the linear direct mapping of the kernel to avoid cache attribute
88e64c8aa0SThomas Gleixner  * conflicts.
89e64c8aa0SThomas Gleixner  */
903a96ce8cSvenkatesh.pallipadi@intel.com int ioremap_change_attr(unsigned long vaddr, unsigned long size,
913a96ce8cSvenkatesh.pallipadi@intel.com 			       unsigned long prot_val)
92e64c8aa0SThomas Gleixner {
93d806e5eeSThomas Gleixner 	unsigned long nrpages = size >> PAGE_SHIFT;
9493809be8SHarvey Harrison 	int err;
95e64c8aa0SThomas Gleixner 
963a96ce8cSvenkatesh.pallipadi@intel.com 	switch (prot_val) {
973a96ce8cSvenkatesh.pallipadi@intel.com 	case _PAGE_CACHE_UC:
98d806e5eeSThomas Gleixner 	default:
991219333dSvenkatesh.pallipadi@intel.com 		err = _set_memory_uc(vaddr, nrpages);
100d806e5eeSThomas Gleixner 		break;
101b310f381Svenkatesh.pallipadi@intel.com 	case _PAGE_CACHE_WC:
102b310f381Svenkatesh.pallipadi@intel.com 		err = _set_memory_wc(vaddr, nrpages);
103b310f381Svenkatesh.pallipadi@intel.com 		break;
1043a96ce8cSvenkatesh.pallipadi@intel.com 	case _PAGE_CACHE_WB:
1051219333dSvenkatesh.pallipadi@intel.com 		err = _set_memory_wb(vaddr, nrpages);
106d806e5eeSThomas Gleixner 		break;
107d806e5eeSThomas Gleixner 	}
108e64c8aa0SThomas Gleixner 
109e64c8aa0SThomas Gleixner 	return err;
110e64c8aa0SThomas Gleixner }
111e64c8aa0SThomas Gleixner 
112e64c8aa0SThomas Gleixner /*
113e64c8aa0SThomas Gleixner  * Remap an arbitrary physical address space into the kernel virtual
114e64c8aa0SThomas Gleixner  * address space. Needed when the kernel wants to access high addresses
115e64c8aa0SThomas Gleixner  * directly.
116e64c8aa0SThomas Gleixner  *
117e64c8aa0SThomas Gleixner  * NOTE! We need to allow non-page-aligned mappings too: we will obviously
118e64c8aa0SThomas Gleixner  * have to convert them into an offset in a page-aligned mapping, but the
119e64c8aa0SThomas Gleixner  * caller shouldn't need to know that small detail.
120e64c8aa0SThomas Gleixner  */
12123016969SChristoph Lameter static void __iomem *__ioremap_caller(resource_size_t phys_addr,
12223016969SChristoph Lameter 		unsigned long size, unsigned long prot_val, void *caller)
123e64c8aa0SThomas Gleixner {
124756a6c68SIngo Molnar 	unsigned long pfn, offset, vaddr;
125756a6c68SIngo Molnar 	resource_size_t last_addr;
12687e547feSPekka Paalanen 	const resource_size_t unaligned_phys_addr = phys_addr;
12787e547feSPekka Paalanen 	const unsigned long unaligned_size = size;
128e64c8aa0SThomas Gleixner 	struct vm_struct *area;
129d7677d40Svenkatesh.pallipadi@intel.com 	unsigned long new_prot_val;
130d806e5eeSThomas Gleixner 	pgprot_t prot;
131dee7cbb2SVenki Pallipadi 	int retval;
132d61fc448SPekka Paalanen 	void __iomem *ret_addr;
133e64c8aa0SThomas Gleixner 
134e64c8aa0SThomas Gleixner 	/* Don't allow wraparound or zero size */
135e64c8aa0SThomas Gleixner 	last_addr = phys_addr + size - 1;
136e64c8aa0SThomas Gleixner 	if (!size || last_addr < phys_addr)
137e64c8aa0SThomas Gleixner 		return NULL;
138e64c8aa0SThomas Gleixner 
139e3100c82SThomas Gleixner 	if (!phys_addr_valid(phys_addr)) {
1406997ab49Svenkatesh.pallipadi@intel.com 		printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
1414c8337acSRandy Dunlap 		       (unsigned long long)phys_addr);
142e3100c82SThomas Gleixner 		WARN_ON_ONCE(1);
143e3100c82SThomas Gleixner 		return NULL;
144e3100c82SThomas Gleixner 	}
145e3100c82SThomas Gleixner 
146e64c8aa0SThomas Gleixner 	/*
147e64c8aa0SThomas Gleixner 	 * Don't remap the low PCI/ISA area; it's always mapped.
148e64c8aa0SThomas Gleixner 	 */
149bcc643dcSAndreas Herrmann 	if (is_ISA_range(phys_addr, last_addr))
150e64c8aa0SThomas Gleixner 		return (__force void __iomem *)phys_to_virt(phys_addr);
151e64c8aa0SThomas Gleixner 
152e64c8aa0SThomas Gleixner 	/*
153e64c8aa0SThomas Gleixner 	 * Don't allow anybody to remap normal RAM that we're using..
154e64c8aa0SThomas Gleixner 	 */
155cb8ab687SAndres Salomon 	for (pfn = phys_addr >> PAGE_SHIFT;
156cb8ab687SAndres Salomon 				(pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK);
157cb8ab687SAndres Salomon 				pfn++) {
158bdd3cee2SIngo Molnar 
159ba748d22SIngo Molnar 		int is_ram = page_is_ram(pfn);
160ba748d22SIngo Molnar 
161ba748d22SIngo Molnar 		if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
162e64c8aa0SThomas Gleixner 			return NULL;
163ba748d22SIngo Molnar 		WARN_ON_ONCE(is_ram);
164e64c8aa0SThomas Gleixner 	}
165e64c8aa0SThomas Gleixner 
166d7677d40Svenkatesh.pallipadi@intel.com 	/*
167d7677d40Svenkatesh.pallipadi@intel.com 	 * Mappings have to be page-aligned
168d7677d40Svenkatesh.pallipadi@intel.com 	 */
169d7677d40Svenkatesh.pallipadi@intel.com 	offset = phys_addr & ~PAGE_MASK;
170d7677d40Svenkatesh.pallipadi@intel.com 	phys_addr &= PAGE_MASK;
171d7677d40Svenkatesh.pallipadi@intel.com 	size = PAGE_ALIGN(last_addr+1) - phys_addr;
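
	/*
	 * Worked example (illustrative numbers): for phys_addr 0xfe001234 and
	 * size 0x10, offset becomes 0x234, phys_addr is rounded down to
	 * 0xfe001000 and size is rounded up to one page; the whole page gets
	 * mapped and the caller receives vaddr + 0x234.
	 */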
172d7677d40Svenkatesh.pallipadi@intel.com 
173*e213e877SAndi Kleen 	retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
174dee7cbb2SVenki Pallipadi 						prot_val, &new_prot_val);
175dee7cbb2SVenki Pallipadi 	if (retval) {
176b450e5e8SVenki Pallipadi 		pr_debug("Warning: reserve_memtype returned %d\n", retval);
177dee7cbb2SVenki Pallipadi 		return NULL;
178dee7cbb2SVenki Pallipadi 	}
179dee7cbb2SVenki Pallipadi 
180dee7cbb2SVenki Pallipadi 	if (prot_val != new_prot_val) {
181d7677d40Svenkatesh.pallipadi@intel.com 		/*
182d7677d40Svenkatesh.pallipadi@intel.com 		 * Do not fall back to certain memory types with certain
183d7677d40Svenkatesh.pallipadi@intel.com 		 * requested type:
184de33c442SSuresh Siddha 		 * - request is uc-, return cannot be write-back
185de33c442SSuresh Siddha 		 * - request is uc-, return cannot be write-combine
186b310f381Svenkatesh.pallipadi@intel.com 		 * - request is write-combine, return cannot be write-back
187d7677d40Svenkatesh.pallipadi@intel.com 		 */
188de33c442SSuresh Siddha 		if ((prot_val == _PAGE_CACHE_UC_MINUS &&
189b310f381Svenkatesh.pallipadi@intel.com 		     (new_prot_val == _PAGE_CACHE_WB ||
190b310f381Svenkatesh.pallipadi@intel.com 		      new_prot_val == _PAGE_CACHE_WC)) ||
191b310f381Svenkatesh.pallipadi@intel.com 		    (prot_val == _PAGE_CACHE_WC &&
192d7677d40Svenkatesh.pallipadi@intel.com 		     new_prot_val == _PAGE_CACHE_WB)) {
193b450e5e8SVenki Pallipadi 			pr_debug(
1946997ab49Svenkatesh.pallipadi@intel.com 		"ioremap error for 0x%llx-0x%llx, requested 0x%lx, got 0x%lx\n",
1954c8337acSRandy Dunlap 				(unsigned long long)phys_addr,
1964c8337acSRandy Dunlap 				(unsigned long long)(phys_addr + size),
1976997ab49Svenkatesh.pallipadi@intel.com 				prot_val, new_prot_val);
198d7677d40Svenkatesh.pallipadi@intel.com 			free_memtype(phys_addr, phys_addr + size);
199d7677d40Svenkatesh.pallipadi@intel.com 			return NULL;
200d7677d40Svenkatesh.pallipadi@intel.com 		}
201d7677d40Svenkatesh.pallipadi@intel.com 		prot_val = new_prot_val;
202d7677d40Svenkatesh.pallipadi@intel.com 	}
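
	/*
	 * Example of the rule above (hypothetical): if this range was already
	 * reserved as write-back (say by an earlier ioremap_cache() of the
	 * same area), a new ioremap_wc() request may come back with
	 * new_prot_val == _PAGE_CACHE_WB, trip the check and fail instead of
	 * creating an alias with mismatched cache attributes.
	 */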
203d7677d40Svenkatesh.pallipadi@intel.com 
2043a96ce8cSvenkatesh.pallipadi@intel.com 	switch (prot_val) {
2053a96ce8cSvenkatesh.pallipadi@intel.com 	case _PAGE_CACHE_UC:
206d806e5eeSThomas Gleixner 	default:
20755c62682SIngo Molnar 		prot = PAGE_KERNEL_NOCACHE;
208d806e5eeSThomas Gleixner 		break;
209de33c442SSuresh Siddha 	case _PAGE_CACHE_UC_MINUS:
210de33c442SSuresh Siddha 		prot = PAGE_KERNEL_UC_MINUS;
211de33c442SSuresh Siddha 		break;
212b310f381Svenkatesh.pallipadi@intel.com 	case _PAGE_CACHE_WC:
213b310f381Svenkatesh.pallipadi@intel.com 		prot = PAGE_KERNEL_WC;
214b310f381Svenkatesh.pallipadi@intel.com 		break;
2153a96ce8cSvenkatesh.pallipadi@intel.com 	case _PAGE_CACHE_WB:
216d806e5eeSThomas Gleixner 		prot = PAGE_KERNEL;
217d806e5eeSThomas Gleixner 		break;
218d806e5eeSThomas Gleixner 	}
219e64c8aa0SThomas Gleixner 
220e64c8aa0SThomas Gleixner 	/*
221e64c8aa0SThomas Gleixner 	 * Ok, go for it..
222e64c8aa0SThomas Gleixner 	 */
22323016969SChristoph Lameter 	area = get_vm_area_caller(size, VM_IOREMAP, caller);
224e64c8aa0SThomas Gleixner 	if (!area)
225e64c8aa0SThomas Gleixner 		return NULL;
226e64c8aa0SThomas Gleixner 	area->phys_addr = phys_addr;
227e66aadbeSThomas Gleixner 	vaddr = (unsigned long) area->addr;
228e66aadbeSThomas Gleixner 	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
229d7677d40Svenkatesh.pallipadi@intel.com 		free_memtype(phys_addr, phys_addr + size);
230b16bf712SIngo Molnar 		free_vm_area(area);
231e64c8aa0SThomas Gleixner 		return NULL;
232e64c8aa0SThomas Gleixner 	}
233e64c8aa0SThomas Gleixner 
2343a96ce8cSvenkatesh.pallipadi@intel.com 	if (ioremap_change_attr(vaddr, size, prot_val) < 0) {
235d7677d40Svenkatesh.pallipadi@intel.com 		free_memtype(phys_addr, phys_addr + size);
236e66aadbeSThomas Gleixner 		vunmap(area->addr);
237e64c8aa0SThomas Gleixner 		return NULL;
238e64c8aa0SThomas Gleixner 	}
239e64c8aa0SThomas Gleixner 
240d61fc448SPekka Paalanen 	ret_addr = (void __iomem *) (vaddr + offset);
24187e547feSPekka Paalanen 	mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);
242d61fc448SPekka Paalanen 
243d61fc448SPekka Paalanen 	return ret_addr;
244e64c8aa0SThomas Gleixner }
245e64c8aa0SThomas Gleixner 
246e64c8aa0SThomas Gleixner /**
247e64c8aa0SThomas Gleixner  * ioremap_nocache     -   map bus memory into CPU space
248e64c8aa0SThomas Gleixner  * @phys_addr:    bus address of the memory
249e64c8aa0SThomas Gleixner  * @size:      size of the resource to map
250e64c8aa0SThomas Gleixner  *
251e64c8aa0SThomas Gleixner  * ioremap_nocache performs a platform specific sequence of operations to
252e64c8aa0SThomas Gleixner  * make bus memory CPU accessible via the readb/readw/readl/writeb/
253e64c8aa0SThomas Gleixner  * writew/writel functions and the other mmio helpers. The returned
254e64c8aa0SThomas Gleixner  * address is not guaranteed to be usable directly as a virtual
255e64c8aa0SThomas Gleixner  * address.
256e64c8aa0SThomas Gleixner  *
257e64c8aa0SThomas Gleixner  * This version of ioremap ensures that the memory is marked uncacheable
258e64c8aa0SThomas Gleixner  * on the CPU as well as honouring existing caching rules from things like
259e64c8aa0SThomas Gleixner  * the PCI bus. Note that there are other caches and buffers on many
260e64c8aa0SThomas Gleixner  * busses. In particular, driver authors should read up on PCI writes.
261e64c8aa0SThomas Gleixner  *
262e64c8aa0SThomas Gleixner  * It's useful if some control registers are in such an area and
263e64c8aa0SThomas Gleixner  * write combining or read caching is not desirable.
264e64c8aa0SThomas Gleixner  *
265e64c8aa0SThomas Gleixner  * Must be freed with iounmap.
266e64c8aa0SThomas Gleixner  */
267b9e76a00SLinus Torvalds void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
268e64c8aa0SThomas Gleixner {
269de33c442SSuresh Siddha 	/*
270de33c442SSuresh Siddha 	 * Ideally, this should be:
271499f8f84SAndreas Herrmann 	 *	pat_enabled ? _PAGE_CACHE_UC : _PAGE_CACHE_UC_MINUS;
272de33c442SSuresh Siddha 	 *
273de33c442SSuresh Siddha 	 * Until we fix all X drivers to use ioremap_wc(), we will use
274de33c442SSuresh Siddha 	 * UC MINUS.
275de33c442SSuresh Siddha 	 */
276de33c442SSuresh Siddha 	unsigned long val = _PAGE_CACHE_UC_MINUS;
277de33c442SSuresh Siddha 
278de33c442SSuresh Siddha 	return __ioremap_caller(phys_addr, size, val,
27923016969SChristoph Lameter 				__builtin_return_address(0));
280e64c8aa0SThomas Gleixner }
281e64c8aa0SThomas Gleixner EXPORT_SYMBOL(ioremap_nocache);
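
/*
 * Usage sketch (illustrative, not part of this file): a typical PCI driver
 * maps a register BAR with ioremap_nocache() and then touches it only
 * through the mmio helpers.  MY_REG is a hypothetical register offset.
 *
 *	void __iomem *regs;
 *
 *	regs = ioremap_nocache(pci_resource_start(pdev, 0),
 *			       pci_resource_len(pdev, 0));
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(0x1, regs + MY_REG);
 *	iounmap(regs);
 */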
282e64c8aa0SThomas Gleixner 
283b310f381Svenkatesh.pallipadi@intel.com /**
284b310f381Svenkatesh.pallipadi@intel.com  * ioremap_wc	-	map memory into CPU space write combined
285b310f381Svenkatesh.pallipadi@intel.com  * @phys_addr:	bus address of the memory
286b310f381Svenkatesh.pallipadi@intel.com  * @size:	size of the resource to map
287b310f381Svenkatesh.pallipadi@intel.com  *
288b310f381Svenkatesh.pallipadi@intel.com  * This version of ioremap ensures that the memory is marked write combining.
289b310f381Svenkatesh.pallipadi@intel.com  * Write combining allows faster writes to some hardware devices.
290b310f381Svenkatesh.pallipadi@intel.com  *
291b310f381Svenkatesh.pallipadi@intel.com  * Must be freed with iounmap.
292b310f381Svenkatesh.pallipadi@intel.com  */
293b310f381Svenkatesh.pallipadi@intel.com void __iomem *ioremap_wc(unsigned long phys_addr, unsigned long size)
294b310f381Svenkatesh.pallipadi@intel.com {
295499f8f84SAndreas Herrmann 	if (pat_enabled)
29623016969SChristoph Lameter 		return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC,
29723016969SChristoph Lameter 					__builtin_return_address(0));
298b310f381Svenkatesh.pallipadi@intel.com 	else
299b310f381Svenkatesh.pallipadi@intel.com 		return ioremap_nocache(phys_addr, size);
300b310f381Svenkatesh.pallipadi@intel.com }
301b310f381Svenkatesh.pallipadi@intel.com EXPORT_SYMBOL(ioremap_wc);
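
/*
 * Usage sketch (illustrative, not part of this file): write combining is
 * typically used for framebuffer-like memory, where streaming writes matter
 * more than strict ordering.  fb_phys and fb_len are hypothetical.
 *
 *	void __iomem *fb = ioremap_wc(fb_phys, fb_len);
 *
 * Note that when PAT is disabled this degrades to ioremap_nocache(), as the
 * fallback above shows.
 */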
302b310f381Svenkatesh.pallipadi@intel.com 
303b9e76a00SLinus Torvalds void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
3045f868152SThomas Gleixner {
30523016969SChristoph Lameter 	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WB,
30623016969SChristoph Lameter 				__builtin_return_address(0));
3075f868152SThomas Gleixner }
3085f868152SThomas Gleixner EXPORT_SYMBOL(ioremap_cache);
3095f868152SThomas Gleixner 
310a361ee5cSVenkatesh Pallipadi static void __iomem *ioremap_default(resource_size_t phys_addr,
311a361ee5cSVenkatesh Pallipadi 					unsigned long size)
312a361ee5cSVenkatesh Pallipadi {
313a361ee5cSVenkatesh Pallipadi 	unsigned long flags;
314a361ee5cSVenkatesh Pallipadi 	void *ret;
315a361ee5cSVenkatesh Pallipadi 	int err;
316a361ee5cSVenkatesh Pallipadi 
317a361ee5cSVenkatesh Pallipadi 	/*
318a361ee5cSVenkatesh Pallipadi 	 * - WB for WB-able memory and no other conflicting mappings
319a361ee5cSVenkatesh Pallipadi 	 * - UC_MINUS for non-WB-able memory with no other conflicting mappings
320a361ee5cSVenkatesh Pallipadi 	 * - Inherit from conflicting mappings otherwise
321a361ee5cSVenkatesh Pallipadi 	 */
322a361ee5cSVenkatesh Pallipadi 	err = reserve_memtype(phys_addr, phys_addr + size, -1, &flags);
323a361ee5cSVenkatesh Pallipadi 	if (err < 0)
324a361ee5cSVenkatesh Pallipadi 		return NULL;
325a361ee5cSVenkatesh Pallipadi 
326a361ee5cSVenkatesh Pallipadi 	ret = (void *) __ioremap_caller(phys_addr, size, flags,
327a361ee5cSVenkatesh Pallipadi 					__builtin_return_address(0));
328a361ee5cSVenkatesh Pallipadi 
329a361ee5cSVenkatesh Pallipadi 	free_memtype(phys_addr, phys_addr + size);
330a361ee5cSVenkatesh Pallipadi 	return (void __iomem *)ret;
331a361ee5cSVenkatesh Pallipadi }
332a361ee5cSVenkatesh Pallipadi 
33328b2ee20SRik van Riel void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
33428b2ee20SRik van Riel 				unsigned long prot_val)
33528b2ee20SRik van Riel {
33628b2ee20SRik van Riel 	return __ioremap_caller(phys_addr, size, (prot_val & _PAGE_CACHE_MASK),
33728b2ee20SRik van Riel 				__builtin_return_address(0));
33828b2ee20SRik van Riel }
33928b2ee20SRik van Riel EXPORT_SYMBOL(ioremap_prot);
34028b2ee20SRik van Riel 
341e64c8aa0SThomas Gleixner /**
342e64c8aa0SThomas Gleixner  * iounmap - Free an IO remapping
343e64c8aa0SThomas Gleixner  * @addr: virtual address from ioremap_*
344e64c8aa0SThomas Gleixner  *
345e64c8aa0SThomas Gleixner  * Caller must ensure there is only one unmapping for the same pointer.
346e64c8aa0SThomas Gleixner  */
347e64c8aa0SThomas Gleixner void iounmap(volatile void __iomem *addr)
348e64c8aa0SThomas Gleixner {
349e64c8aa0SThomas Gleixner 	struct vm_struct *p, *o;
350e64c8aa0SThomas Gleixner 
351e64c8aa0SThomas Gleixner 	if ((void __force *)addr <= high_memory)
352e64c8aa0SThomas Gleixner 		return;
353e64c8aa0SThomas Gleixner 
354e64c8aa0SThomas Gleixner 	/*
355e64c8aa0SThomas Gleixner 	 * __ioremap special-cases the PCI/ISA range by not instantiating a
356e64c8aa0SThomas Gleixner 	 * vm_area and by simply returning an address into the kernel mapping
357e64c8aa0SThomas Gleixner 	 * of ISA space.   So handle that here.
358e64c8aa0SThomas Gleixner 	 */
3596e92a5a6SThomas Gleixner 	if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
3606e92a5a6SThomas Gleixner 	    (void __force *)addr < phys_to_virt(ISA_END_ADDRESS))
361e64c8aa0SThomas Gleixner 		return;
362e64c8aa0SThomas Gleixner 
363e64c8aa0SThomas Gleixner 	addr = (volatile void __iomem *)
364e64c8aa0SThomas Gleixner 		(PAGE_MASK & (unsigned long __force)addr);
365e64c8aa0SThomas Gleixner 
366d61fc448SPekka Paalanen 	mmiotrace_iounmap(addr);
367d61fc448SPekka Paalanen 
368e64c8aa0SThomas Gleixner 	/* Use the vm area unlocked, assuming the caller
369e64c8aa0SThomas Gleixner 	   ensures there isn't another iounmap for the same address
370e64c8aa0SThomas Gleixner 	   in parallel. Reuse of the virtual address is prevented by
371e64c8aa0SThomas Gleixner 	   leaving it in the global lists until we're done with it.
372e64c8aa0SThomas Gleixner 	   cpa takes care of the direct mappings. */
373e64c8aa0SThomas Gleixner 	read_lock(&vmlist_lock);
374e64c8aa0SThomas Gleixner 	for (p = vmlist; p; p = p->next) {
3756e92a5a6SThomas Gleixner 		if (p->addr == (void __force *)addr)
376e64c8aa0SThomas Gleixner 			break;
377e64c8aa0SThomas Gleixner 	}
378e64c8aa0SThomas Gleixner 	read_unlock(&vmlist_lock);
379e64c8aa0SThomas Gleixner 
380e64c8aa0SThomas Gleixner 	if (!p) {
381e64c8aa0SThomas Gleixner 		printk(KERN_ERR "iounmap: bad address %p\n", addr);
382e64c8aa0SThomas Gleixner 		dump_stack();
383e64c8aa0SThomas Gleixner 		return;
384e64c8aa0SThomas Gleixner 	}
385e64c8aa0SThomas Gleixner 
386d7677d40Svenkatesh.pallipadi@intel.com 	free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));
387d7677d40Svenkatesh.pallipadi@intel.com 
388e64c8aa0SThomas Gleixner 	/* Finally remove it */
3896e92a5a6SThomas Gleixner 	o = remove_vm_area((void __force *)addr);
390e64c8aa0SThomas Gleixner 	BUG_ON(p != o || o == NULL);
391e64c8aa0SThomas Gleixner 	kfree(p);
392e64c8aa0SThomas Gleixner }
393e64c8aa0SThomas Gleixner EXPORT_SYMBOL(iounmap);
394e64c8aa0SThomas Gleixner 
395e045fb2aSvenkatesh.pallipadi@intel.com /*
396e045fb2aSvenkatesh.pallipadi@intel.com  * Convert a physical pointer to a virtual kernel pointer for /dev/mem
397e045fb2aSvenkatesh.pallipadi@intel.com  * access
398e045fb2aSvenkatesh.pallipadi@intel.com  */
399e045fb2aSvenkatesh.pallipadi@intel.com void *xlate_dev_mem_ptr(unsigned long phys)
400e045fb2aSvenkatesh.pallipadi@intel.com {
401e045fb2aSvenkatesh.pallipadi@intel.com 	void *addr;
402e045fb2aSvenkatesh.pallipadi@intel.com 	unsigned long start = phys & PAGE_MASK;
403e045fb2aSvenkatesh.pallipadi@intel.com 
404e045fb2aSvenkatesh.pallipadi@intel.com 	/* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
405e045fb2aSvenkatesh.pallipadi@intel.com 	if (page_is_ram(start >> PAGE_SHIFT))
406e045fb2aSvenkatesh.pallipadi@intel.com 		return __va(phys);
407e045fb2aSvenkatesh.pallipadi@intel.com 
408ae94b807SIngo Molnar 	addr = (void __force *)ioremap_default(start, PAGE_SIZE);
409e045fb2aSvenkatesh.pallipadi@intel.com 	if (addr)
410e045fb2aSvenkatesh.pallipadi@intel.com 		addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));
411e045fb2aSvenkatesh.pallipadi@intel.com 
412e045fb2aSvenkatesh.pallipadi@intel.com 	return addr;
413e045fb2aSvenkatesh.pallipadi@intel.com }
414e045fb2aSvenkatesh.pallipadi@intel.com 
415e045fb2aSvenkatesh.pallipadi@intel.com void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
416e045fb2aSvenkatesh.pallipadi@intel.com {
417e045fb2aSvenkatesh.pallipadi@intel.com 	if (page_is_ram(phys >> PAGE_SHIFT))
418e045fb2aSvenkatesh.pallipadi@intel.com 		return;
419e045fb2aSvenkatesh.pallipadi@intel.com 
420e045fb2aSvenkatesh.pallipadi@intel.com 	iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
421e045fb2aSvenkatesh.pallipadi@intel.com 	return;
422e045fb2aSvenkatesh.pallipadi@intel.com }
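
/*
 * Usage sketch (illustrative, not part of this file): the /dev/mem read path
 * in drivers/char/mem.c brackets the actual copy with these helpers, roughly:
 *
 *	ptr = xlate_dev_mem_ptr(p);
 *	if (!ptr)
 *		return -EFAULT;
 *	remaining = copy_to_user(buf, ptr, sz);
 *	unxlate_dev_mem_ptr(p, ptr);
 */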
423e045fb2aSvenkatesh.pallipadi@intel.com 
424e64c8aa0SThomas Gleixner int __initdata early_ioremap_debug;
425e64c8aa0SThomas Gleixner 
426e64c8aa0SThomas Gleixner static int __init early_ioremap_debug_setup(char *str)
427e64c8aa0SThomas Gleixner {
428e64c8aa0SThomas Gleixner 	early_ioremap_debug = 1;
429e64c8aa0SThomas Gleixner 
430e64c8aa0SThomas Gleixner 	return 0;
431e64c8aa0SThomas Gleixner }
432e64c8aa0SThomas Gleixner early_param("early_ioremap_debug", early_ioremap_debug_setup);
433e64c8aa0SThomas Gleixner 
434e64c8aa0SThomas Gleixner static __initdata int after_paging_init;
435a7bf0bd5SJeremy Fitzhardinge static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
436e64c8aa0SThomas Gleixner 
437551889a6SIan Campbell static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
438e64c8aa0SThomas Gleixner {
43937cc8d7fSJeremy Fitzhardinge 	/* Don't assume we're using swapper_pg_dir at this point */
44037cc8d7fSJeremy Fitzhardinge 	pgd_t *base = __va(read_cr3());
44137cc8d7fSJeremy Fitzhardinge 	pgd_t *pgd = &base[pgd_index(addr)];
442551889a6SIan Campbell 	pud_t *pud = pud_offset(pgd, addr);
443551889a6SIan Campbell 	pmd_t *pmd = pmd_offset(pud, addr);
444551889a6SIan Campbell 
445551889a6SIan Campbell 	return pmd;
446e64c8aa0SThomas Gleixner }
447e64c8aa0SThomas Gleixner 
448551889a6SIan Campbell static inline pte_t * __init early_ioremap_pte(unsigned long addr)
449e64c8aa0SThomas Gleixner {
450551889a6SIan Campbell 	return &bm_pte[pte_index(addr)];
451e64c8aa0SThomas Gleixner }
452e64c8aa0SThomas Gleixner 
453e64c8aa0SThomas Gleixner void __init early_ioremap_init(void)
454e64c8aa0SThomas Gleixner {
455551889a6SIan Campbell 	pmd_t *pmd;
456e64c8aa0SThomas Gleixner 
457e64c8aa0SThomas Gleixner 	if (early_ioremap_debug)
458adafdf6aSIngo Molnar 		printk(KERN_INFO "early_ioremap_init()\n");
459e64c8aa0SThomas Gleixner 
460551889a6SIan Campbell 	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
461e64c8aa0SThomas Gleixner 	memset(bm_pte, 0, sizeof(bm_pte));
462b6fbb669SIan Campbell 	pmd_populate_kernel(&init_mm, pmd, bm_pte);
463551889a6SIan Campbell 
464e64c8aa0SThomas Gleixner 	/*
465551889a6SIan Campbell 	 * The boot-ioremap range spans multiple pmds, for which
466e64c8aa0SThomas Gleixner 	 * we are not prepared:
467e64c8aa0SThomas Gleixner 	 */
468551889a6SIan Campbell 	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
469e64c8aa0SThomas Gleixner 		WARN_ON(1);
470551889a6SIan Campbell 		printk(KERN_WARNING "pmd %p != %p\n",
471551889a6SIan Campbell 		       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
472e64c8aa0SThomas Gleixner 		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
473e64c8aa0SThomas Gleixner 			fix_to_virt(FIX_BTMAP_BEGIN));
474e64c8aa0SThomas Gleixner 		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
475e64c8aa0SThomas Gleixner 			fix_to_virt(FIX_BTMAP_END));
476e64c8aa0SThomas Gleixner 
477e64c8aa0SThomas Gleixner 		printk(KERN_WARNING "FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
478e64c8aa0SThomas Gleixner 		printk(KERN_WARNING "FIX_BTMAP_BEGIN:     %d\n",
479e64c8aa0SThomas Gleixner 		       FIX_BTMAP_BEGIN);
480e64c8aa0SThomas Gleixner 	}
481e64c8aa0SThomas Gleixner }
482e64c8aa0SThomas Gleixner 
483e64c8aa0SThomas Gleixner void __init early_ioremap_clear(void)
484e64c8aa0SThomas Gleixner {
485551889a6SIan Campbell 	pmd_t *pmd;
486e64c8aa0SThomas Gleixner 
487e64c8aa0SThomas Gleixner 	if (early_ioremap_debug)
488adafdf6aSIngo Molnar 		printk(KERN_INFO "early_ioremap_clear()\n");
489e64c8aa0SThomas Gleixner 
490551889a6SIan Campbell 	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
491551889a6SIan Campbell 	pmd_clear(pmd);
4926944a9c8SJeremy Fitzhardinge 	paravirt_release_pte(__pa(bm_pte) >> PAGE_SHIFT);
493e64c8aa0SThomas Gleixner 	__flush_tlb_all();
494e64c8aa0SThomas Gleixner }
495e64c8aa0SThomas Gleixner 
496e64c8aa0SThomas Gleixner void __init early_ioremap_reset(void)
497e64c8aa0SThomas Gleixner {
498e64c8aa0SThomas Gleixner 	enum fixed_addresses idx;
499551889a6SIan Campbell 	unsigned long addr, phys;
500551889a6SIan Campbell 	pte_t *pte;
501e64c8aa0SThomas Gleixner 
502e64c8aa0SThomas Gleixner 	after_paging_init = 1;
503e64c8aa0SThomas Gleixner 	for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) {
504e64c8aa0SThomas Gleixner 		addr = fix_to_virt(idx);
505e64c8aa0SThomas Gleixner 		pte = early_ioremap_pte(addr);
506551889a6SIan Campbell 		if (pte_present(*pte)) {
507551889a6SIan Campbell 			phys = pte_val(*pte) & PAGE_MASK;
508e64c8aa0SThomas Gleixner 			set_fixmap(idx, phys);
509e64c8aa0SThomas Gleixner 		}
510e64c8aa0SThomas Gleixner 	}
511e64c8aa0SThomas Gleixner }
512e64c8aa0SThomas Gleixner 
513e64c8aa0SThomas Gleixner static void __init __early_set_fixmap(enum fixed_addresses idx,
514e64c8aa0SThomas Gleixner 				   unsigned long phys, pgprot_t flags)
515e64c8aa0SThomas Gleixner {
516551889a6SIan Campbell 	unsigned long addr = __fix_to_virt(idx);
517551889a6SIan Campbell 	pte_t *pte;
518e64c8aa0SThomas Gleixner 
519e64c8aa0SThomas Gleixner 	if (idx >= __end_of_fixed_addresses) {
520e64c8aa0SThomas Gleixner 		BUG();
521e64c8aa0SThomas Gleixner 		return;
522e64c8aa0SThomas Gleixner 	}
523e64c8aa0SThomas Gleixner 	pte = early_ioremap_pte(addr);
5244583ed51SJeremy Fitzhardinge 
525e64c8aa0SThomas Gleixner 	if (pgprot_val(flags))
526551889a6SIan Campbell 		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
527e64c8aa0SThomas Gleixner 	else
5284f9c11ddSJeremy Fitzhardinge 		pte_clear(&init_mm, addr, pte);
529e64c8aa0SThomas Gleixner 	__flush_tlb_one(addr);
530e64c8aa0SThomas Gleixner }
531e64c8aa0SThomas Gleixner 
532e64c8aa0SThomas Gleixner static inline void __init early_set_fixmap(enum fixed_addresses idx,
533e64c8aa0SThomas Gleixner 					unsigned long phys)
534e64c8aa0SThomas Gleixner {
535e64c8aa0SThomas Gleixner 	if (after_paging_init)
536e64c8aa0SThomas Gleixner 		set_fixmap(idx, phys);
537e64c8aa0SThomas Gleixner 	else
538e64c8aa0SThomas Gleixner 		__early_set_fixmap(idx, phys, PAGE_KERNEL);
539e64c8aa0SThomas Gleixner }
540e64c8aa0SThomas Gleixner 
541e64c8aa0SThomas Gleixner static inline void __init early_clear_fixmap(enum fixed_addresses idx)
542e64c8aa0SThomas Gleixner {
543e64c8aa0SThomas Gleixner 	if (after_paging_init)
544e64c8aa0SThomas Gleixner 		clear_fixmap(idx);
545e64c8aa0SThomas Gleixner 	else
546e64c8aa0SThomas Gleixner 		__early_set_fixmap(idx, 0, __pgprot(0));
547e64c8aa0SThomas Gleixner }
548e64c8aa0SThomas Gleixner 
549e64c8aa0SThomas Gleixner 
550e64c8aa0SThomas Gleixner int __initdata early_ioremap_nested;
551e64c8aa0SThomas Gleixner 
552e64c8aa0SThomas Gleixner static int __init check_early_ioremap_leak(void)
553e64c8aa0SThomas Gleixner {
554e64c8aa0SThomas Gleixner 	if (!early_ioremap_nested)
555e64c8aa0SThomas Gleixner 		return 0;
556e64c8aa0SThomas Gleixner 
557e64c8aa0SThomas Gleixner 	printk(KERN_WARNING
558e64c8aa0SThomas Gleixner 	       "Debug warning: early ioremap leak of %d areas detected.\n",
559e64c8aa0SThomas Gleixner 	       early_ioremap_nested);
560e64c8aa0SThomas Gleixner 	printk(KERN_WARNING
561e64c8aa0SThomas Gleixner 	       "please boot with early_ioremap_debug and report the dmesg.\n");
562e64c8aa0SThomas Gleixner 	WARN_ON(1);
563e64c8aa0SThomas Gleixner 
564e64c8aa0SThomas Gleixner 	return 1;
565e64c8aa0SThomas Gleixner }
566e64c8aa0SThomas Gleixner late_initcall(check_early_ioremap_leak);
567e64c8aa0SThomas Gleixner 
568e64c8aa0SThomas Gleixner void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
569e64c8aa0SThomas Gleixner {
570e64c8aa0SThomas Gleixner 	unsigned long offset, last_addr;
571e64c8aa0SThomas Gleixner 	unsigned int nrpages, nesting;
572e64c8aa0SThomas Gleixner 	enum fixed_addresses idx0, idx;
573e64c8aa0SThomas Gleixner 
574e64c8aa0SThomas Gleixner 	WARN_ON(system_state != SYSTEM_BOOTING);
575e64c8aa0SThomas Gleixner 
576e64c8aa0SThomas Gleixner 	nesting = early_ioremap_nested;
577e64c8aa0SThomas Gleixner 	if (early_ioremap_debug) {
578adafdf6aSIngo Molnar 		printk(KERN_INFO "early_ioremap(%08lx, %08lx) [%d] => ",
579e64c8aa0SThomas Gleixner 		       phys_addr, size, nesting);
580e64c8aa0SThomas Gleixner 		dump_stack();
581e64c8aa0SThomas Gleixner 	}
582e64c8aa0SThomas Gleixner 
583e64c8aa0SThomas Gleixner 	/* Don't allow wraparound or zero size */
584e64c8aa0SThomas Gleixner 	last_addr = phys_addr + size - 1;
585e64c8aa0SThomas Gleixner 	if (!size || last_addr < phys_addr) {
586e64c8aa0SThomas Gleixner 		WARN_ON(1);
587e64c8aa0SThomas Gleixner 		return NULL;
588e64c8aa0SThomas Gleixner 	}
589e64c8aa0SThomas Gleixner 
590e64c8aa0SThomas Gleixner 	if (nesting >= FIX_BTMAPS_NESTING) {
591e64c8aa0SThomas Gleixner 		WARN_ON(1);
592e64c8aa0SThomas Gleixner 		return NULL;
593e64c8aa0SThomas Gleixner 	}
594e64c8aa0SThomas Gleixner 	early_ioremap_nested++;
595e64c8aa0SThomas Gleixner 	/*
596e64c8aa0SThomas Gleixner 	 * Mappings have to be page-aligned
597e64c8aa0SThomas Gleixner 	 */
598e64c8aa0SThomas Gleixner 	offset = phys_addr & ~PAGE_MASK;
599e64c8aa0SThomas Gleixner 	phys_addr &= PAGE_MASK;
600e64c8aa0SThomas Gleixner 	size = PAGE_ALIGN(last_addr + 1) - phys_addr;
601e64c8aa0SThomas Gleixner 
602e64c8aa0SThomas Gleixner 	/*
603e64c8aa0SThomas Gleixner 	 * Mappings have to fit in the FIX_BTMAP area.
604e64c8aa0SThomas Gleixner 	 */
605e64c8aa0SThomas Gleixner 	nrpages = size >> PAGE_SHIFT;
606e64c8aa0SThomas Gleixner 	if (nrpages > NR_FIX_BTMAPS) {
607e64c8aa0SThomas Gleixner 		WARN_ON(1);
608e64c8aa0SThomas Gleixner 		return NULL;
609e64c8aa0SThomas Gleixner 	}
610e64c8aa0SThomas Gleixner 
611e64c8aa0SThomas Gleixner 	/*
612e64c8aa0SThomas Gleixner 	 * Ok, go for it..
613e64c8aa0SThomas Gleixner 	 */
614e64c8aa0SThomas Gleixner 	idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
615e64c8aa0SThomas Gleixner 	idx = idx0;
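	/*
	 * Note: fixmap indices count down as virtual addresses go up, so for
	 * nesting level 0 the first page lands in slot FIX_BTMAP_BEGIN and
	 * each further page uses FIX_BTMAP_BEGIN - 1, FIX_BTMAP_BEGIN - 2,
	 * and so on, giving a virtually contiguous window of up to
	 * NR_FIX_BTMAPS pages.
	 */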
616e64c8aa0SThomas Gleixner 	while (nrpages > 0) {
617e64c8aa0SThomas Gleixner 		early_set_fixmap(idx, phys_addr);
618e64c8aa0SThomas Gleixner 		phys_addr += PAGE_SIZE;
619e64c8aa0SThomas Gleixner 		--idx;
620e64c8aa0SThomas Gleixner 		--nrpages;
621e64c8aa0SThomas Gleixner 	}
622e64c8aa0SThomas Gleixner 	if (early_ioremap_debug)
623e64c8aa0SThomas Gleixner 		printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));
624e64c8aa0SThomas Gleixner 
625e64c8aa0SThomas Gleixner 	return (void *) (offset + fix_to_virt(idx0));
626e64c8aa0SThomas Gleixner }
627e64c8aa0SThomas Gleixner 
628e64c8aa0SThomas Gleixner void __init early_iounmap(void *addr, unsigned long size)
629e64c8aa0SThomas Gleixner {
630e64c8aa0SThomas Gleixner 	unsigned long virt_addr;
631e64c8aa0SThomas Gleixner 	unsigned long offset;
632e64c8aa0SThomas Gleixner 	unsigned int nrpages;
633e64c8aa0SThomas Gleixner 	enum fixed_addresses idx;
634226e9a93SIngo Molnar 	int nesting;
635e64c8aa0SThomas Gleixner 
636e64c8aa0SThomas Gleixner 	nesting = --early_ioremap_nested;
637226e9a93SIngo Molnar 	if (WARN_ON(nesting < 0))
638226e9a93SIngo Molnar 		return;
639e64c8aa0SThomas Gleixner 
640e64c8aa0SThomas Gleixner 	if (early_ioremap_debug) {
641adafdf6aSIngo Molnar 		printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]\n", addr,
642e64c8aa0SThomas Gleixner 		       size, nesting);
643e64c8aa0SThomas Gleixner 		dump_stack();
644e64c8aa0SThomas Gleixner 	}
645e64c8aa0SThomas Gleixner 
646e64c8aa0SThomas Gleixner 	virt_addr = (unsigned long)addr;
647e64c8aa0SThomas Gleixner 	if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
648e64c8aa0SThomas Gleixner 		WARN_ON(1);
649e64c8aa0SThomas Gleixner 		return;
650e64c8aa0SThomas Gleixner 	}
651e64c8aa0SThomas Gleixner 	offset = virt_addr & ~PAGE_MASK;
652e64c8aa0SThomas Gleixner 	nrpages = PAGE_ALIGN(offset + size) >> PAGE_SHIFT;
653e64c8aa0SThomas Gleixner 
654e64c8aa0SThomas Gleixner 	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
655e64c8aa0SThomas Gleixner 	while (nrpages > 0) {
656e64c8aa0SThomas Gleixner 		early_clear_fixmap(idx);
657e64c8aa0SThomas Gleixner 		--idx;
658e64c8aa0SThomas Gleixner 		--nrpages;
659e64c8aa0SThomas Gleixner 	}
660e64c8aa0SThomas Gleixner }
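
/*
 * Usage sketch (illustrative, not part of this file): boot code that must
 * inspect firmware tables before the normal ioremap machinery is available
 * uses these helpers in matched pairs.  table_phys, table_len and table are
 * hypothetical; real callers include the DMI and EFI setup code.
 *
 *	void *p = early_ioremap(table_phys, table_len);
 *	if (p) {
 *		memcpy(&table, p, sizeof(table));
 *		early_iounmap(p, table_len);
 *	}
 */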
661e64c8aa0SThomas Gleixner 
662e64c8aa0SThomas Gleixner void __this_fixmap_does_not_exist(void)
663e64c8aa0SThomas Gleixner {
664e64c8aa0SThomas Gleixner 	WARN_ON(1);
665e64c8aa0SThomas Gleixner }