/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/pat.h>

#ifdef CONFIG_X86_64

unsigned long __phys_addr(unsigned long x)
{
	if (x >= __START_KERNEL_map)
		return x - __START_KERNEL_map + phys_base;
	return x - PAGE_OFFSET;
}
EXPORT_SYMBOL(__phys_addr);

static inline int phys_addr_valid(unsigned long addr)
{
	return addr < (1UL << boot_cpu_data.x86_phys_bits);
}

#else

static inline int phys_addr_valid(unsigned long addr)
{
	return 1;
}

#endif
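
/*
 * Worked example (illustrative note, not part of the original file): on
 * 64-bit, __phys_addr() inverts the two kernel virtual ranges. Direct-map
 * addresses are offset by PAGE_OFFSET; kernel-text addresses are offset by
 * __START_KERNEL_map minus the relocation base phys_base:
 *
 *	__phys_addr(PAGE_OFFSET + 0x100000)      == 0x100000
 *	__phys_addr(__START_KERNEL_map + 0x2000) == phys_base + 0x2000
 */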

int page_is_ram(unsigned long pagenr)
{
	unsigned long addr, end;
	int i;

	/*
	 * A special case is the first 4Kb of memory;
	 * This is a BIOS owned area, not kernel ram, but generally
	 * not listed as such in the E820 table.
	 */
	if (pagenr == 0)
		return 0;

	/*
	 * Second special case: Some BIOSen report the PC BIOS
	 * area (640->1Mb) as ram even though it is not.
	 */
	if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
		    pagenr < (BIOS_END >> PAGE_SHIFT))
		return 0;

	for (i = 0; i < e820.nr_map; i++) {
		/*
		 * Not usable memory:
		 */
		if (e820.map[i].type != E820_RAM)
			continue;
		addr = (e820.map[i].addr + PAGE_SIZE-1) >> PAGE_SHIFT;
		end = (e820.map[i].addr + e820.map[i].size) >> PAGE_SHIFT;

		if ((pagenr >= addr) && (pagenr < end))
			return 1;
	}
	return 0;
}

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
			unsigned long prot_val)
{
	unsigned long nrpages = size >> PAGE_SHIFT;
	int err;

	switch (prot_val) {
	case _PAGE_CACHE_UC:
	default:
		err = _set_memory_uc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_WC:
		err = _set_memory_wc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_WB:
		err = _set_memory_wb(vaddr, nrpages);
		break;
	}

	return err;
}
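
/*
 * Usage sketch (illustrative, hypothetical values): to switch the direct
 * mapping of a 16-page region at 'vaddr' to write-combine, a caller could
 * do:
 *
 *	if (ioremap_change_attr(vaddr, 16 << PAGE_SHIFT, _PAGE_CACHE_WC))
 *		... bail out, the attribute change failed ...
 *
 * size is in bytes and is expected to be a multiple of PAGE_SIZE, since
 * only size >> PAGE_SHIFT full pages are converted.
 */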

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size,
			       unsigned long prot_val)
{
	unsigned long pfn, offset, last_addr, vaddr;
	struct vm_struct *area;
	unsigned long new_prot_val;
	pgprot_t prot;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	if (!phys_addr_valid(phys_addr)) {
		printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
		       (unsigned long long)phys_addr);
		WARN_ON_ONCE(1);
		return NULL;
	}

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
		return (__force void __iomem *)phys_to_virt(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	for (pfn = phys_addr >> PAGE_SHIFT;
				(pfn << PAGE_SHIFT) < last_addr; pfn++) {

		int is_ram = page_is_ram(pfn);

		if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
			return NULL;
		WARN_ON_ONCE(is_ram);
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;
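
	/*
	 * Worked example (illustrative): for phys_addr == 0xfebc1004 and
	 * size == 0x10, last_addr is 0xfebc1013, so after the fixup above
	 * offset == 0x004, phys_addr == 0xfebc1000 and size == PAGE_SIZE.
	 * The caller's pointer is later rebuilt as vaddr + offset.
	 */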

	if (reserve_memtype(phys_addr, phys_addr + size,
			    prot_val, &new_prot_val)) {
		/*
		 * Do not fallback to certain memory types with certain
		 * requested type:
		 * - request is uncached, return cannot be write-back
		 * - request is uncached, return cannot be write-combine
		 * - request is write-combine, return cannot be write-back
		 */
		if ((prot_val == _PAGE_CACHE_UC &&
		     (new_prot_val == _PAGE_CACHE_WB ||
		      new_prot_val == _PAGE_CACHE_WC)) ||
		    (prot_val == _PAGE_CACHE_WC &&
		     new_prot_val == _PAGE_CACHE_WB)) {
			printk(KERN_ERR
		"ioremap error for 0x%llx-0x%llx, requested 0x%lx, got 0x%lx\n",
				(unsigned long long)phys_addr,
				(unsigned long long)(phys_addr + size),
				prot_val, new_prot_val);
			free_memtype(phys_addr, phys_addr + size);
			return NULL;
		}
		prot_val = new_prot_val;
	}

	switch (prot_val) {
	case _PAGE_CACHE_UC:
	default:
		prot = PAGE_KERNEL_NOCACHE;
		break;
	case _PAGE_CACHE_WC:
		prot = PAGE_KERNEL_WC;
		break;
	case _PAGE_CACHE_WB:
		prot = PAGE_KERNEL;
		break;
	}

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	vaddr = (unsigned long) area->addr;
	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
		free_memtype(phys_addr, phys_addr + size);
		free_vm_area(area);
		return NULL;
	}

	if (ioremap_change_attr(vaddr, size, prot_val) < 0) {
		free_memtype(phys_addr, phys_addr + size);
		vunmap(area->addr);
		return NULL;
	}

	return (void __iomem *) (vaddr + offset);
}
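
/*
 * Fallback sketch (illustrative): if a _PAGE_CACHE_WB mapping is requested
 * for a range the PAT tracker already holds as UC, reserve_memtype() hands
 * back _PAGE_CACHE_UC in new_prot_val. UC is a permitted downgrade from WB,
 * so __ioremap() silently proceeds with the stricter type; only the
 * combinations listed in the comment above, where the returned type is
 * weaker than the requested one, cause the mapping to be refused.
 */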

/**
 * ioremap_nocache - map bus memory into CPU space
 * @phys_addr:	bus address of the memory
 * @size:	size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable:
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap(phys_addr, size, _PAGE_CACHE_UC);
}
EXPORT_SYMBOL(ioremap_nocache);

/**
 * ioremap_wc - map memory into CPU space write combined
 * @phys_addr:	bus address of the memory
 * @size:	size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(unsigned long phys_addr, unsigned long size)
{
	if (pat_wc_enabled)
		return __ioremap(phys_addr, size, _PAGE_CACHE_WC);
	else
		return ioremap_nocache(phys_addr, size);
}
EXPORT_SYMBOL(ioremap_wc);

void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap(phys_addr, size, _PAGE_CACHE_WB);
}
EXPORT_SYMBOL(ioremap_cache);
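
/*
 * Driver-side sketch (illustrative, hypothetical device): the BAR address
 * and size below stand in for whatever pci_resource_start() and
 * pci_resource_len() would return for a real device.
 *
 *	void __iomem *regs;
 *
 *	regs = ioremap_nocache(0xfebc0000, 0x1000);
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + 0x10);		(poke a control register)
 *	...
 *	iounmap(regs);
 *
 * A framebuffer would use ioremap_wc() instead, which falls back to the
 * uncached mapping on non-PAT systems as the code above shows.
 */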

/**
 * iounmap - Free a IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space. So handle that here.
	 */
	if (addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	/* Use the vm area unlocked, assuming the caller
	   ensures there isn't another iounmap for the same address
	   in parallel. Reuse of the virtual address is prevented by
	   leaving it in the global lists until we're done with it.
	   cpa takes care of the direct mappings. */
	read_lock(&vmlist_lock);
	for (p = vmlist; p; p = p->next) {
		if (p->addr == addr)
			break;
	}
	read_unlock(&vmlist_lock);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));

	/* Finally remove it */
	o = remove_vm_area((void *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);

#ifdef CONFIG_X86_32

int __initdata early_ioremap_debug;

static int __init early_ioremap_debug_setup(char *str)
{
	early_ioremap_debug = 1;

	return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);

static __initdata int after_paging_init;
static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)]
		__section(.bss.page_aligned);

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
	/* Don't assume we're using swapper_pg_dir at this point */
	pgd_t *base = __va(read_cr3());
	pgd_t *pgd = &base[pgd_index(addr)];
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}
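
/*
 * Note (illustrative): bm_pte is one page of ptes, i.e. exactly one pmd's
 * worth of mappings, so a single pmd_populate_kernel() below wires up
 * every FIX_BTMAP slot at once. early_ioremap_pte() can then index bm_pte
 * directly, e.g.:
 *
 *	pte_index(fix_to_virt(FIX_BTMAP_BEGIN))
 *
 * picks the entry for the first boot-time slot without walking the
 * pgd/pud/pmd levels again.
 */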

void __init early_ioremap_init(void)
{
	pmd_t *pmd;

	if (early_ioremap_debug)
		printk(KERN_INFO "early_ioremap_init()\n");

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	memset(bm_pte, 0, sizeof(bm_pte));
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pmd %p != %p\n",
		       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
			fix_to_virt(FIX_BTMAP_END));

		printk(KERN_WARNING "FIX_BTMAP_END:   %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN: %d\n",
		       FIX_BTMAP_BEGIN);
	}
}

void __init early_ioremap_clear(void)
{
	pmd_t *pmd;

	if (early_ioremap_debug)
		printk(KERN_INFO "early_ioremap_clear()\n");

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	pmd_clear(pmd);
	paravirt_release_pt(__pa(bm_pte) >> PAGE_SHIFT);
	__flush_tlb_all();
}

void __init early_ioremap_reset(void)
{
	enum fixed_addresses idx;
	unsigned long addr, phys;
	pte_t *pte;

	after_paging_init = 1;
	for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) {
		addr = fix_to_virt(idx);
		pte = early_ioremap_pte(addr);
		if (pte_present(*pte)) {
			phys = pte_val(*pte) & PAGE_MASK;
			set_fixmap(idx, phys);
		}
	}
}

static void __init __early_set_fixmap(enum fixed_addresses idx,
				      unsigned long phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);
	if (pgprot_val(flags))
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	else
		pte_clear(NULL, addr, pte);
	__flush_tlb_one(addr);
}
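
/*
 * Address arithmetic sketch (illustrative): fixmap slots grow downward
 * from FIXADDR_TOP, so a higher index means a lower virtual address:
 *
 *	__fix_to_virt(idx) == FIXADDR_TOP - (idx << PAGE_SHIFT)
 *
 * This is why the loops below walk physically upward while decrementing
 * idx: consecutive physical pages land at consecutive, ascending virtual
 * addresses.
 */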

static inline void __init early_set_fixmap(enum fixed_addresses idx,
					   unsigned long phys)
{
	if (after_paging_init)
		set_fixmap(idx, phys);
	else
		__early_set_fixmap(idx, phys, PAGE_KERNEL);
}

static inline void __init early_clear_fixmap(enum fixed_addresses idx)
{
	if (after_paging_init)
		clear_fixmap(idx);
	else
		__early_set_fixmap(idx, 0, __pgprot(0));
}

int __initdata early_ioremap_nested;

static int __init check_early_ioremap_leak(void)
{
	if (!early_ioremap_nested)
		return 0;

	printk(KERN_WARNING
	       "Debug warning: early ioremap leak of %d areas detected.\n",
	       early_ioremap_nested);
	printk(KERN_WARNING
	       "please boot with early_ioremap_debug and report the dmesg.\n");
	WARN_ON(1);

	return 1;
}
late_initcall(check_early_ioremap_leak);
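
/*
 * Usage sketch (illustrative, hypothetical physical address): boot code
 * that runs before paging_init() can peek at firmware tables through the
 * BTMAP slots, as long as every mapping is paired with an unmap:
 *
 *	u32 *p = early_ioremap(phys, sizeof(u32));
 *
 *	if (p) {
 *		... read *p ...
 *		early_iounmap(p, sizeof(u32));
 *	}
 *
 * Each nesting level owns a disjoint band of NR_FIX_BTMAPS fixmap slots
 * (level n starts at FIX_BTMAP_BEGIN - NR_FIX_BTMAPS * n), so up to
 * FIX_BTMAPS_NESTING mappings may be live at once. An unbalanced caller
 * leaves early_ioremap_nested raised and trips the leak check above.
 */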

void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
{
	unsigned long offset, last_addr;
	unsigned int nrpages, nesting;
	enum fixed_addresses idx0, idx;

	WARN_ON(system_state != SYSTEM_BOOTING);

	nesting = early_ioremap_nested;
	if (early_ioremap_debug) {
		printk(KERN_INFO "early_ioremap(%08lx, %08lx) [%d] => ",
		       phys_addr, size, nesting);
		dump_stack();
	}

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr) {
		WARN_ON(1);
		return NULL;
	}

	if (nesting >= FIX_BTMAPS_NESTING) {
		WARN_ON(1);
		return NULL;
	}
	early_ioremap_nested++;
	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Mappings have to fit in the FIX_BTMAP area.
	 */
	nrpages = size >> PAGE_SHIFT;
	if (nrpages > NR_FIX_BTMAPS) {
		WARN_ON(1);
		return NULL;
	}

	/*
	 * Ok, go for it..
	 */
	idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
	idx = idx0;
	while (nrpages > 0) {
		early_set_fixmap(idx, phys_addr);
		phys_addr += PAGE_SIZE;
		--idx;
		--nrpages;
	}
	if (early_ioremap_debug)
		printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));

	return (void *) (offset + fix_to_virt(idx0));
}

void __init early_iounmap(void *addr, unsigned long size)
{
	unsigned long virt_addr;
	unsigned long offset;
	unsigned int nrpages;
	enum fixed_addresses idx;
	int nesting;

	nesting = --early_ioremap_nested;
	WARN_ON(nesting < 0);

	if (early_ioremap_debug) {
		printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]\n", addr,
		       size, nesting);
		dump_stack();
	}

	virt_addr = (unsigned long)addr;
	if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
		WARN_ON(1);
		return;
	}
	offset = virt_addr & ~PAGE_MASK;
	nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT;

	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
	while (nrpages > 0) {
		early_clear_fixmap(idx);
		--idx;
		--nrpages;
	}
}

void __this_fixmap_does_not_exist(void)
{
	WARN_ON(1);
}

#endif /* CONFIG_X86_32 */