/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PCs
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmiotrace.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/pat.h>

#include "physaddr.h"

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
			unsigned long prot_val)
{
	unsigned long nrpages = size >> PAGE_SHIFT;
	int err;

	switch (prot_val) {
	case _PAGE_CACHE_UC:
	default:
		err = _set_memory_uc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_WC:
		err = _set_memory_wc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_WB:
		err = _set_memory_wb(vaddr, nrpages);
		break;
	}

	return err;
}

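/*
 * Illustrative sketch (an assumption, not taken from this file): when a
 * range that is also covered by the kernel's direct mapping is ioremapped
 * with a new cache type, the direct-map alias has to be converted to the
 * same type to avoid an attribute conflict, roughly:
 *
 *	ioremap_change_attr((unsigned long)__va(phys_addr), size,
 *			    _PAGE_CACHE_UC);
 *
 * which funnels into _set_memory_uc() for the affected pages.
 */
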
/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap_caller(resource_size_t phys_addr,
		unsigned long size, unsigned long prot_val, void *caller)
{
	unsigned long offset, vaddr;
	resource_size_t pfn, last_pfn, last_addr;
	const resource_size_t unaligned_phys_addr = phys_addr;
	const unsigned long unaligned_size = size;
	struct vm_struct *area;
	unsigned long new_prot_val;
	pgprot_t prot;
	int retval;
	void __iomem *ret_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	if (!phys_addr_valid(phys_addr)) {
		printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
		       (unsigned long long)phys_addr);
		WARN_ON_ONCE(1);
		return NULL;
	}

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (is_ISA_range(phys_addr, last_addr))
		return (__force void __iomem *)phys_to_virt(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	last_pfn = last_addr >> PAGE_SHIFT;
	for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
		int is_ram = page_is_ram(pfn);

		if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
			return NULL;
		WARN_ON_ONCE(is_ram);
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PHYSICAL_PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

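	/*
	 * Worked example (illustrative values): for phys_addr = 0xfed00004
	 * and size = 0x10, last_addr is 0xfed00013, so offset becomes 0x4,
	 * phys_addr is rounded down to 0xfed00000 and size is rounded up to
	 * PAGE_ALIGN(0xfed00014) - 0xfed00000 = 0x1000. The caller gets the
	 * page-aligned mapping plus the original offset back.
	 */
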
	retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
						prot_val, &new_prot_val);
	if (retval) {
		printk(KERN_ERR "ioremap reserve_memtype failed %d\n", retval);
		return NULL;
	}

	if (prot_val != new_prot_val) {
		if (!is_new_memtype_allowed(phys_addr, size,
					    prot_val, new_prot_val)) {
			printk(KERN_ERR
		"ioremap error for 0x%llx-0x%llx, requested 0x%lx, got 0x%lx\n",
				(unsigned long long)phys_addr,
				(unsigned long long)(phys_addr + size),
				prot_val, new_prot_val);
			goto err_free_memtype;
		}
		prot_val = new_prot_val;
	}

	switch (prot_val) {
	case _PAGE_CACHE_UC:
	default:
		prot = PAGE_KERNEL_IO_NOCACHE;
		break;
	case _PAGE_CACHE_UC_MINUS:
		prot = PAGE_KERNEL_IO_UC_MINUS;
		break;
	case _PAGE_CACHE_WC:
		prot = PAGE_KERNEL_IO_WC;
		break;
	case _PAGE_CACHE_WB:
		prot = PAGE_KERNEL_IO;
		break;
	}

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		goto err_free_memtype;
	area->phys_addr = phys_addr;
	vaddr = (unsigned long) area->addr;

	if (kernel_map_sync_memtype(phys_addr, size, prot_val))
		goto err_free_area;

	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot))
		goto err_free_area;

	ret_addr = (void __iomem *) (vaddr + offset);
	mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);

	/*
	 * Check if the request spans more than one BAR in the iomem
	 * resource tree.
	 */
	WARN_ONCE(iomem_map_sanity_check(unaligned_phys_addr, unaligned_size),
		  KERN_INFO "Info: mapping multiple BARs. Your kernel is fine.");

	return ret_addr;
err_free_area:
	free_vm_area(area);
err_free_memtype:
	free_memtype(phys_addr, phys_addr + size);
	return NULL;
}

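/*
 * All of the public ioremap variants below funnel into __ioremap_caller()
 * and differ only in the memory type they request (a summary sketch
 * derived from the functions that follow):
 *
 *	ioremap_nocache() -> _PAGE_CACHE_UC_MINUS
 *	ioremap_wc()      -> _PAGE_CACHE_WC (falls back to UC- without PAT)
 *	ioremap_cache()   -> _PAGE_CACHE_WB
 *	ioremap_prot()    -> caller-supplied prot_val & _PAGE_CACHE_MASK
 */
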
/**
 * ioremap_nocache - map bus memory into CPU space
 * @phys_addr:	bus address of the memory
 * @size:	size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncacheable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
	/*
	 * Ideally, this should be:
	 *	pat_enabled ? _PAGE_CACHE_UC : _PAGE_CACHE_UC_MINUS;
	 *
	 * Until we fix all X drivers to use ioremap_wc(), we will use
	 * UC MINUS.
	 */
	unsigned long val = _PAGE_CACHE_UC_MINUS;

	return __ioremap_caller(phys_addr, size, val,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_nocache);

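/*
 * Illustrative usage sketch (not part of this file; BAR_PHYS, BAR_LEN and
 * CTRL_REG are hypothetical placeholders for a device's MMIO window):
 *
 *	void __iomem *regs = ioremap_nocache(BAR_PHYS, BAR_LEN);
 *
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + CTRL_REG);
 *	...
 *	iounmap(regs);
 */
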
/**
 * ioremap_wc - map memory into CPU space write combined
 * @phys_addr:	bus address of the memory
 * @size:	size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
{
	if (pat_enabled)
		return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC,
					__builtin_return_address(0));
	else
		return ioremap_nocache(phys_addr, size);
}
EXPORT_SYMBOL(ioremap_wc);

void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WB,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);

void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
				unsigned long prot_val)
{
	return __ioremap_caller(phys_addr, size, (prot_val & _PAGE_CACHE_MASK),
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_prot);

/**
 * iounmap - Free an I/O remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space. So handle that here.
	 */
	if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    (void __force *)addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	mmiotrace_iounmap(addr);

	/*
	 * Use the vm area unlocked, assuming the caller ensures there isn't
	 * another iounmap for the same address in parallel. Reuse of the
	 * virtual address is prevented by leaving it in the global lists
	 * until we're done with it. cpa takes care of the direct mappings.
	 */
	p = find_vm_area((void __force *)addr);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));

	/* Finally remove it */
	o = remove_vm_area((void __force *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
void *xlate_dev_mem_ptr(unsigned long phys)
{
	void *addr;
	unsigned long start = phys & PAGE_MASK;

	/* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
	if (page_is_ram(start >> PAGE_SHIFT))
		return __va(phys);

	addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
	if (addr)
		addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));

	return addr;
}

void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
{
	if (page_is_ram(phys >> PAGE_SHIFT))
		return;

	iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
	return;
}

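/*
 * Illustrative pairing sketch (an assumption, not from this file): the
 * /dev/mem read/write paths wrap each access in these helpers, roughly:
 *
 *	ptr = xlate_dev_mem_ptr(p);
 *	if (!ptr)
 *		return -EFAULT;
 *	copy_to_user(buf, ptr, sz);
 *	unxlate_dev_mem_ptr(p, ptr);
 */
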
static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
	/* Don't assume we're using swapper_pg_dir at this point */
	pgd_t *base = __va(read_cr3());
	pgd_t *pgd = &base[pgd_index(addr)];
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}

bool __init is_early_ioremap_ptep(pte_t *ptep)
{
	return ptep >= &bm_pte[0] && ptep < &bm_pte[PAGE_SIZE/sizeof(pte_t)];
}

void __init early_ioremap_init(void)
{
	pmd_t *pmd;

	early_ioremap_setup();

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	memset(bm_pte, 0, sizeof(bm_pte));
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range must not span multiple pmds; we are
	 * not prepared to handle that:
	 */
#define __FIXADDR_TOP (-PAGE_SIZE)
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
#undef __FIXADDR_TOP
	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pmd %p != %p\n",
		       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
		       fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
		       fix_to_virt(FIX_BTMAP_END));

		printk(KERN_WARNING "FIX_BTMAP_END:   %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN: %d\n",
		       FIX_BTMAP_BEGIN);
	}
}

void __init __early_set_fixmap(enum fixed_addresses idx,
			       phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);

	if (pgprot_val(flags))
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	else
		pte_clear(&init_mm, addr, pte);
	__flush_tlb_one(addr);
}

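/*
 * Illustrative sketch (an assumption, not from this file): the generic
 * early_ioremap()/early_iounmap() code drives this helper by picking a
 * free FIX_BTMAP_* slot, roughly:
 *
 *	__early_set_fixmap(idx, phys & PAGE_MASK, FIXMAP_PAGE_IO);
 *	...access through __fix_to_virt(idx) plus the page offset...
 *	__early_set_fixmap(idx, 0, __pgprot(0));	(tear down)
 *
 * FIXMAP_PAGE_IO is the generic fixmap protection name and may differ
 * from what this kernel version actually passes in.
 */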