/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmiotrace.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/pat.h>

#include "physaddr.h"

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
			unsigned long prot_val)
{
	unsigned long nrpages = size >> PAGE_SHIFT;
	int err;

	switch (prot_val) {
	case _PAGE_CACHE_UC:
	default:
		err = _set_memory_uc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_WC:
		err = _set_memory_wc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_WB:
		err = _set_memory_wb(vaddr, nrpages);
		break;
	}

	return err;
}

static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
			       void *arg)
{
	unsigned long i;

	for (i = 0; i < nr_pages; ++i)
		if (pfn_valid(start_pfn + i) &&
		    !PageReserved(pfn_to_page(start_pfn + i)))
			return 1;

	WARN_ONCE(1, "ioremap on RAM pfn 0x%lx\n", start_pfn);

	return 0;
}
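/*
 * Illustrative sketch, not part of this file: the PAT/memtype code keeps
 * the kernel's direct mapping in sync with a new ioremap mapping roughly
 * like this, assuming the target range is covered by the linear map:
 *
 *	unsigned long vaddr = (unsigned long)__va(phys_addr);
 *
 *	if (ioremap_change_attr(vaddr, size, _PAGE_CACHE_UC))
 *		...fail the mapping rather than alias with mixed attributes...
 */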
/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap_caller(resource_size_t phys_addr,
		unsigned long size, unsigned long prot_val, void *caller)
{
	unsigned long offset, vaddr;
	resource_size_t pfn, last_pfn, last_addr;
	const resource_size_t unaligned_phys_addr = phys_addr;
	const unsigned long unaligned_size = size;
	struct vm_struct *area;
	unsigned long new_prot_val;
	pgprot_t prot;
	int retval;
	void __iomem *ret_addr;
	int ram_region;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	if (!phys_addr_valid(phys_addr)) {
		printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
		       (unsigned long long)phys_addr);
		WARN_ON_ONCE(1);
		return NULL;
	}

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (is_ISA_range(phys_addr, last_addr))
		return (__force void __iomem *)phys_to_virt(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
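	/*
	 * (Why: mapping RAM through ioremap would alias pages that are
	 * already mapped cacheable in the direct map, and x86 behaviour
	 * is undefined when the same physical page is accessed with
	 * conflicting cache attributes.)
	 */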
	/* First check if whole region can be identified as RAM or not */
	ram_region = region_is_ram(phys_addr, size);
	if (ram_region > 0) {
		WARN_ONCE(1, "ioremap on RAM at 0x%lx - 0x%lx\n",
				(unsigned long int)phys_addr,
				(unsigned long int)last_addr);
		return NULL;
	}

	/* If it could not be identified (-1), check page by page */
	if (ram_region < 0) {
		pfn = phys_addr >> PAGE_SHIFT;
		last_pfn = last_addr >> PAGE_SHIFT;
		if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
					  __ioremap_check_ram) == 1)
			return NULL;
	}
	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PHYSICAL_PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
						prot_val, &new_prot_val);
	if (retval) {
		printk(KERN_ERR "ioremap reserve_memtype failed %d\n", retval);
		return NULL;
	}

	if (prot_val != new_prot_val) {
		if (!is_new_memtype_allowed(phys_addr, size,
					    prot_val, new_prot_val)) {
			printk(KERN_ERR
		"ioremap error for 0x%llx-0x%llx, requested 0x%lx, got 0x%lx\n",
				(unsigned long long)phys_addr,
				(unsigned long long)(phys_addr + size),
				prot_val, new_prot_val);
			goto err_free_memtype;
		}
		prot_val = new_prot_val;
	}

	switch (prot_val) {
	case _PAGE_CACHE_UC:
	default:
		prot = PAGE_KERNEL_IO_NOCACHE;
		break;
	case _PAGE_CACHE_UC_MINUS:
		prot = PAGE_KERNEL_IO_UC_MINUS;
		break;
	case _PAGE_CACHE_WC:
		prot = PAGE_KERNEL_IO_WC;
		break;
	case _PAGE_CACHE_WB:
		prot = PAGE_KERNEL_IO;
		break;
	}

	/*
	 * Ok, go for it..
	 */
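	/*
	 * Worked example (illustrative values): an unaligned request of
	 * phys_addr = 0x10000234, size = 0x10 has by this point become
	 * offset = 0x234, phys_addr = 0x10000000, size = 0x1000, so a
	 * whole page is mapped and the caller gets back vaddr + offset.
	 */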
	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		goto err_free_memtype;
	area->phys_addr = phys_addr;
	vaddr = (unsigned long) area->addr;

	if (kernel_map_sync_memtype(phys_addr, size, prot_val))
		goto err_free_area;

	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot))
		goto err_free_area;

	ret_addr = (void __iomem *) (vaddr + offset);
	mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);

	/*
	 * Check if the request spans more than any BAR in the iomem resource
	 * tree.
	 */
	WARN_ONCE(iomem_map_sanity_check(unaligned_phys_addr, unaligned_size),
		  KERN_INFO "Info: mapping multiple BARs. Your kernel is fine.");

	return ret_addr;
err_free_area:
	free_vm_area(area);
err_free_memtype:
	free_memtype(phys_addr, phys_addr + size);
	return NULL;
}
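/*
 * Illustrative usage sketch, not part of this file: a PCI driver would
 * typically feed a BAR to the _nocache variant below and use the mmio
 * helpers on the result. The BAR index and register offsets are made up:
 *
 *	void __iomem *regs;
 *
 *	regs = ioremap_nocache(pci_resource_start(pdev, 0),
 *			       pci_resource_len(pdev, 0));
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + 0x04);		(hypothetical enable register)
 *	status = readl(regs + 0x08);
 *	iounmap(regs);
 */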
/**
 * ioremap_nocache - map bus memory into CPU space
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncacheable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
	/*
	 * Ideally, this should be:
	 *	pat_enabled ? _PAGE_CACHE_UC : _PAGE_CACHE_UC_MINUS;
	 *
	 * Till we fix all X drivers to use ioremap_wc(), we will use
	 * UC MINUS.
	 */
	unsigned long val = _PAGE_CACHE_UC_MINUS;

	return __ioremap_caller(phys_addr, size, val,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_nocache);

/**
 * ioremap_wc - map memory into CPU space write combined
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
{
	if (pat_enabled)
		return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC,
					__builtin_return_address(0));
	else
		return ioremap_nocache(phys_addr, size);
}
EXPORT_SYMBOL(ioremap_wc);

void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WB,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);

void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
				unsigned long prot_val)
{
	return __ioremap_caller(phys_addr, size, (prot_val & _PAGE_CACHE_MASK),
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_prot);
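/*
 * Illustrative sketch, not part of this file: ioremap_wc() above is meant
 * for things like framebuffers, where streaming stores benefit from write
 * combining (the base/size below are hypothetical):
 *
 *	void __iomem *fb = ioremap_wc(fb_phys_base, fb_size);
 *
 * Since it silently falls back to ioremap_nocache() when PAT is disabled,
 * drivers may rely on WC only for performance, never for correctness.
 */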
/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space. So handle that here.
	 */
	if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    (void __force *)addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	mmiotrace_iounmap(addr);

	/* Use the vm area unlocked, assuming the caller
	   ensures there isn't another iounmap for the same address
	   in parallel. Reuse of the virtual address is prevented by
	   leaving it in the global lists until we're done with it.
	   cpa takes care of the direct mappings. */
	p = find_vm_area((void __force *)addr);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));

	/* Finally remove it */
	o = remove_vm_area((void __force *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);
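/*
 * Illustrative sketch, not part of this file: the /dev/mem read/write
 * paths use the two helpers below roughly as a pair:
 *
 *	ptr = xlate_dev_mem_ptr(p);
 *	if (!ptr)
 *		return -EFAULT;
 *	...copy to/from ptr, with error handling elided...
 *	unxlate_dev_mem_ptr(p, ptr);
 */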
/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
void *xlate_dev_mem_ptr(phys_addr_t phys)
{
	void *addr;
	unsigned long start = phys & PAGE_MASK;

	/* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
	if (page_is_ram(start >> PAGE_SHIFT))
		return __va(phys);

	addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
	if (addr)
		addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));

	return addr;
}

void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
{
	if (page_is_ram(phys >> PAGE_SHIFT))
		return;

	iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
	return;
}

static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
	/* Don't assume we're using swapper_pg_dir at this point */
	pgd_t *base = __va(read_cr3());
	pgd_t *pgd = &base[pgd_index(addr)];
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}

bool __init is_early_ioremap_ptep(pte_t *ptep)
{
	return ptep >= &bm_pte[0] && ptep < &bm_pte[PAGE_SIZE/sizeof(pte_t)];
}
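/*
 * Sizing note (illustrative): bm_pte above is one page of ptes, i.e.
 * PAGE_SIZE/sizeof(pte_t) = 512 entries on x86_64, exactly one pmd's
 * worth of mappings (512 * 4K = 2M). That is why early_ioremap_init()
 * below insists that the whole FIX_BTMAP range fits inside a single pmd.
 */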
void __init early_ioremap_init(void)
{
	pmd_t *pmd;

#ifdef CONFIG_X86_64
	BUILD_BUG_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
#else
	WARN_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
#endif

	early_ioremap_setup();

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	memset(bm_pte, 0, sizeof(bm_pte));
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
#define __FIXADDR_TOP (-PAGE_SIZE)
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
#undef __FIXADDR_TOP
	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pmd %p != %p\n",
		       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
			fix_to_virt(FIX_BTMAP_END));

		printk(KERN_WARNING "FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN:     %d\n",
		       FIX_BTMAP_BEGIN);
	}
}

void __init __early_set_fixmap(enum fixed_addresses idx,
			       phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);

	if (pgprot_val(flags))
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	else
		pte_clear(&init_mm, addr, pte);
	__flush_tlb_one(addr);
}
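/*
 * Illustrative sketch, not part of this file: early boot code reaches the
 * fixmap plumbing above through the generic early_ioremap() interface,
 * e.g. to inspect a firmware table before the normal ioremap works:
 *
 *	void *map = early_ioremap(table_phys, table_len);
 *	...parse the table...
 *	early_iounmap(map, table_len);
 */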