// SPDX-License-Identifier: GPL-2.0-only
/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmiotrace.h>
#include <linux/mem_encrypt.h>
#include <linux/efi.h>

#include <asm/set_memory.h>
#include <asm/e820/api.h>
#include <asm/efi.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/memtype.h>
#include <asm/setup.h>

#include "physaddr.h"

/*
 * Descriptor controlling ioremap() behavior.
 */
struct ioremap_desc {
	unsigned int flags;
};

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
			enum page_cache_mode pcm)
{
	unsigned long nrpages = size >> PAGE_SHIFT;
	int err;

	switch (pcm) {
	case _PAGE_CACHE_MODE_UC:
	default:
		err = _set_memory_uc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WC:
		err = _set_memory_wc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WT:
		err = _set_memory_wt(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WB:
		err = _set_memory_wb(vaddr, nrpages);
		break;
	}

	return err;
}
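/*
 * Illustrative sketch (not part of the original file): when __ioremap_caller()
 * below establishes e.g. a WC mapping of a range that is also covered by the
 * kernel direct map, memtype_kernel_map_sync() ends up invoking this helper
 * roughly as
 *
 *	ioremap_change_attr((unsigned long)__va(phys_addr), size,
 *			    _PAGE_CACHE_MODE_WC);
 *
 * so that the direct-map alias uses the same cache mode and no conflicting
 * attributes exist for the same physical pages.
 */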
/* Does the range (or a subset of it) contain normal RAM? */
static unsigned int __ioremap_check_ram(struct resource *res)
{
	unsigned long start_pfn, stop_pfn;
	unsigned long i;

	if ((res->flags & IORESOURCE_SYSTEM_RAM) != IORESOURCE_SYSTEM_RAM)
		return 0;

	start_pfn = (res->start + PAGE_SIZE - 1) >> PAGE_SHIFT;
	stop_pfn = (res->end + 1) >> PAGE_SHIFT;
	if (stop_pfn > start_pfn) {
		for (i = 0; i < (stop_pfn - start_pfn); ++i)
			if (pfn_valid(start_pfn + i) &&
			    !PageReserved(pfn_to_page(start_pfn + i)))
				return IORES_MAP_SYSTEM_RAM;
	}

	return 0;
}

/*
 * In a SEV guest, NONE and RESERVED should not be mapped encrypted because
 * there the whole memory is already encrypted.
 */
static unsigned int __ioremap_check_encrypted(struct resource *res)
{
	if (!sev_active())
		return 0;

	switch (res->desc) {
	case IORES_DESC_NONE:
	case IORES_DESC_RESERVED:
		break;
	default:
		return IORES_MAP_ENCRYPTED;
	}

	return 0;
}

/*
 * The EFI runtime services data area is not covered by walk_mem_res(), but must
 * be mapped encrypted when SEV is active.
 */
static void __ioremap_check_other(resource_size_t addr, struct ioremap_desc *desc)
{
	if (!sev_active())
		return;

	if (!IS_ENABLED(CONFIG_EFI))
		return;

	if (efi_mem_type(addr) == EFI_RUNTIME_SERVICES_DATA)
		desc->flags |= IORES_MAP_ENCRYPTED;
}

static int __ioremap_collect_map_flags(struct resource *res, void *arg)
{
	struct ioremap_desc *desc = arg;

	if (!(desc->flags & IORES_MAP_SYSTEM_RAM))
		desc->flags |= __ioremap_check_ram(res);

	if (!(desc->flags & IORES_MAP_ENCRYPTED))
		desc->flags |= __ioremap_check_encrypted(res);

	return ((desc->flags & (IORES_MAP_SYSTEM_RAM | IORES_MAP_ENCRYPTED)) ==
		(IORES_MAP_SYSTEM_RAM | IORES_MAP_ENCRYPTED));
}

/*
 * To avoid multiple resource walks, this function walks resources marked as
 * IORESOURCE_MEM and IORESOURCE_BUSY, looking for system RAM and/or a
 * resource described not as IORES_DESC_NONE (e.g. IORES_DESC_ACPI_TABLES).
 *
 * After that, deal with misc other ranges in __ioremap_check_other() which do
 * not fall into the above category.
 */
static void __ioremap_check_mem(resource_size_t addr, unsigned long size,
				struct ioremap_desc *desc)
{
	u64 start, end;

	start = (u64)addr;
	end = start + size - 1;
	memset(desc, 0, sizeof(struct ioremap_desc));

	walk_mem_res(start, end, desc, __ioremap_collect_map_flags);

	__ioremap_check_other(addr, desc);
}
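/*
 * Minimal sketch (illustrative, not part of the file) of how the flags
 * collected above are consumed by __ioremap_caller() below:
 *
 *	struct ioremap_desc io_desc;
 *
 *	__ioremap_check_mem(phys_addr, size, &io_desc);
 *	if (io_desc.flags & IORES_MAP_SYSTEM_RAM)
 *		return NULL;			// refuse to remap normal RAM
 *	if (io_desc.flags & IORES_MAP_ENCRYPTED)
 *		prot = pgprot_encrypted(prot);	// SEV: keep the C-bit set
 */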
/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. It transparently creates kernel huge I/O mapping when
 * the physical address is aligned by a huge page size (1GB or 2MB) and
 * the requested size is at least the huge page size.
 *
 * NOTE: MTRRs can override PAT memory types with a 4KB granularity.
 * Therefore, the mapping code falls back to use a smaller page toward 4KB
 * when a mapping range is covered by non-WB type of MTRRs.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *
__ioremap_caller(resource_size_t phys_addr, unsigned long size,
		 enum page_cache_mode pcm, void *caller, bool encrypted)
{
	unsigned long offset, vaddr;
	resource_size_t last_addr;
	const resource_size_t unaligned_phys_addr = phys_addr;
	const unsigned long unaligned_size = size;
	struct ioremap_desc io_desc;
	struct vm_struct *area;
	enum page_cache_mode new_pcm;
	pgprot_t prot;
	int retval;
	void __iomem *ret_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	if (!phys_addr_valid(phys_addr)) {
		printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
		       (unsigned long long)phys_addr);
		WARN_ON_ONCE(1);
		return NULL;
	}

	__ioremap_check_mem(phys_addr, size, &io_desc);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (io_desc.flags & IORES_MAP_SYSTEM_RAM) {
		WARN_ONCE(1, "ioremap on RAM at %pa - %pa\n",
			  &phys_addr, &last_addr);
		return NULL;
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PHYSICAL_PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	retval = memtype_reserve(phys_addr, (u64)phys_addr + size,
				 pcm, &new_pcm);
	if (retval) {
		printk(KERN_ERR "ioremap memtype_reserve failed %d\n", retval);
		return NULL;
	}

	if (pcm != new_pcm) {
		if (!is_new_memtype_allowed(phys_addr, size, pcm, new_pcm)) {
			printk(KERN_ERR
		"ioremap error for 0x%llx-0x%llx, requested 0x%x, got 0x%x\n",
				(unsigned long long)phys_addr,
				(unsigned long long)(phys_addr + size),
				pcm, new_pcm);
			goto err_free_memtype;
		}
		pcm = new_pcm;
	}

	/*
	 * If the page being mapped is in memory and SEV is active then
	 * make sure the memory encryption attribute is enabled in the
	 * resulting mapping.
	 */
	prot = PAGE_KERNEL_IO;
	if ((io_desc.flags & IORES_MAP_ENCRYPTED) || encrypted)
		prot = pgprot_encrypted(prot);

	switch (pcm) {
	case _PAGE_CACHE_MODE_UC:
	default:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_UC));
		break;
	case _PAGE_CACHE_MODE_UC_MINUS:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS));
		break;
	case _PAGE_CACHE_MODE_WC:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_WC));
		break;
	case _PAGE_CACHE_MODE_WT:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_WT));
		break;
	case _PAGE_CACHE_MODE_WB:
		break;
	}

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		goto err_free_memtype;
	area->phys_addr = phys_addr;
	vaddr = (unsigned long) area->addr;

	if (memtype_kernel_map_sync(phys_addr, size, pcm))
		goto err_free_area;

	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot))
		goto err_free_area;

	ret_addr = (void __iomem *) (vaddr + offset);
	mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);

	/*
	 * Check if the request spans more than any BAR in the iomem resource
	 * tree.
	 */
	if (iomem_map_sanity_check(unaligned_phys_addr, unaligned_size))
		pr_warn("caller %pS mapping multiple BARs\n", caller);

	return ret_addr;
err_free_area:
	free_vm_area(area);
err_free_memtype:
	memtype_free(phys_addr, phys_addr + size);
	return NULL;
}

/**
 * ioremap - map bus memory into CPU space
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable:
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap(resource_size_t phys_addr, unsigned long size)
{
	/*
	 * Ideally, this should be:
	 *	pat_enabled() ? _PAGE_CACHE_MODE_UC : _PAGE_CACHE_MODE_UC_MINUS;
	 *
	 * Till we fix all X drivers to use ioremap_wc(), we will use
	 * UC MINUS. Drivers that are certain they need or can already
	 * be converted over to strong UC can use ioremap_uc().
	 */
	enum page_cache_mode pcm = _PAGE_CACHE_MODE_UC_MINUS;

	return __ioremap_caller(phys_addr, size, pcm,
				__builtin_return_address(0), false);
}
EXPORT_SYMBOL(ioremap);
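/*
 * Example usage (illustrative only, not part of this file): a typical driver
 * maps an MMIO BAR, accesses a register through the mmio helpers and tears
 * the mapping down again. The device, BAR number and register offset are
 * assumptions made up for the sketch:
 *
 *	void __iomem *regs;
 *
 *	regs = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(0x1, regs + 0x10);	// hypothetical control register
 *	...
 *	iounmap(regs);
 */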
/**
 * ioremap_uc - map bus memory into CPU space as strongly uncachable
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_uc performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked with a strong
 * preference as completely uncachable on the CPU when possible. For non-PAT
 * systems this ends up setting page-attribute flags PCD=1, PWT=1. For PAT
 * systems this will set the PAT entry for the pages as strong UC. This call
 * will honor existing caching rules from things like the PCI bus. Note that
 * there are other caches and buffers on many busses. In particular driver
 * authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable:
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_uc(resource_size_t phys_addr, unsigned long size)
{
	enum page_cache_mode pcm = _PAGE_CACHE_MODE_UC;

	return __ioremap_caller(phys_addr, size, pcm,
				__builtin_return_address(0), false);
}
EXPORT_SYMBOL_GPL(ioremap_uc);

/**
 * ioremap_wc - map memory into CPU space write combined
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WC,
				__builtin_return_address(0), false);
}
EXPORT_SYMBOL(ioremap_wc);
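/*
 * Illustrative example (not part of this file): write-combining mappings are
 * typically used for frame buffers, where batched streaming writes benefit
 * from the WC buffer. The fbdev-style names below are only an assumption for
 * the sketch:
 *
 *	info->screen_base = ioremap_wc(info->fix.smem_start,
 *				       info->fix.smem_len);
 *	if (!info->screen_base)
 *		return -ENOMEM;
 */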
/**
 * ioremap_wt - map memory into CPU space write through
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write through.
 * Write through stores data into memory while keeping the cache up-to-date.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wt(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WT,
				__builtin_return_address(0), false);
}
EXPORT_SYMBOL(ioremap_wt);

void __iomem *ioremap_encrypted(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WB,
				__builtin_return_address(0), true);
}
EXPORT_SYMBOL(ioremap_encrypted);

void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WB,
				__builtin_return_address(0), false);
}
EXPORT_SYMBOL(ioremap_cache);

void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
			   unsigned long prot_val)
{
	return __ioremap_caller(phys_addr, size,
				pgprot2cachemode(__pgprot(prot_val)),
				__builtin_return_address(0), false);
}
EXPORT_SYMBOL(ioremap_prot);
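/*
 * Illustrative sketch (not part of this file): ioremap_encrypted() above is
 * intended for callers such as the SEV/SME kdump path, which must read old
 * kernel memory through an encrypted mapping. Roughly (variable names are
 * assumptions, see the crash-dump code for the real user):
 *
 *	vaddr = ioremap_encrypted(pfn << PAGE_SHIFT, PAGE_SIZE);
 *	if (!vaddr)
 *		return -ENOMEM;
 *	memcpy(buf, vaddr + offset, csize);
 *	iounmap((void __iomem *)vaddr);
 */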
/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * The PCI/ISA range special-casing was removed from __ioremap()
	 * so this check, in theory, can be removed. However, there are
	 * cases where iounmap() is called for addresses not obtained via
	 * ioremap() (vga16fb for example). Add a warning so that these
	 * cases can be caught and fixed.
	 */
	if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    (void __force *)addr < phys_to_virt(ISA_END_ADDRESS)) {
		WARN(1, "iounmap() called for ISA range not obtained using ioremap()\n");
		return;
	}

	mmiotrace_iounmap(addr);

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	/* Use the vm area unlocked, assuming the caller
	   ensures there isn't another iounmap for the same address
	   in parallel. Reuse of the virtual address is prevented by
	   leaving it in the global lists until we're done with it.
	   cpa takes care of the direct mappings. */
	p = find_vm_area((void __force *)addr);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	memtype_free(p->phys_addr, p->phys_addr + get_vm_area_size(p));

	/* Finally remove it */
	o = remove_vm_area((void __force *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);

int __init arch_ioremap_p4d_supported(void)
{
	return 0;
}

int __init arch_ioremap_pud_supported(void)
{
#ifdef CONFIG_X86_64
	return boot_cpu_has(X86_FEATURE_GBPAGES);
#else
	return 0;
#endif
}

int __init arch_ioremap_pmd_supported(void)
{
	return boot_cpu_has(X86_FEATURE_PSE);
}

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
void *xlate_dev_mem_ptr(phys_addr_t phys)
{
	unsigned long start  = phys &  PAGE_MASK;
	unsigned long offset = phys & ~PAGE_MASK;
	void *vaddr;

	/* memremap() maps if RAM, otherwise falls back to ioremap() */
	vaddr = memremap(start, PAGE_SIZE, MEMREMAP_WB);

	/* Only add the offset on success and return NULL if memremap() failed */
	if (vaddr)
		vaddr += offset;

	return vaddr;
}

void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
{
	memunmap((void *)((unsigned long)addr & PAGE_MASK));
}
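/*
 * Illustrative sketch (not part of this file) of how the /dev/mem read path
 * is expected to use the two helpers above, mirroring drivers/char/mem.c in
 * spirit:
 *
 *	ptr = xlate_dev_mem_ptr(p);
 *	if (!ptr)
 *		return -EFAULT;
 *	if (copy_to_user(buf, ptr, sz))
 *		err = -EFAULT;
 *	unxlate_dev_mem_ptr(p, ptr);
 */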
/*
 * Examine the physical address to determine if it is an area of memory
 * that should be mapped decrypted. If the memory is not part of the
 * kernel usable area, it was accessed and created decrypted, so these
 * areas should be mapped decrypted. And since the encryption key can
 * change across reboots, persistent memory should also be mapped
 * decrypted.
 *
 * If SEV is active, that implies that BIOS/UEFI also ran encrypted so
 * only persistent memory should be mapped decrypted.
 */
static bool memremap_should_map_decrypted(resource_size_t phys_addr,
					  unsigned long size)
{
	int is_pmem;

	/*
	 * Check if the address is part of a persistent memory region.
	 * This check covers areas added by E820, EFI and ACPI.
	 */
	is_pmem = region_intersects(phys_addr, size, IORESOURCE_MEM,
				    IORES_DESC_PERSISTENT_MEMORY);
	if (is_pmem != REGION_DISJOINT)
		return true;

	/*
	 * Check if the non-volatile attribute is set for an EFI
	 * reserved area.
	 */
	if (efi_enabled(EFI_BOOT)) {
		switch (efi_mem_type(phys_addr)) {
		case EFI_RESERVED_TYPE:
			if (efi_mem_attributes(phys_addr) & EFI_MEMORY_NV)
				return true;
			break;
		default:
			break;
		}
	}

	/* Check if the address is outside kernel usable area */
	switch (e820__get_entry_type(phys_addr, phys_addr + size - 1)) {
	case E820_TYPE_RESERVED:
	case E820_TYPE_ACPI:
	case E820_TYPE_NVS:
	case E820_TYPE_UNUSABLE:
		/* For SEV, these areas are encrypted */
		if (sev_active())
			break;
		/* Fallthrough */

	case E820_TYPE_PRAM:
		return true;
	default:
		break;
	}

	return false;
}

/*
 * Examine the physical address to determine if it is EFI data. Check
 * it against the boot params structure and EFI tables and memory types.
 */
static bool memremap_is_efi_data(resource_size_t phys_addr,
				 unsigned long size)
{
	u64 paddr;

	/* Check if the address is part of EFI boot/runtime data */
	if (!efi_enabled(EFI_BOOT))
		return false;

	paddr = boot_params.efi_info.efi_memmap_hi;
	paddr <<= 32;
	paddr |= boot_params.efi_info.efi_memmap;
	if (phys_addr == paddr)
		return true;

	paddr = boot_params.efi_info.efi_systab_hi;
	paddr <<= 32;
	paddr |= boot_params.efi_info.efi_systab;
	if (phys_addr == paddr)
		return true;

	if (efi_is_table_address(phys_addr))
		return true;

	switch (efi_mem_type(phys_addr)) {
	case EFI_BOOT_SERVICES_DATA:
	case EFI_RUNTIME_SERVICES_DATA:
		return true;
	default:
		break;
	}

	return false;
}

/*
 * Examine the physical address to determine if it is boot data by checking
 * it against the boot params setup_data chain.
 */
static bool memremap_is_setup_data(resource_size_t phys_addr,
				   unsigned long size)
{
	struct setup_data *data;
	u64 paddr, paddr_next;

	paddr = boot_params.hdr.setup_data;
	while (paddr) {
		unsigned int len;

		if (phys_addr == paddr)
			return true;

		data = memremap(paddr, sizeof(*data),
				MEMREMAP_WB | MEMREMAP_DEC);

		paddr_next = data->next;
		len = data->len;

		if ((phys_addr > paddr) && (phys_addr < (paddr + len))) {
			memunmap(data);
			return true;
		}

		if (data->type == SETUP_INDIRECT &&
		    ((struct setup_indirect *)data->data)->type != SETUP_INDIRECT) {
			paddr = ((struct setup_indirect *)data->data)->addr;
			len = ((struct setup_indirect *)data->data)->len;
		}

		memunmap(data);

		if ((phys_addr > paddr) && (phys_addr < (paddr + len)))
			return true;

		paddr = paddr_next;
	}

	return false;
}

/*
 * Examine the physical address to determine if it is boot data by checking
 * it against the boot params setup_data chain (early boot version).
 */
static bool __init early_memremap_is_setup_data(resource_size_t phys_addr,
						unsigned long size)
{
	struct setup_data *data;
	u64 paddr, paddr_next;

	paddr = boot_params.hdr.setup_data;
	while (paddr) {
		unsigned int len;

		if (phys_addr == paddr)
			return true;

		data = early_memremap_decrypted(paddr, sizeof(*data));

		paddr_next = data->next;
		len = data->len;

		early_memunmap(data, sizeof(*data));

		if ((phys_addr > paddr) && (phys_addr < (paddr + len)))
			return true;

		paddr = paddr_next;
	}

	return false;
}

/*
 * Architecture function to determine if RAM remap is allowed. By default, a
 * RAM remap will map the data as encrypted. Determine if a RAM remap should
 * not be done so that the data will be mapped decrypted.
 */
bool arch_memremap_can_ram_remap(resource_size_t phys_addr, unsigned long size,
				 unsigned long flags)
{
	if (!mem_encrypt_active())
		return true;

	if (flags & MEMREMAP_ENC)
		return true;

	if (flags & MEMREMAP_DEC)
		return false;

	if (sme_active()) {
		if (memremap_is_setup_data(phys_addr, size) ||
		    memremap_is_efi_data(phys_addr, size))
			return false;
	}

	return !memremap_should_map_decrypted(phys_addr, size);
}
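/*
 * Illustrative example (not part of this file): a caller that knows a region
 * was written unencrypted (e.g. by firmware) can bypass the heuristics above
 * by passing MEMREMAP_DEC explicitly, as memremap_is_setup_data() does for
 * the setup_data chain earlier in this file:
 *
 *	data = memremap(paddr, sizeof(*data), MEMREMAP_WB | MEMREMAP_DEC);
 *	...
 *	memunmap(data);
 */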
/*
 * Architecture override of __weak function to adjust the protection attributes
 * used when remapping memory. By default, early_memremap() will map the data
 * as encrypted. Determine if an encrypted mapping should not be done and set
 * the appropriate protection attributes.
 */
pgprot_t __init early_memremap_pgprot_adjust(resource_size_t phys_addr,
					     unsigned long size,
					     pgprot_t prot)
{
	bool encrypted_prot;

	if (!mem_encrypt_active())
		return prot;

	encrypted_prot = true;

	if (sme_active()) {
		if (early_memremap_is_setup_data(phys_addr, size) ||
		    memremap_is_efi_data(phys_addr, size))
			encrypted_prot = false;
	}

	if (encrypted_prot && memremap_should_map_decrypted(phys_addr, size))
		encrypted_prot = false;

	return encrypted_prot ? pgprot_encrypted(prot)
			      : pgprot_decrypted(prot);
}

bool phys_mem_access_encrypted(unsigned long phys_addr, unsigned long size)
{
	return arch_memremap_can_ram_remap(phys_addr, size, 0);
}

#ifdef CONFIG_AMD_MEM_ENCRYPT
/* Remap memory with encryption */
void __init *early_memremap_encrypted(resource_size_t phys_addr,
				      unsigned long size)
{
	return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_ENC);
}

/*
 * Remap memory with encryption and write-protected - cannot be called
 * before pat_init() is called
 */
void __init *early_memremap_encrypted_wp(resource_size_t phys_addr,
					 unsigned long size)
{
	/* Be sure the write-protect PAT entry is set for write-protect */
	if (__pte2cachemode_tbl[_PAGE_CACHE_MODE_WP] != _PAGE_CACHE_MODE_WP)
		return NULL;

	return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_ENC_WP);
}

/* Remap memory without encryption */
void __init *early_memremap_decrypted(resource_size_t phys_addr,
				      unsigned long size)
{
	return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_NOENC);
}

/*
 * Remap memory without encryption and write-protected - cannot be called
 * before pat_init() is called
 */
void __init *early_memremap_decrypted_wp(resource_size_t phys_addr,
					 unsigned long size)
{
	/* Be sure the write-protect PAT entry is set for write-protect */
	if (__pte2cachemode_tbl[_PAGE_CACHE_MODE_WP] != _PAGE_CACHE_MODE_WP)
		return NULL;

	return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_NOENC_WP);
}
#endif	/* CONFIG_AMD_MEM_ENCRYPT */
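/*
 * Illustrative note (not part of this file): the decrypted variant above is
 * what early_memremap_is_setup_data() earlier in this file uses to walk the
 * firmware-written setup_data chain before the normal memremap() machinery
 * is available:
 *
 *	data = early_memremap_decrypted(paddr, sizeof(*data));
 *	...
 *	early_memunmap(data, sizeof(*data));
 */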
static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
	/* Don't assume we're using swapper_pg_dir at this point */
	pgd_t *base = __va(read_cr3_pa());
	pgd_t *pgd = &base[pgd_index(addr)];
	p4d_t *p4d = p4d_offset(pgd, addr);
	pud_t *pud = pud_offset(p4d, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}

bool __init is_early_ioremap_ptep(pte_t *ptep)
{
	return ptep >= &bm_pte[0] && ptep < &bm_pte[PAGE_SIZE/sizeof(pte_t)];
}

void __init early_ioremap_init(void)
{
	pmd_t *pmd;

#ifdef CONFIG_X86_64
	BUILD_BUG_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
#else
	WARN_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
#endif

	early_ioremap_setup();

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	memset(bm_pte, 0, sizeof(bm_pte));
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
#define __FIXADDR_TOP (-PAGE_SIZE)
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
#undef __FIXADDR_TOP
	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pmd %p != %p\n",
		       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
			fix_to_virt(FIX_BTMAP_END));

		printk(KERN_WARNING "FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN:     %d\n",
		       FIX_BTMAP_BEGIN);
	}
}
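/*
 * Illustrative only (not part of this file): once early_ioremap_init() has
 * run, boot code can temporarily map firmware data through the fixmap-based
 * helpers before the normal ioremap()/memremap() machinery is up, e.g. the
 * way the x86 devicetree code maps an initial DTB (names are assumptions):
 *
 *	dt = early_memremap(initial_dtb, map_len);
 *	...
 *	early_memunmap(dt, map_len);
 */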
void __init __early_set_fixmap(enum fixed_addresses idx,
			       phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);

	/* Sanitize 'prot' against any unsupported bits: */
	pgprot_val(flags) &= __supported_pte_mask;

	if (pgprot_val(flags))
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	else
		pte_clear(&init_mm, addr, pte);
	__flush_tlb_one_kernel(addr);
}