1457c8996SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only 2e64c8aa0SThomas Gleixner /* 3e64c8aa0SThomas Gleixner * Re-map IO memory to kernel address space so that we can access it. 4e64c8aa0SThomas Gleixner * This is needed for high PCI addresses that aren't mapped in the 5e64c8aa0SThomas Gleixner * 640k-1MB IO memory area on PC's 6e64c8aa0SThomas Gleixner * 7e64c8aa0SThomas Gleixner * (C) Copyright 1995 1996 Linus Torvalds 8e64c8aa0SThomas Gleixner */ 9e64c8aa0SThomas Gleixner 1057c8a661SMike Rapoport #include <linux/memblock.h> 11e64c8aa0SThomas Gleixner #include <linux/init.h> 12e64c8aa0SThomas Gleixner #include <linux/io.h> 139de94dbbSIngo Molnar #include <linux/ioport.h> 14e64c8aa0SThomas Gleixner #include <linux/slab.h> 15e64c8aa0SThomas Gleixner #include <linux/vmalloc.h> 16d61fc448SPekka Paalanen #include <linux/mmiotrace.h> 1732cb4d02STom Lendacky #include <linux/cc_platform.h> 188f716c9bSTom Lendacky #include <linux/efi.h> 1965fddcfcSMike Rapoport #include <linux/pgtable.h> 20b073d7f8SAlexander Potapenko #include <linux/kmsan.h> 21e64c8aa0SThomas Gleixner 22d1163651SLaura Abbott #include <asm/set_memory.h> 2366441bd3SIngo Molnar #include <asm/e820/api.h> 24e55f31a5SArd Biesheuvel #include <asm/efi.h> 25e64c8aa0SThomas Gleixner #include <asm/fixmap.h> 26e64c8aa0SThomas Gleixner #include <asm/tlbflush.h> 27f6df72e7SJeremy Fitzhardinge #include <asm/pgalloc.h> 28eb243d1dSIngo Molnar #include <asm/memtype.h> 298f716c9bSTom Lendacky #include <asm/setup.h> 30e64c8aa0SThomas Gleixner 3178c86e5eSJeremy Fitzhardinge #include "physaddr.h" 32e64c8aa0SThomas Gleixner 335da04cc8SLianbo Jiang /* 345da04cc8SLianbo Jiang * Descriptor controlling ioremap() behavior. 355da04cc8SLianbo Jiang */ 365da04cc8SLianbo Jiang struct ioremap_desc { 375da04cc8SLianbo Jiang unsigned int flags; 380e4c12b4STom Lendacky }; 390e4c12b4STom Lendacky 40e64c8aa0SThomas Gleixner /* 41e64c8aa0SThomas Gleixner * Fix up the linear direct mapping of the kernel to avoid cache attribute 42e64c8aa0SThomas Gleixner * conflicts. 43e64c8aa0SThomas Gleixner */ 443a96ce8cSvenkatesh.pallipadi@intel.com int ioremap_change_attr(unsigned long vaddr, unsigned long size, 45b14097bdSJuergen Gross enum page_cache_mode pcm) 46e64c8aa0SThomas Gleixner { 47d806e5eeSThomas Gleixner unsigned long nrpages = size >> PAGE_SHIFT; 4893809be8SHarvey Harrison int err; 49e64c8aa0SThomas Gleixner 50b14097bdSJuergen Gross switch (pcm) { 51b14097bdSJuergen Gross case _PAGE_CACHE_MODE_UC: 52d806e5eeSThomas Gleixner default: 531219333dSvenkatesh.pallipadi@intel.com err = _set_memory_uc(vaddr, nrpages); 54d806e5eeSThomas Gleixner break; 55b14097bdSJuergen Gross case _PAGE_CACHE_MODE_WC: 56b310f381Svenkatesh.pallipadi@intel.com err = _set_memory_wc(vaddr, nrpages); 57b310f381Svenkatesh.pallipadi@intel.com break; 58623dffb2SToshi Kani case _PAGE_CACHE_MODE_WT: 59623dffb2SToshi Kani err = _set_memory_wt(vaddr, nrpages); 60623dffb2SToshi Kani break; 61b14097bdSJuergen Gross case _PAGE_CACHE_MODE_WB: 621219333dSvenkatesh.pallipadi@intel.com err = _set_memory_wb(vaddr, nrpages); 63d806e5eeSThomas Gleixner break; 64d806e5eeSThomas Gleixner } 65e64c8aa0SThomas Gleixner 66e64c8aa0SThomas Gleixner return err; 67e64c8aa0SThomas Gleixner } 68e64c8aa0SThomas Gleixner 695da04cc8SLianbo Jiang /* Does the range (or a subset of) contain normal RAM? */ 705da04cc8SLianbo Jiang static unsigned int __ioremap_check_ram(struct resource *res) 71c81c8a1eSRoland Dreier { 720e4c12b4STom Lendacky unsigned long start_pfn, stop_pfn; 73c81c8a1eSRoland Dreier unsigned long i; 74c81c8a1eSRoland Dreier 750e4c12b4STom Lendacky if ((res->flags & IORESOURCE_SYSTEM_RAM) != IORESOURCE_SYSTEM_RAM) 765da04cc8SLianbo Jiang return 0; 770e4c12b4STom Lendacky 780e4c12b4STom Lendacky start_pfn = (res->start + PAGE_SIZE - 1) >> PAGE_SHIFT; 790e4c12b4STom Lendacky stop_pfn = (res->end + 1) >> PAGE_SHIFT; 800e4c12b4STom Lendacky if (stop_pfn > start_pfn) { 810e4c12b4STom Lendacky for (i = 0; i < (stop_pfn - start_pfn); ++i) 82c81c8a1eSRoland Dreier if (pfn_valid(start_pfn + i) && 83c81c8a1eSRoland Dreier !PageReserved(pfn_to_page(start_pfn + i))) 845da04cc8SLianbo Jiang return IORES_MAP_SYSTEM_RAM; 850e4c12b4STom Lendacky } 86c81c8a1eSRoland Dreier 875da04cc8SLianbo Jiang return 0; 880e4c12b4STom Lendacky } 890e4c12b4STom Lendacky 905da04cc8SLianbo Jiang /* 915da04cc8SLianbo Jiang * In a SEV guest, NONE and RESERVED should not be mapped encrypted because 925da04cc8SLianbo Jiang * there the whole memory is already encrypted. 935da04cc8SLianbo Jiang */ 945da04cc8SLianbo Jiang static unsigned int __ioremap_check_encrypted(struct resource *res) 950e4c12b4STom Lendacky { 964d96f910STom Lendacky if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) 975da04cc8SLianbo Jiang return 0; 985da04cc8SLianbo Jiang 995da04cc8SLianbo Jiang switch (res->desc) { 1005da04cc8SLianbo Jiang case IORES_DESC_NONE: 1015da04cc8SLianbo Jiang case IORES_DESC_RESERVED: 1025da04cc8SLianbo Jiang break; 1035da04cc8SLianbo Jiang default: 1045da04cc8SLianbo Jiang return IORES_MAP_ENCRYPTED; 1050e4c12b4STom Lendacky } 1060e4c12b4STom Lendacky 1075da04cc8SLianbo Jiang return 0; 1085da04cc8SLianbo Jiang } 1095da04cc8SLianbo Jiang 110985e537aSTom Lendacky /* 111985e537aSTom Lendacky * The EFI runtime services data area is not covered by walk_mem_res(), but must 112985e537aSTom Lendacky * be mapped encrypted when SEV is active. 113985e537aSTom Lendacky */ 114985e537aSTom Lendacky static void __ioremap_check_other(resource_size_t addr, struct ioremap_desc *desc) 115985e537aSTom Lendacky { 1164d96f910STom Lendacky if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) 117985e537aSTom Lendacky return; 118985e537aSTom Lendacky 119870b4333SBorislav Petkov if (!IS_ENABLED(CONFIG_EFI)) 120870b4333SBorislav Petkov return; 121870b4333SBorislav Petkov 1228d651ee9STom Lendacky if (efi_mem_type(addr) == EFI_RUNTIME_SERVICES_DATA || 1238d651ee9STom Lendacky (efi_mem_type(addr) == EFI_BOOT_SERVICES_DATA && 1248d651ee9STom Lendacky efi_mem_attributes(addr) & EFI_MEMORY_RUNTIME)) 125985e537aSTom Lendacky desc->flags |= IORES_MAP_ENCRYPTED; 126985e537aSTom Lendacky } 127985e537aSTom Lendacky 1285da04cc8SLianbo Jiang static int __ioremap_collect_map_flags(struct resource *res, void *arg) 1290e4c12b4STom Lendacky { 1305da04cc8SLianbo Jiang struct ioremap_desc *desc = arg; 1310e4c12b4STom Lendacky 1325da04cc8SLianbo Jiang if (!(desc->flags & IORES_MAP_SYSTEM_RAM)) 1335da04cc8SLianbo Jiang desc->flags |= __ioremap_check_ram(res); 1340e4c12b4STom Lendacky 1355da04cc8SLianbo Jiang if (!(desc->flags & IORES_MAP_ENCRYPTED)) 1365da04cc8SLianbo Jiang desc->flags |= __ioremap_check_encrypted(res); 1370e4c12b4STom Lendacky 1385da04cc8SLianbo Jiang return ((desc->flags & (IORES_MAP_SYSTEM_RAM | IORES_MAP_ENCRYPTED)) == 1395da04cc8SLianbo Jiang (IORES_MAP_SYSTEM_RAM | IORES_MAP_ENCRYPTED)); 1400e4c12b4STom Lendacky } 1410e4c12b4STom Lendacky 1420e4c12b4STom Lendacky /* 1430e4c12b4STom Lendacky * To avoid multiple resource walks, this function walks resources marked as 1440e4c12b4STom Lendacky * IORESOURCE_MEM and IORESOURCE_BUSY and looking for system RAM and/or a 1450e4c12b4STom Lendacky * resource described not as IORES_DESC_NONE (e.g. IORES_DESC_ACPI_TABLES). 146985e537aSTom Lendacky * 147985e537aSTom Lendacky * After that, deal with misc other ranges in __ioremap_check_other() which do 148985e537aSTom Lendacky * not fall into the above category. 1490e4c12b4STom Lendacky */ 1500e4c12b4STom Lendacky static void __ioremap_check_mem(resource_size_t addr, unsigned long size, 1515da04cc8SLianbo Jiang struct ioremap_desc *desc) 1520e4c12b4STom Lendacky { 1530e4c12b4STom Lendacky u64 start, end; 1540e4c12b4STom Lendacky 1550e4c12b4STom Lendacky start = (u64)addr; 1560e4c12b4STom Lendacky end = start + size - 1; 1575da04cc8SLianbo Jiang memset(desc, 0, sizeof(struct ioremap_desc)); 1580e4c12b4STom Lendacky 1595da04cc8SLianbo Jiang walk_mem_res(start, end, desc, __ioremap_collect_map_flags); 160985e537aSTom Lendacky 161985e537aSTom Lendacky __ioremap_check_other(addr, desc); 162c81c8a1eSRoland Dreier } 163c81c8a1eSRoland Dreier 164e64c8aa0SThomas Gleixner /* 165e64c8aa0SThomas Gleixner * Remap an arbitrary physical address space into the kernel virtual 1665d72b4fbSToshi Kani * address space. It transparently creates kernel huge I/O mapping when 1675d72b4fbSToshi Kani * the physical address is aligned by a huge page size (1GB or 2MB) and 1685d72b4fbSToshi Kani * the requested size is at least the huge page size. 1695d72b4fbSToshi Kani * 1705d72b4fbSToshi Kani * NOTE: MTRRs can override PAT memory types with a 4KB granularity. 1715d72b4fbSToshi Kani * Therefore, the mapping code falls back to use a smaller page toward 4KB 1725d72b4fbSToshi Kani * when a mapping range is covered by non-WB type of MTRRs. 173e64c8aa0SThomas Gleixner * 174e64c8aa0SThomas Gleixner * NOTE! We need to allow non-page-aligned mappings too: we will obviously 175e64c8aa0SThomas Gleixner * have to convert them into an offset in a page-aligned mapping, but the 176e64c8aa0SThomas Gleixner * caller shouldn't need to know that small detail. 177e64c8aa0SThomas Gleixner */ 1785da04cc8SLianbo Jiang static void __iomem * 1795da04cc8SLianbo Jiang __ioremap_caller(resource_size_t phys_addr, unsigned long size, 1805da04cc8SLianbo Jiang enum page_cache_mode pcm, void *caller, bool encrypted) 181e64c8aa0SThomas Gleixner { 182ffa71f33SKenji Kaneshige unsigned long offset, vaddr; 1830e4c12b4STom Lendacky resource_size_t last_addr; 18487e547feSPekka Paalanen const resource_size_t unaligned_phys_addr = phys_addr; 18587e547feSPekka Paalanen const unsigned long unaligned_size = size; 1865da04cc8SLianbo Jiang struct ioremap_desc io_desc; 187e64c8aa0SThomas Gleixner struct vm_struct *area; 188b14097bdSJuergen Gross enum page_cache_mode new_pcm; 189d806e5eeSThomas Gleixner pgprot_t prot; 190dee7cbb2SVenki Pallipadi int retval; 191d61fc448SPekka Paalanen void __iomem *ret_addr; 192e64c8aa0SThomas Gleixner 193e64c8aa0SThomas Gleixner /* Don't allow wraparound or zero size */ 194e64c8aa0SThomas Gleixner last_addr = phys_addr + size - 1; 195e64c8aa0SThomas Gleixner if (!size || last_addr < phys_addr) 196e64c8aa0SThomas Gleixner return NULL; 197e64c8aa0SThomas Gleixner 198e3100c82SThomas Gleixner if (!phys_addr_valid(phys_addr)) { 1996997ab49Svenkatesh.pallipadi@intel.com printk(KERN_WARNING "ioremap: invalid physical address %llx\n", 2004c8337acSRandy Dunlap (unsigned long long)phys_addr); 201e3100c82SThomas Gleixner WARN_ON_ONCE(1); 202e3100c82SThomas Gleixner return NULL; 203e3100c82SThomas Gleixner } 204e3100c82SThomas Gleixner 2055da04cc8SLianbo Jiang __ioremap_check_mem(phys_addr, size, &io_desc); 2060e4c12b4STom Lendacky 207e64c8aa0SThomas Gleixner /* 208e64c8aa0SThomas Gleixner * Don't allow anybody to remap normal RAM that we're using.. 209e64c8aa0SThomas Gleixner */ 2105da04cc8SLianbo Jiang if (io_desc.flags & IORES_MAP_SYSTEM_RAM) { 2118a0a5da6SThomas Gleixner WARN_ONCE(1, "ioremap on RAM at %pa - %pa\n", 2128a0a5da6SThomas Gleixner &phys_addr, &last_addr); 213e64c8aa0SThomas Gleixner return NULL; 214906e36c5SMike Travis } 2159a58eebeSToshi Kani 216d7677d40Svenkatesh.pallipadi@intel.com /* 217d7677d40Svenkatesh.pallipadi@intel.com * Mappings have to be page-aligned 218d7677d40Svenkatesh.pallipadi@intel.com */ 219d7677d40Svenkatesh.pallipadi@intel.com offset = phys_addr & ~PAGE_MASK; 220*4dbd6a3eSMichael Kelley phys_addr &= PAGE_MASK; 221d7677d40Svenkatesh.pallipadi@intel.com size = PAGE_ALIGN(last_addr+1) - phys_addr; 222d7677d40Svenkatesh.pallipadi@intel.com 223*4dbd6a3eSMichael Kelley /* 224*4dbd6a3eSMichael Kelley * Mask out any bits not part of the actual physical 225*4dbd6a3eSMichael Kelley * address, like memory encryption bits. 226*4dbd6a3eSMichael Kelley */ 227*4dbd6a3eSMichael Kelley phys_addr &= PHYSICAL_PAGE_MASK; 228*4dbd6a3eSMichael Kelley 229ecdd6ee7SIngo Molnar retval = memtype_reserve(phys_addr, (u64)phys_addr + size, 230e00c8cc9SJuergen Gross pcm, &new_pcm); 231dee7cbb2SVenki Pallipadi if (retval) { 232ecdd6ee7SIngo Molnar printk(KERN_ERR "ioremap memtype_reserve failed %d\n", retval); 233dee7cbb2SVenki Pallipadi return NULL; 234dee7cbb2SVenki Pallipadi } 235dee7cbb2SVenki Pallipadi 236b14097bdSJuergen Gross if (pcm != new_pcm) { 237b14097bdSJuergen Gross if (!is_new_memtype_allowed(phys_addr, size, pcm, new_pcm)) { 238279e669bSVenkatesh Pallipadi printk(KERN_ERR 239b14097bdSJuergen Gross "ioremap error for 0x%llx-0x%llx, requested 0x%x, got 0x%x\n", 2404c8337acSRandy Dunlap (unsigned long long)phys_addr, 2414c8337acSRandy Dunlap (unsigned long long)(phys_addr + size), 242b14097bdSJuergen Gross pcm, new_pcm); 243de2a47cfSXiaotian Feng goto err_free_memtype; 244d7677d40Svenkatesh.pallipadi@intel.com } 245b14097bdSJuergen Gross pcm = new_pcm; 246d7677d40Svenkatesh.pallipadi@intel.com } 247d7677d40Svenkatesh.pallipadi@intel.com 2480e4c12b4STom Lendacky /* 2490e4c12b4STom Lendacky * If the page being mapped is in memory and SEV is active then 2500e4c12b4STom Lendacky * make sure the memory encryption attribute is enabled in the 2510e4c12b4STom Lendacky * resulting mapping. 2529aa6ea69SKirill A. Shutemov * In TDX guests, memory is marked private by default. If encryption 2539aa6ea69SKirill A. Shutemov * is not requested (using encrypted), explicitly set decrypt 2549aa6ea69SKirill A. Shutemov * attribute in all IOREMAPPED memory. 2550e4c12b4STom Lendacky */ 256be43d728SJeremy Fitzhardinge prot = PAGE_KERNEL_IO; 2575da04cc8SLianbo Jiang if ((io_desc.flags & IORES_MAP_ENCRYPTED) || encrypted) 2580e4c12b4STom Lendacky prot = pgprot_encrypted(prot); 2599aa6ea69SKirill A. Shutemov else 2609aa6ea69SKirill A. Shutemov prot = pgprot_decrypted(prot); 2610e4c12b4STom Lendacky 262b14097bdSJuergen Gross switch (pcm) { 263b14097bdSJuergen Gross case _PAGE_CACHE_MODE_UC: 264b14097bdSJuergen Gross default: 265b14097bdSJuergen Gross prot = __pgprot(pgprot_val(prot) | 266b14097bdSJuergen Gross cachemode2protval(_PAGE_CACHE_MODE_UC)); 267b14097bdSJuergen Gross break; 268b14097bdSJuergen Gross case _PAGE_CACHE_MODE_UC_MINUS: 269b14097bdSJuergen Gross prot = __pgprot(pgprot_val(prot) | 270b14097bdSJuergen Gross cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS)); 271b14097bdSJuergen Gross break; 272b14097bdSJuergen Gross case _PAGE_CACHE_MODE_WC: 273b14097bdSJuergen Gross prot = __pgprot(pgprot_val(prot) | 274b14097bdSJuergen Gross cachemode2protval(_PAGE_CACHE_MODE_WC)); 275b14097bdSJuergen Gross break; 276d838270eSToshi Kani case _PAGE_CACHE_MODE_WT: 277d838270eSToshi Kani prot = __pgprot(pgprot_val(prot) | 278d838270eSToshi Kani cachemode2protval(_PAGE_CACHE_MODE_WT)); 279d838270eSToshi Kani break; 280b14097bdSJuergen Gross case _PAGE_CACHE_MODE_WB: 281d806e5eeSThomas Gleixner break; 282d806e5eeSThomas Gleixner } 283e64c8aa0SThomas Gleixner 284e64c8aa0SThomas Gleixner /* 285e64c8aa0SThomas Gleixner * Ok, go for it.. 286e64c8aa0SThomas Gleixner */ 28723016969SChristoph Lameter area = get_vm_area_caller(size, VM_IOREMAP, caller); 288e64c8aa0SThomas Gleixner if (!area) 289de2a47cfSXiaotian Feng goto err_free_memtype; 290e64c8aa0SThomas Gleixner area->phys_addr = phys_addr; 291e66aadbeSThomas Gleixner vaddr = (unsigned long) area->addr; 29243a432b1SSuresh Siddha 293ecdd6ee7SIngo Molnar if (memtype_kernel_map_sync(phys_addr, size, pcm)) 294de2a47cfSXiaotian Feng goto err_free_area; 295e64c8aa0SThomas Gleixner 296de2a47cfSXiaotian Feng if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) 297de2a47cfSXiaotian Feng goto err_free_area; 298e64c8aa0SThomas Gleixner 299d61fc448SPekka Paalanen ret_addr = (void __iomem *) (vaddr + offset); 30087e547feSPekka Paalanen mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr); 301d61fc448SPekka Paalanen 302c7a7b814STim Gardner /* 303c7a7b814STim Gardner * Check if the request spans more than any BAR in the iomem resource 304c7a7b814STim Gardner * tree. 305c7a7b814STim Gardner */ 3069abb0ecdSLaura Abbott if (iomem_map_sanity_check(unaligned_phys_addr, unaligned_size)) 3079abb0ecdSLaura Abbott pr_warn("caller %pS mapping multiple BARs\n", caller); 308c7a7b814STim Gardner 309d61fc448SPekka Paalanen return ret_addr; 310de2a47cfSXiaotian Feng err_free_area: 311de2a47cfSXiaotian Feng free_vm_area(area); 312de2a47cfSXiaotian Feng err_free_memtype: 313ecdd6ee7SIngo Molnar memtype_free(phys_addr, phys_addr + size); 314de2a47cfSXiaotian Feng return NULL; 315e64c8aa0SThomas Gleixner } 316e64c8aa0SThomas Gleixner 317e64c8aa0SThomas Gleixner /** 318c0d94aa5SChristoph Hellwig * ioremap - map bus memory into CPU space 3199efc31b8SWanpeng Li * @phys_addr: bus address of the memory 320e64c8aa0SThomas Gleixner * @size: size of the resource to map 321e64c8aa0SThomas Gleixner * 322c0d94aa5SChristoph Hellwig * ioremap performs a platform specific sequence of operations to 323e64c8aa0SThomas Gleixner * make bus memory CPU accessible via the readb/readw/readl/writeb/ 324e64c8aa0SThomas Gleixner * writew/writel functions and the other mmio helpers. The returned 325e64c8aa0SThomas Gleixner * address is not guaranteed to be usable directly as a virtual 326e64c8aa0SThomas Gleixner * address. 327e64c8aa0SThomas Gleixner * 328e64c8aa0SThomas Gleixner * This version of ioremap ensures that the memory is marked uncachable 329e64c8aa0SThomas Gleixner * on the CPU as well as honouring existing caching rules from things like 330e64c8aa0SThomas Gleixner * the PCI bus. Note that there are other caches and buffers on many 331e64c8aa0SThomas Gleixner * busses. In particular driver authors should read up on PCI writes 332e64c8aa0SThomas Gleixner * 333e64c8aa0SThomas Gleixner * It's useful if some control registers are in such an area and 334e64c8aa0SThomas Gleixner * write combining or read caching is not desirable: 335e64c8aa0SThomas Gleixner * 336e64c8aa0SThomas Gleixner * Must be freed with iounmap. 337e64c8aa0SThomas Gleixner */ 338c0d94aa5SChristoph Hellwig void __iomem *ioremap(resource_size_t phys_addr, unsigned long size) 339e64c8aa0SThomas Gleixner { 340de33c442SSuresh Siddha /* 341de33c442SSuresh Siddha * Ideally, this should be: 342cb32edf6SLuis R. Rodriguez * pat_enabled() ? _PAGE_CACHE_MODE_UC : _PAGE_CACHE_MODE_UC_MINUS; 343de33c442SSuresh Siddha * 344de33c442SSuresh Siddha * Till we fix all X drivers to use ioremap_wc(), we will use 345e4b6be33SLuis R. Rodriguez * UC MINUS. Drivers that are certain they need or can already 346e4b6be33SLuis R. Rodriguez * be converted over to strong UC can use ioremap_uc(). 347de33c442SSuresh Siddha */ 348b14097bdSJuergen Gross enum page_cache_mode pcm = _PAGE_CACHE_MODE_UC_MINUS; 349de33c442SSuresh Siddha 350b14097bdSJuergen Gross return __ioremap_caller(phys_addr, size, pcm, 351c3a7a61cSLianbo Jiang __builtin_return_address(0), false); 352e64c8aa0SThomas Gleixner } 353c0d94aa5SChristoph Hellwig EXPORT_SYMBOL(ioremap); 354e64c8aa0SThomas Gleixner 355b310f381Svenkatesh.pallipadi@intel.com /** 356e4b6be33SLuis R. Rodriguez * ioremap_uc - map bus memory into CPU space as strongly uncachable 357e4b6be33SLuis R. Rodriguez * @phys_addr: bus address of the memory 358e4b6be33SLuis R. Rodriguez * @size: size of the resource to map 359e4b6be33SLuis R. Rodriguez * 360e4b6be33SLuis R. Rodriguez * ioremap_uc performs a platform specific sequence of operations to 361e4b6be33SLuis R. Rodriguez * make bus memory CPU accessible via the readb/readw/readl/writeb/ 362e4b6be33SLuis R. Rodriguez * writew/writel functions and the other mmio helpers. The returned 363e4b6be33SLuis R. Rodriguez * address is not guaranteed to be usable directly as a virtual 364e4b6be33SLuis R. Rodriguez * address. 365e4b6be33SLuis R. Rodriguez * 366e4b6be33SLuis R. Rodriguez * This version of ioremap ensures that the memory is marked with a strong 367e4b6be33SLuis R. Rodriguez * preference as completely uncachable on the CPU when possible. For non-PAT 368e4b6be33SLuis R. Rodriguez * systems this ends up setting page-attribute flags PCD=1, PWT=1. For PAT 369e4b6be33SLuis R. Rodriguez * systems this will set the PAT entry for the pages as strong UC. This call 370e4b6be33SLuis R. Rodriguez * will honor existing caching rules from things like the PCI bus. Note that 371e4b6be33SLuis R. Rodriguez * there are other caches and buffers on many busses. In particular driver 372e4b6be33SLuis R. Rodriguez * authors should read up on PCI writes. 373e4b6be33SLuis R. Rodriguez * 374e4b6be33SLuis R. Rodriguez * It's useful if some control registers are in such an area and 375e4b6be33SLuis R. Rodriguez * write combining or read caching is not desirable: 376e4b6be33SLuis R. Rodriguez * 377e4b6be33SLuis R. Rodriguez * Must be freed with iounmap. 378e4b6be33SLuis R. Rodriguez */ 379e4b6be33SLuis R. Rodriguez void __iomem *ioremap_uc(resource_size_t phys_addr, unsigned long size) 380e4b6be33SLuis R. Rodriguez { 381e4b6be33SLuis R. Rodriguez enum page_cache_mode pcm = _PAGE_CACHE_MODE_UC; 382e4b6be33SLuis R. Rodriguez 383e4b6be33SLuis R. Rodriguez return __ioremap_caller(phys_addr, size, pcm, 384c3a7a61cSLianbo Jiang __builtin_return_address(0), false); 385e4b6be33SLuis R. Rodriguez } 386e4b6be33SLuis R. Rodriguez EXPORT_SYMBOL_GPL(ioremap_uc); 387e4b6be33SLuis R. Rodriguez 388e4b6be33SLuis R. Rodriguez /** 389b310f381Svenkatesh.pallipadi@intel.com * ioremap_wc - map memory into CPU space write combined 3909efc31b8SWanpeng Li * @phys_addr: bus address of the memory 391b310f381Svenkatesh.pallipadi@intel.com * @size: size of the resource to map 392b310f381Svenkatesh.pallipadi@intel.com * 393b310f381Svenkatesh.pallipadi@intel.com * This version of ioremap ensures that the memory is marked write combining. 394b310f381Svenkatesh.pallipadi@intel.com * Write combining allows faster writes to some hardware devices. 395b310f381Svenkatesh.pallipadi@intel.com * 396b310f381Svenkatesh.pallipadi@intel.com * Must be freed with iounmap. 397b310f381Svenkatesh.pallipadi@intel.com */ 398d639bab8Svenkatesh.pallipadi@intel.com void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size) 399b310f381Svenkatesh.pallipadi@intel.com { 400b14097bdSJuergen Gross return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WC, 401c3a7a61cSLianbo Jiang __builtin_return_address(0), false); 402b310f381Svenkatesh.pallipadi@intel.com } 403b310f381Svenkatesh.pallipadi@intel.com EXPORT_SYMBOL(ioremap_wc); 404b310f381Svenkatesh.pallipadi@intel.com 405d838270eSToshi Kani /** 406d838270eSToshi Kani * ioremap_wt - map memory into CPU space write through 407d838270eSToshi Kani * @phys_addr: bus address of the memory 408d838270eSToshi Kani * @size: size of the resource to map 409d838270eSToshi Kani * 410d838270eSToshi Kani * This version of ioremap ensures that the memory is marked write through. 411d838270eSToshi Kani * Write through stores data into memory while keeping the cache up-to-date. 412d838270eSToshi Kani * 413d838270eSToshi Kani * Must be freed with iounmap. 414d838270eSToshi Kani */ 415d838270eSToshi Kani void __iomem *ioremap_wt(resource_size_t phys_addr, unsigned long size) 416d838270eSToshi Kani { 417d838270eSToshi Kani return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WT, 418c3a7a61cSLianbo Jiang __builtin_return_address(0), false); 419d838270eSToshi Kani } 420d838270eSToshi Kani EXPORT_SYMBOL(ioremap_wt); 421d838270eSToshi Kani 422c3a7a61cSLianbo Jiang void __iomem *ioremap_encrypted(resource_size_t phys_addr, unsigned long size) 423c3a7a61cSLianbo Jiang { 424c3a7a61cSLianbo Jiang return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WB, 425c3a7a61cSLianbo Jiang __builtin_return_address(0), true); 426c3a7a61cSLianbo Jiang } 427c3a7a61cSLianbo Jiang EXPORT_SYMBOL(ioremap_encrypted); 428c3a7a61cSLianbo Jiang 429b9e76a00SLinus Torvalds void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size) 4305f868152SThomas Gleixner { 431b14097bdSJuergen Gross return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WB, 432c3a7a61cSLianbo Jiang __builtin_return_address(0), false); 4335f868152SThomas Gleixner } 4345f868152SThomas Gleixner EXPORT_SYMBOL(ioremap_cache); 4355f868152SThomas Gleixner 43628b2ee20SRik van Riel void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size, 43728b2ee20SRik van Riel unsigned long prot_val) 43828b2ee20SRik van Riel { 439b14097bdSJuergen Gross return __ioremap_caller(phys_addr, size, 440b14097bdSJuergen Gross pgprot2cachemode(__pgprot(prot_val)), 441c3a7a61cSLianbo Jiang __builtin_return_address(0), false); 44228b2ee20SRik van Riel } 44328b2ee20SRik van Riel EXPORT_SYMBOL(ioremap_prot); 44428b2ee20SRik van Riel 445e64c8aa0SThomas Gleixner /** 446e64c8aa0SThomas Gleixner * iounmap - Free a IO remapping 447e64c8aa0SThomas Gleixner * @addr: virtual address from ioremap_* 448e64c8aa0SThomas Gleixner * 449e64c8aa0SThomas Gleixner * Caller must ensure there is only one unmapping for the same pointer. 450e64c8aa0SThomas Gleixner */ 451e64c8aa0SThomas Gleixner void iounmap(volatile void __iomem *addr) 452e64c8aa0SThomas Gleixner { 453e64c8aa0SThomas Gleixner struct vm_struct *p, *o; 454e64c8aa0SThomas Gleixner 455e64c8aa0SThomas Gleixner if ((void __force *)addr <= high_memory) 456e64c8aa0SThomas Gleixner return; 457e64c8aa0SThomas Gleixner 458e64c8aa0SThomas Gleixner /* 45933c2b803STom Lendacky * The PCI/ISA range special-casing was removed from __ioremap() 46033c2b803STom Lendacky * so this check, in theory, can be removed. However, there are 46133c2b803STom Lendacky * cases where iounmap() is called for addresses not obtained via 46233c2b803STom Lendacky * ioremap() (vga16fb for example). Add a warning so that these 46333c2b803STom Lendacky * cases can be caught and fixed. 464e64c8aa0SThomas Gleixner */ 4656e92a5a6SThomas Gleixner if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) && 46633c2b803STom Lendacky (void __force *)addr < phys_to_virt(ISA_END_ADDRESS)) { 46733c2b803STom Lendacky WARN(1, "iounmap() called for ISA range not obtained using ioremap()\n"); 468e64c8aa0SThomas Gleixner return; 46933c2b803STom Lendacky } 470e64c8aa0SThomas Gleixner 4716d60ce38SKarol Herbst mmiotrace_iounmap(addr); 4726d60ce38SKarol Herbst 473e64c8aa0SThomas Gleixner addr = (volatile void __iomem *) 474e64c8aa0SThomas Gleixner (PAGE_MASK & (unsigned long __force)addr); 475e64c8aa0SThomas Gleixner 476e64c8aa0SThomas Gleixner /* Use the vm area unlocked, assuming the caller 477e64c8aa0SThomas Gleixner ensures there isn't another iounmap for the same address 478e64c8aa0SThomas Gleixner in parallel. Reuse of the virtual address is prevented by 479e64c8aa0SThomas Gleixner leaving it in the global lists until we're done with it. 480e64c8aa0SThomas Gleixner cpa takes care of the direct mappings. */ 481ef932473SJoonsoo Kim p = find_vm_area((void __force *)addr); 482e64c8aa0SThomas Gleixner 483e64c8aa0SThomas Gleixner if (!p) { 484e64c8aa0SThomas Gleixner printk(KERN_ERR "iounmap: bad address %p\n", addr); 485e64c8aa0SThomas Gleixner dump_stack(); 486e64c8aa0SThomas Gleixner return; 487e64c8aa0SThomas Gleixner } 488e64c8aa0SThomas Gleixner 489b073d7f8SAlexander Potapenko kmsan_iounmap_page_range((unsigned long)addr, 490b073d7f8SAlexander Potapenko (unsigned long)addr + get_vm_area_size(p)); 491ecdd6ee7SIngo Molnar memtype_free(p->phys_addr, p->phys_addr + get_vm_area_size(p)); 492d7677d40Svenkatesh.pallipadi@intel.com 493e64c8aa0SThomas Gleixner /* Finally remove it */ 4946e92a5a6SThomas Gleixner o = remove_vm_area((void __force *)addr); 495e64c8aa0SThomas Gleixner BUG_ON(p != o || o == NULL); 496e64c8aa0SThomas Gleixner kfree(p); 497e64c8aa0SThomas Gleixner } 498e64c8aa0SThomas Gleixner EXPORT_SYMBOL(iounmap); 499e64c8aa0SThomas Gleixner 500e045fb2aSvenkatesh.pallipadi@intel.com /* 501e045fb2aSvenkatesh.pallipadi@intel.com * Convert a physical pointer to a virtual kernel pointer for /dev/mem 502e045fb2aSvenkatesh.pallipadi@intel.com * access 503e045fb2aSvenkatesh.pallipadi@intel.com */ 5044707a341SThierry Reding void *xlate_dev_mem_ptr(phys_addr_t phys) 505e045fb2aSvenkatesh.pallipadi@intel.com { 506e045fb2aSvenkatesh.pallipadi@intel.com unsigned long start = phys & PAGE_MASK; 50794d4b476SIngo Molnar unsigned long offset = phys & ~PAGE_MASK; 508562bfca4SIngo Molnar void *vaddr; 509e045fb2aSvenkatesh.pallipadi@intel.com 5108458bf94STom Lendacky /* memremap() maps if RAM, otherwise falls back to ioremap() */ 5118458bf94STom Lendacky vaddr = memremap(start, PAGE_SIZE, MEMREMAP_WB); 512e045fb2aSvenkatesh.pallipadi@intel.com 5138458bf94STom Lendacky /* Only add the offset on success and return NULL if memremap() failed */ 51494d4b476SIngo Molnar if (vaddr) 51594d4b476SIngo Molnar vaddr += offset; 516e045fb2aSvenkatesh.pallipadi@intel.com 517562bfca4SIngo Molnar return vaddr; 518e045fb2aSvenkatesh.pallipadi@intel.com } 519e045fb2aSvenkatesh.pallipadi@intel.com 5204707a341SThierry Reding void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr) 521e045fb2aSvenkatesh.pallipadi@intel.com { 5228458bf94STom Lendacky memunmap((void *)((unsigned long)addr & PAGE_MASK)); 523e045fb2aSvenkatesh.pallipadi@intel.com } 524e045fb2aSvenkatesh.pallipadi@intel.com 525402fe0cbSTom Lendacky #ifdef CONFIG_AMD_MEM_ENCRYPT 5268f716c9bSTom Lendacky /* 5278f716c9bSTom Lendacky * Examine the physical address to determine if it is an area of memory 5288f716c9bSTom Lendacky * that should be mapped decrypted. If the memory is not part of the 5298f716c9bSTom Lendacky * kernel usable area it was accessed and created decrypted, so these 5301de32862STom Lendacky * areas should be mapped decrypted. And since the encryption key can 5311de32862STom Lendacky * change across reboots, persistent memory should also be mapped 5321de32862STom Lendacky * decrypted. 533072f58c6STom Lendacky * 534072f58c6STom Lendacky * If SEV is active, that implies that BIOS/UEFI also ran encrypted so 535072f58c6STom Lendacky * only persistent memory should be mapped decrypted. 5368f716c9bSTom Lendacky */ 5378f716c9bSTom Lendacky static bool memremap_should_map_decrypted(resource_size_t phys_addr, 5388f716c9bSTom Lendacky unsigned long size) 5398f716c9bSTom Lendacky { 5401de32862STom Lendacky int is_pmem; 5411de32862STom Lendacky 5421de32862STom Lendacky /* 5431de32862STom Lendacky * Check if the address is part of a persistent memory region. 5441de32862STom Lendacky * This check covers areas added by E820, EFI and ACPI. 5451de32862STom Lendacky */ 5461de32862STom Lendacky is_pmem = region_intersects(phys_addr, size, IORESOURCE_MEM, 5471de32862STom Lendacky IORES_DESC_PERSISTENT_MEMORY); 5481de32862STom Lendacky if (is_pmem != REGION_DISJOINT) 5491de32862STom Lendacky return true; 5501de32862STom Lendacky 5511de32862STom Lendacky /* 5521de32862STom Lendacky * Check if the non-volatile attribute is set for an EFI 5531de32862STom Lendacky * reserved area. 5541de32862STom Lendacky */ 5551de32862STom Lendacky if (efi_enabled(EFI_BOOT)) { 5561de32862STom Lendacky switch (efi_mem_type(phys_addr)) { 5571de32862STom Lendacky case EFI_RESERVED_TYPE: 5581de32862STom Lendacky if (efi_mem_attributes(phys_addr) & EFI_MEMORY_NV) 5591de32862STom Lendacky return true; 5601de32862STom Lendacky break; 5611de32862STom Lendacky default: 5621de32862STom Lendacky break; 5631de32862STom Lendacky } 5641de32862STom Lendacky } 5651de32862STom Lendacky 5668f716c9bSTom Lendacky /* Check if the address is outside kernel usable area */ 5678f716c9bSTom Lendacky switch (e820__get_entry_type(phys_addr, phys_addr + size - 1)) { 5688f716c9bSTom Lendacky case E820_TYPE_RESERVED: 5698f716c9bSTom Lendacky case E820_TYPE_ACPI: 5708f716c9bSTom Lendacky case E820_TYPE_NVS: 5718f716c9bSTom Lendacky case E820_TYPE_UNUSABLE: 572072f58c6STom Lendacky /* For SEV, these areas are encrypted */ 5734d96f910STom Lendacky if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) 574072f58c6STom Lendacky break; 575df561f66SGustavo A. R. Silva fallthrough; 576072f58c6STom Lendacky 5771de32862STom Lendacky case E820_TYPE_PRAM: 5788f716c9bSTom Lendacky return true; 5798f716c9bSTom Lendacky default: 5808f716c9bSTom Lendacky break; 5818f716c9bSTom Lendacky } 5828f716c9bSTom Lendacky 5838f716c9bSTom Lendacky return false; 5848f716c9bSTom Lendacky } 5858f716c9bSTom Lendacky 5868f716c9bSTom Lendacky /* 5878f716c9bSTom Lendacky * Examine the physical address to determine if it is EFI data. Check 5888f716c9bSTom Lendacky * it against the boot params structure and EFI tables and memory types. 5898f716c9bSTom Lendacky */ 5908f716c9bSTom Lendacky static bool memremap_is_efi_data(resource_size_t phys_addr, 5918f716c9bSTom Lendacky unsigned long size) 5928f716c9bSTom Lendacky { 5938f716c9bSTom Lendacky u64 paddr; 5948f716c9bSTom Lendacky 5958f716c9bSTom Lendacky /* Check if the address is part of EFI boot/runtime data */ 5968f716c9bSTom Lendacky if (!efi_enabled(EFI_BOOT)) 5978f716c9bSTom Lendacky return false; 5988f716c9bSTom Lendacky 5998f716c9bSTom Lendacky paddr = boot_params.efi_info.efi_memmap_hi; 6008f716c9bSTom Lendacky paddr <<= 32; 6018f716c9bSTom Lendacky paddr |= boot_params.efi_info.efi_memmap; 6028f716c9bSTom Lendacky if (phys_addr == paddr) 6038f716c9bSTom Lendacky return true; 6048f716c9bSTom Lendacky 6058f716c9bSTom Lendacky paddr = boot_params.efi_info.efi_systab_hi; 6068f716c9bSTom Lendacky paddr <<= 32; 6078f716c9bSTom Lendacky paddr |= boot_params.efi_info.efi_systab; 6088f716c9bSTom Lendacky if (phys_addr == paddr) 6098f716c9bSTom Lendacky return true; 6108f716c9bSTom Lendacky 6118f716c9bSTom Lendacky if (efi_is_table_address(phys_addr)) 6128f716c9bSTom Lendacky return true; 6138f716c9bSTom Lendacky 6148f716c9bSTom Lendacky switch (efi_mem_type(phys_addr)) { 6158f716c9bSTom Lendacky case EFI_BOOT_SERVICES_DATA: 6168f716c9bSTom Lendacky case EFI_RUNTIME_SERVICES_DATA: 6178f716c9bSTom Lendacky return true; 6188f716c9bSTom Lendacky default: 6198f716c9bSTom Lendacky break; 6208f716c9bSTom Lendacky } 6218f716c9bSTom Lendacky 6228f716c9bSTom Lendacky return false; 6238f716c9bSTom Lendacky } 6248f716c9bSTom Lendacky 6258f716c9bSTom Lendacky /* 6268f716c9bSTom Lendacky * Examine the physical address to determine if it is boot data by checking 6278f716c9bSTom Lendacky * it against the boot params setup_data chain. 6288f716c9bSTom Lendacky */ 6298f716c9bSTom Lendacky static bool memremap_is_setup_data(resource_size_t phys_addr, 6308f716c9bSTom Lendacky unsigned long size) 6318f716c9bSTom Lendacky { 6327228918bSRoss Philipson struct setup_indirect *indirect; 6338f716c9bSTom Lendacky struct setup_data *data; 6348f716c9bSTom Lendacky u64 paddr, paddr_next; 6358f716c9bSTom Lendacky 6368f716c9bSTom Lendacky paddr = boot_params.hdr.setup_data; 6378f716c9bSTom Lendacky while (paddr) { 6388f716c9bSTom Lendacky unsigned int len; 6398f716c9bSTom Lendacky 6408f716c9bSTom Lendacky if (phys_addr == paddr) 6418f716c9bSTom Lendacky return true; 6428f716c9bSTom Lendacky 6438f716c9bSTom Lendacky data = memremap(paddr, sizeof(*data), 6448f716c9bSTom Lendacky MEMREMAP_WB | MEMREMAP_DEC); 6457228918bSRoss Philipson if (!data) { 6467228918bSRoss Philipson pr_warn("failed to memremap setup_data entry\n"); 6477228918bSRoss Philipson return false; 6487228918bSRoss Philipson } 6498f716c9bSTom Lendacky 6508f716c9bSTom Lendacky paddr_next = data->next; 6518f716c9bSTom Lendacky len = data->len; 6528f716c9bSTom Lendacky 653b3c72fc9SDaniel Kiper if ((phys_addr > paddr) && (phys_addr < (paddr + len))) { 654b3c72fc9SDaniel Kiper memunmap(data); 655b3c72fc9SDaniel Kiper return true; 656b3c72fc9SDaniel Kiper } 657b3c72fc9SDaniel Kiper 6587228918bSRoss Philipson if (data->type == SETUP_INDIRECT) { 6597228918bSRoss Philipson memunmap(data); 6607228918bSRoss Philipson data = memremap(paddr, sizeof(*data) + len, 6617228918bSRoss Philipson MEMREMAP_WB | MEMREMAP_DEC); 6627228918bSRoss Philipson if (!data) { 6637228918bSRoss Philipson pr_warn("failed to memremap indirect setup_data\n"); 6647228918bSRoss Philipson return false; 6657228918bSRoss Philipson } 6667228918bSRoss Philipson 6677228918bSRoss Philipson indirect = (struct setup_indirect *)data->data; 6687228918bSRoss Philipson 6697228918bSRoss Philipson if (indirect->type != SETUP_INDIRECT) { 6707228918bSRoss Philipson paddr = indirect->addr; 6717228918bSRoss Philipson len = indirect->len; 6727228918bSRoss Philipson } 673b3c72fc9SDaniel Kiper } 674b3c72fc9SDaniel Kiper 6758f716c9bSTom Lendacky memunmap(data); 6768f716c9bSTom Lendacky 6778f716c9bSTom Lendacky if ((phys_addr > paddr) && (phys_addr < (paddr + len))) 6788f716c9bSTom Lendacky return true; 6798f716c9bSTom Lendacky 6808f716c9bSTom Lendacky paddr = paddr_next; 6818f716c9bSTom Lendacky } 6828f716c9bSTom Lendacky 6838f716c9bSTom Lendacky return false; 6848f716c9bSTom Lendacky } 6858f716c9bSTom Lendacky 6868f716c9bSTom Lendacky /* 6878f716c9bSTom Lendacky * Examine the physical address to determine if it is boot data by checking 6888f716c9bSTom Lendacky * it against the boot params setup_data chain (early boot version). 6898f716c9bSTom Lendacky */ 6908f716c9bSTom Lendacky static bool __init early_memremap_is_setup_data(resource_size_t phys_addr, 6918f716c9bSTom Lendacky unsigned long size) 6928f716c9bSTom Lendacky { 693445c1470SRoss Philipson struct setup_indirect *indirect; 6948f716c9bSTom Lendacky struct setup_data *data; 6958f716c9bSTom Lendacky u64 paddr, paddr_next; 6968f716c9bSTom Lendacky 6978f716c9bSTom Lendacky paddr = boot_params.hdr.setup_data; 6988f716c9bSTom Lendacky while (paddr) { 699445c1470SRoss Philipson unsigned int len, size; 7008f716c9bSTom Lendacky 7018f716c9bSTom Lendacky if (phys_addr == paddr) 7028f716c9bSTom Lendacky return true; 7038f716c9bSTom Lendacky 7048f716c9bSTom Lendacky data = early_memremap_decrypted(paddr, sizeof(*data)); 705445c1470SRoss Philipson if (!data) { 706445c1470SRoss Philipson pr_warn("failed to early memremap setup_data entry\n"); 707445c1470SRoss Philipson return false; 708445c1470SRoss Philipson } 709445c1470SRoss Philipson 710445c1470SRoss Philipson size = sizeof(*data); 7118f716c9bSTom Lendacky 7128f716c9bSTom Lendacky paddr_next = data->next; 7138f716c9bSTom Lendacky len = data->len; 7148f716c9bSTom Lendacky 715445c1470SRoss Philipson if ((phys_addr > paddr) && (phys_addr < (paddr + len))) { 7168f716c9bSTom Lendacky early_memunmap(data, sizeof(*data)); 717445c1470SRoss Philipson return true; 718445c1470SRoss Philipson } 719445c1470SRoss Philipson 720445c1470SRoss Philipson if (data->type == SETUP_INDIRECT) { 721445c1470SRoss Philipson size += len; 722445c1470SRoss Philipson early_memunmap(data, sizeof(*data)); 723445c1470SRoss Philipson data = early_memremap_decrypted(paddr, size); 724445c1470SRoss Philipson if (!data) { 725445c1470SRoss Philipson pr_warn("failed to early memremap indirect setup_data\n"); 726445c1470SRoss Philipson return false; 727445c1470SRoss Philipson } 728445c1470SRoss Philipson 729445c1470SRoss Philipson indirect = (struct setup_indirect *)data->data; 730445c1470SRoss Philipson 731445c1470SRoss Philipson if (indirect->type != SETUP_INDIRECT) { 732445c1470SRoss Philipson paddr = indirect->addr; 733445c1470SRoss Philipson len = indirect->len; 734445c1470SRoss Philipson } 735445c1470SRoss Philipson } 736445c1470SRoss Philipson 737445c1470SRoss Philipson early_memunmap(data, size); 7388f716c9bSTom Lendacky 7398f716c9bSTom Lendacky if ((phys_addr > paddr) && (phys_addr < (paddr + len))) 7408f716c9bSTom Lendacky return true; 7418f716c9bSTom Lendacky 7428f716c9bSTom Lendacky paddr = paddr_next; 7438f716c9bSTom Lendacky } 7448f716c9bSTom Lendacky 7458f716c9bSTom Lendacky return false; 7468f716c9bSTom Lendacky } 7478f716c9bSTom Lendacky 7488f716c9bSTom Lendacky /* 7498f716c9bSTom Lendacky * Architecture function to determine if RAM remap is allowed. By default, a 7508f716c9bSTom Lendacky * RAM remap will map the data as encrypted. Determine if a RAM remap should 7518f716c9bSTom Lendacky * not be done so that the data will be mapped decrypted. 7528f716c9bSTom Lendacky */ 7538f716c9bSTom Lendacky bool arch_memremap_can_ram_remap(resource_size_t phys_addr, unsigned long size, 7548f716c9bSTom Lendacky unsigned long flags) 7558f716c9bSTom Lendacky { 756e9d1d2bbSTom Lendacky if (!cc_platform_has(CC_ATTR_MEM_ENCRYPT)) 7578f716c9bSTom Lendacky return true; 7588f716c9bSTom Lendacky 7598f716c9bSTom Lendacky if (flags & MEMREMAP_ENC) 7608f716c9bSTom Lendacky return true; 7618f716c9bSTom Lendacky 7628f716c9bSTom Lendacky if (flags & MEMREMAP_DEC) 7638f716c9bSTom Lendacky return false; 7648f716c9bSTom Lendacky 76532cb4d02STom Lendacky if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) { 7668f716c9bSTom Lendacky if (memremap_is_setup_data(phys_addr, size) || 767072f58c6STom Lendacky memremap_is_efi_data(phys_addr, size)) 7688f716c9bSTom Lendacky return false; 769072f58c6STom Lendacky } 7708f716c9bSTom Lendacky 771072f58c6STom Lendacky return !memremap_should_map_decrypted(phys_addr, size); 7728f716c9bSTom Lendacky } 7738f716c9bSTom Lendacky 7748f716c9bSTom Lendacky /* 7758f716c9bSTom Lendacky * Architecture override of __weak function to adjust the protection attributes 7768f716c9bSTom Lendacky * used when remapping memory. By default, early_memremap() will map the data 7778f716c9bSTom Lendacky * as encrypted. Determine if an encrypted mapping should not be done and set 7788f716c9bSTom Lendacky * the appropriate protection attributes. 7798f716c9bSTom Lendacky */ 7808f716c9bSTom Lendacky pgprot_t __init early_memremap_pgprot_adjust(resource_size_t phys_addr, 7818f716c9bSTom Lendacky unsigned long size, 7828f716c9bSTom Lendacky pgprot_t prot) 7838f716c9bSTom Lendacky { 784072f58c6STom Lendacky bool encrypted_prot; 785072f58c6STom Lendacky 786e9d1d2bbSTom Lendacky if (!cc_platform_has(CC_ATTR_MEM_ENCRYPT)) 7878f716c9bSTom Lendacky return prot; 7888f716c9bSTom Lendacky 789072f58c6STom Lendacky encrypted_prot = true; 790072f58c6STom Lendacky 79132cb4d02STom Lendacky if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) { 7928f716c9bSTom Lendacky if (early_memremap_is_setup_data(phys_addr, size) || 793072f58c6STom Lendacky memremap_is_efi_data(phys_addr, size)) 794072f58c6STom Lendacky encrypted_prot = false; 795072f58c6STom Lendacky } 7968f716c9bSTom Lendacky 797072f58c6STom Lendacky if (encrypted_prot && memremap_should_map_decrypted(phys_addr, size)) 798072f58c6STom Lendacky encrypted_prot = false; 799072f58c6STom Lendacky 800072f58c6STom Lendacky return encrypted_prot ? pgprot_encrypted(prot) 801072f58c6STom Lendacky : pgprot_decrypted(prot); 8028f716c9bSTom Lendacky } 8038f716c9bSTom Lendacky 8048458bf94STom Lendacky bool phys_mem_access_encrypted(unsigned long phys_addr, unsigned long size) 8058458bf94STom Lendacky { 8068458bf94STom Lendacky return arch_memremap_can_ram_remap(phys_addr, size, 0); 8078458bf94STom Lendacky } 8088458bf94STom Lendacky 809f88a68faSTom Lendacky /* Remap memory with encryption */ 810f88a68faSTom Lendacky void __init *early_memremap_encrypted(resource_size_t phys_addr, 811f88a68faSTom Lendacky unsigned long size) 812f88a68faSTom Lendacky { 813f88a68faSTom Lendacky return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_ENC); 814f88a68faSTom Lendacky } 815f88a68faSTom Lendacky 816f88a68faSTom Lendacky /* 817f88a68faSTom Lendacky * Remap memory with encryption and write-protected - cannot be called 818f88a68faSTom Lendacky * before pat_init() is called 819f88a68faSTom Lendacky */ 820f88a68faSTom Lendacky void __init *early_memremap_encrypted_wp(resource_size_t phys_addr, 821f88a68faSTom Lendacky unsigned long size) 822f88a68faSTom Lendacky { 8231f6f655eSChristoph Hellwig if (!x86_has_pat_wp()) 824f88a68faSTom Lendacky return NULL; 825f88a68faSTom Lendacky return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_ENC_WP); 826f88a68faSTom Lendacky } 827f88a68faSTom Lendacky 828f88a68faSTom Lendacky /* Remap memory without encryption */ 829f88a68faSTom Lendacky void __init *early_memremap_decrypted(resource_size_t phys_addr, 830f88a68faSTom Lendacky unsigned long size) 831f88a68faSTom Lendacky { 832f88a68faSTom Lendacky return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_NOENC); 833f88a68faSTom Lendacky } 834f88a68faSTom Lendacky 835f88a68faSTom Lendacky /* 836f88a68faSTom Lendacky * Remap memory without encryption and write-protected - cannot be called 837f88a68faSTom Lendacky * before pat_init() is called 838f88a68faSTom Lendacky */ 839f88a68faSTom Lendacky void __init *early_memremap_decrypted_wp(resource_size_t phys_addr, 840f88a68faSTom Lendacky unsigned long size) 841f88a68faSTom Lendacky { 8421f6f655eSChristoph Hellwig if (!x86_has_pat_wp()) 843f88a68faSTom Lendacky return NULL; 844f88a68faSTom Lendacky return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_NOENC_WP); 845f88a68faSTom Lendacky } 846ce9084baSArd Biesheuvel #endif /* CONFIG_AMD_MEM_ENCRYPT */ 847f88a68faSTom Lendacky 84845c7b28fSJeremy Fitzhardinge static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss; 849e64c8aa0SThomas Gleixner 850551889a6SIan Campbell static inline pmd_t * __init early_ioremap_pmd(unsigned long addr) 851e64c8aa0SThomas Gleixner { 85237cc8d7fSJeremy Fitzhardinge /* Don't assume we're using swapper_pg_dir at this point */ 8536c690ee1SAndy Lutomirski pgd_t *base = __va(read_cr3_pa()); 85437cc8d7fSJeremy Fitzhardinge pgd_t *pgd = &base[pgd_index(addr)]; 855e0c4f675SKirill A. Shutemov p4d_t *p4d = p4d_offset(pgd, addr); 856e0c4f675SKirill A. Shutemov pud_t *pud = pud_offset(p4d, addr); 857551889a6SIan Campbell pmd_t *pmd = pmd_offset(pud, addr); 858551889a6SIan Campbell 859551889a6SIan Campbell return pmd; 860e64c8aa0SThomas Gleixner } 861e64c8aa0SThomas Gleixner 862551889a6SIan Campbell static inline pte_t * __init early_ioremap_pte(unsigned long addr) 863e64c8aa0SThomas Gleixner { 864551889a6SIan Campbell return &bm_pte[pte_index(addr)]; 865e64c8aa0SThomas Gleixner } 866e64c8aa0SThomas Gleixner 867fef5ba79SJeremy Fitzhardinge bool __init is_early_ioremap_ptep(pte_t *ptep) 868fef5ba79SJeremy Fitzhardinge { 869fef5ba79SJeremy Fitzhardinge return ptep >= &bm_pte[0] && ptep < &bm_pte[PAGE_SIZE/sizeof(pte_t)]; 870fef5ba79SJeremy Fitzhardinge } 871fef5ba79SJeremy Fitzhardinge 872e64c8aa0SThomas Gleixner void __init early_ioremap_init(void) 873e64c8aa0SThomas Gleixner { 874551889a6SIan Campbell pmd_t *pmd; 875e64c8aa0SThomas Gleixner 87673159fdcSAndy Lutomirski #ifdef CONFIG_X86_64 87773159fdcSAndy Lutomirski BUILD_BUG_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1)); 87873159fdcSAndy Lutomirski #else 87973159fdcSAndy Lutomirski WARN_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1)); 88073159fdcSAndy Lutomirski #endif 88173159fdcSAndy Lutomirski 8825b7c73e0SMark Salter early_ioremap_setup(); 8838827247fSWang Chen 884551889a6SIan Campbell pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)); 885e64c8aa0SThomas Gleixner memset(bm_pte, 0, sizeof(bm_pte)); 886b6fbb669SIan Campbell pmd_populate_kernel(&init_mm, pmd, bm_pte); 887551889a6SIan Campbell 888e64c8aa0SThomas Gleixner /* 889551889a6SIan Campbell * The boot-ioremap range spans multiple pmds, for which 890e64c8aa0SThomas Gleixner * we are not prepared: 891e64c8aa0SThomas Gleixner */ 892499a5f1eSJan Beulich #define __FIXADDR_TOP (-PAGE_SIZE) 893499a5f1eSJan Beulich BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT) 894499a5f1eSJan Beulich != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT)); 895499a5f1eSJan Beulich #undef __FIXADDR_TOP 896551889a6SIan Campbell if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) { 897e64c8aa0SThomas Gleixner WARN_ON(1); 898551889a6SIan Campbell printk(KERN_WARNING "pmd %p != %p\n", 899551889a6SIan Campbell pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))); 900e64c8aa0SThomas Gleixner printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n", 901e64c8aa0SThomas Gleixner fix_to_virt(FIX_BTMAP_BEGIN)); 902e64c8aa0SThomas Gleixner printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END): %08lx\n", 903e64c8aa0SThomas Gleixner fix_to_virt(FIX_BTMAP_END)); 904e64c8aa0SThomas Gleixner 905e64c8aa0SThomas Gleixner printk(KERN_WARNING "FIX_BTMAP_END: %d\n", FIX_BTMAP_END); 906e64c8aa0SThomas Gleixner printk(KERN_WARNING "FIX_BTMAP_BEGIN: %d\n", 907e64c8aa0SThomas Gleixner FIX_BTMAP_BEGIN); 908e64c8aa0SThomas Gleixner } 909e64c8aa0SThomas Gleixner } 910e64c8aa0SThomas Gleixner 9115b7c73e0SMark Salter void __init __early_set_fixmap(enum fixed_addresses idx, 9129b987aebSMasami Hiramatsu phys_addr_t phys, pgprot_t flags) 913e64c8aa0SThomas Gleixner { 914551889a6SIan Campbell unsigned long addr = __fix_to_virt(idx); 915551889a6SIan Campbell pte_t *pte; 916e64c8aa0SThomas Gleixner 917e64c8aa0SThomas Gleixner if (idx >= __end_of_fixed_addresses) { 918e64c8aa0SThomas Gleixner BUG(); 919e64c8aa0SThomas Gleixner return; 920e64c8aa0SThomas Gleixner } 921e64c8aa0SThomas Gleixner pte = early_ioremap_pte(addr); 9224583ed51SJeremy Fitzhardinge 923fb43d6cbSDave Hansen /* Sanitize 'prot' against any unsupported bits: */ 924510bb96fSThomas Gleixner pgprot_val(flags) &= __supported_pte_mask; 925fb43d6cbSDave Hansen 926e64c8aa0SThomas Gleixner if (pgprot_val(flags)) 927551889a6SIan Campbell set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags)); 928e64c8aa0SThomas Gleixner else 9294f9c11ddSJeremy Fitzhardinge pte_clear(&init_mm, addr, pte); 93058430c5dSThomas Gleixner flush_tlb_one_kernel(addr); 931e64c8aa0SThomas Gleixner } 932