// SPDX-License-Identifier: GPL-2.0-only
/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/ioremap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmiotrace.h>
#include <linux/cc_platform.h>
#include <linux/efi.h>
#include <linux/pgtable.h>
#include <linux/kmsan.h>

#include <asm/set_memory.h>
#include <asm/e820/api.h>
#include <asm/efi.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/memtype.h>
#include <asm/setup.h>

#include "physaddr.h"

/*
 * Descriptor controlling ioremap() behavior.
 */
struct ioremap_desc {
	unsigned int flags;
};

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
			enum page_cache_mode pcm)
{
	unsigned long nrpages = size >> PAGE_SHIFT;
	int err;

	switch (pcm) {
	case _PAGE_CACHE_MODE_UC:
	default:
		err = _set_memory_uc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WC:
		err = _set_memory_wc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WT:
		err = _set_memory_wt(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WB:
		err = _set_memory_wb(vaddr, nrpages);
		break;
	}

	return err;
}

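/*
 * Illustrative sketch (not part of the original file): the memtype code
 * uses this helper to keep the kernel direct mapping in sync with a new
 * IO mapping; the physical address and cache mode here are hypothetical.
 *
 *	if (ioremap_change_attr((unsigned long)__va(phys_addr), size,
 *				_PAGE_CACHE_MODE_WC))
 *		pr_err("failed to set WC on the direct mapping\n");
 */
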
/* Does the range (or a subset of it) contain normal RAM? */
static unsigned int __ioremap_check_ram(struct resource *res)
{
	unsigned long start_pfn, stop_pfn;
	unsigned long i;

	if ((res->flags & IORESOURCE_SYSTEM_RAM) != IORESOURCE_SYSTEM_RAM)
		return 0;

	start_pfn = (res->start + PAGE_SIZE - 1) >> PAGE_SHIFT;
	stop_pfn = (res->end + 1) >> PAGE_SHIFT;
	if (stop_pfn > start_pfn) {
		for (i = 0; i < (stop_pfn - start_pfn); ++i)
			if (pfn_valid(start_pfn + i) &&
			    !PageReserved(pfn_to_page(start_pfn + i)))
				return IORES_MAP_SYSTEM_RAM;
	}

	return 0;
}

/*
 * In a SEV guest, NONE and RESERVED should not be mapped encrypted because
 * the whole memory is already encrypted there.
 */
static unsigned int __ioremap_check_encrypted(struct resource *res)
{
	if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
		return 0;

	switch (res->desc) {
	case IORES_DESC_NONE:
	case IORES_DESC_RESERVED:
		break;
	default:
		return IORES_MAP_ENCRYPTED;
	}

	return 0;
}

/*
 * The EFI runtime services data area is not covered by walk_mem_res(), but must
 * be mapped encrypted when SEV is active.
 */
static void __ioremap_check_other(resource_size_t addr, struct ioremap_desc *desc)
{
	if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
		return;

	if (x86_platform.hyper.is_private_mmio(addr)) {
		desc->flags |= IORES_MAP_ENCRYPTED;
		return;
	}

	if (!IS_ENABLED(CONFIG_EFI))
		return;

	if (efi_mem_type(addr) == EFI_RUNTIME_SERVICES_DATA ||
	    (efi_mem_type(addr) == EFI_BOOT_SERVICES_DATA &&
	     efi_mem_attributes(addr) & EFI_MEMORY_RUNTIME))
		desc->flags |= IORES_MAP_ENCRYPTED;
}

static int __ioremap_collect_map_flags(struct resource *res, void *arg)
{
	struct ioremap_desc *desc = arg;

	if (!(desc->flags & IORES_MAP_SYSTEM_RAM))
		desc->flags |= __ioremap_check_ram(res);

	if (!(desc->flags & IORES_MAP_ENCRYPTED))
		desc->flags |= __ioremap_check_encrypted(res);

	return ((desc->flags & (IORES_MAP_SYSTEM_RAM | IORES_MAP_ENCRYPTED)) ==
		(IORES_MAP_SYSTEM_RAM | IORES_MAP_ENCRYPTED));
}

/*
 * To avoid multiple resource walks, this function walks resources marked as
 * IORESOURCE_MEM and IORESOURCE_BUSY, looking for system RAM and/or a
 * resource whose descriptor is not IORES_DESC_NONE (e.g. IORES_DESC_ACPI_TABLES).
 *
 * After that, deal with misc other ranges in __ioremap_check_other() which do
 * not fall into the above category.
 */
static void __ioremap_check_mem(resource_size_t addr, unsigned long size,
				struct ioremap_desc *desc)
{
	u64 start, end;

	start = (u64)addr;
	end = start + size - 1;
	memset(desc, 0, sizeof(struct ioremap_desc));

	walk_mem_res(start, end, desc, __ioremap_collect_map_flags);

	__ioremap_check_other(addr, desc);
}

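/*
 * Illustrative sketch (not part of the original file): __ioremap_caller()
 * below consumes the collected flags roughly like this:
 *
 *	struct ioremap_desc io_desc;
 *
 *	__ioremap_check_mem(phys_addr, size, &io_desc);
 *	if (io_desc.flags & IORES_MAP_SYSTEM_RAM)
 *		return NULL;
 *	if (io_desc.flags & IORES_MAP_ENCRYPTED)
 *		prot = pgprot_encrypted(prot);
 *
 * i.e. normal RAM is refused and encrypted ranges get an encrypted pgprot.
 */
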
/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. It transparently creates kernel huge I/O mappings when
 * the physical address is aligned to a huge page size (1GB or 2MB) and
 * the requested size is at least the huge page size.
 *
 * NOTE: MTRRs can override PAT memory types with a 4KB granularity.
 * Therefore, the mapping code falls back to using smaller pages, down to
 * 4KB, when a mapping range is covered by a non-WB type of MTRR.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *
__ioremap_caller(resource_size_t phys_addr, unsigned long size,
		 enum page_cache_mode pcm, void *caller, bool encrypted)
{
	unsigned long offset, vaddr;
	resource_size_t last_addr;
	const resource_size_t unaligned_phys_addr = phys_addr;
	const unsigned long unaligned_size = size;
	struct ioremap_desc io_desc;
	struct vm_struct *area;
	enum page_cache_mode new_pcm;
	pgprot_t prot;
	int retval;
	void __iomem *ret_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	if (!phys_addr_valid(phys_addr)) {
		printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
		       (unsigned long long)phys_addr);
		WARN_ON_ONCE(1);
		return NULL;
	}

	__ioremap_check_mem(phys_addr, size, &io_desc);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (io_desc.flags & IORES_MAP_SYSTEM_RAM) {
		WARN_ONCE(1, "ioremap on RAM at %pa - %pa\n",
			  &phys_addr, &last_addr);
		return NULL;
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	/*
	 * Mask out any bits not part of the actual physical
	 * address, like memory encryption bits.
	 */
	phys_addr &= PHYSICAL_PAGE_MASK;

	retval = memtype_reserve(phys_addr, (u64)phys_addr + size,
				 pcm, &new_pcm);
	if (retval) {
		printk(KERN_ERR "ioremap memtype_reserve failed %d\n", retval);
		return NULL;
	}

	if (pcm != new_pcm) {
		if (!is_new_memtype_allowed(phys_addr, size, pcm, new_pcm)) {
			printk(KERN_ERR
		"ioremap error for 0x%llx-0x%llx, requested 0x%x, got 0x%x\n",
				(unsigned long long)phys_addr,
				(unsigned long long)(phys_addr + size),
				pcm, new_pcm);
			goto err_free_memtype;
		}
		pcm = new_pcm;
	}

	/*
	 * If the page being mapped is in memory and SEV is active then
	 * make sure the memory encryption attribute is enabled in the
	 * resulting mapping.
	 * In TDX guests, memory is marked private by default. If encryption
	 * is not requested (via the 'encrypted' parameter), explicitly set
	 * the decrypted attribute in all ioremapped memory.
	 */
	prot = PAGE_KERNEL_IO;
	if ((io_desc.flags & IORES_MAP_ENCRYPTED) || encrypted)
		prot = pgprot_encrypted(prot);
	else
		prot = pgprot_decrypted(prot);

	switch (pcm) {
	case _PAGE_CACHE_MODE_UC:
	default:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_UC));
		break;
	case _PAGE_CACHE_MODE_UC_MINUS:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS));
		break;
	case _PAGE_CACHE_MODE_WC:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_WC));
		break;
	case _PAGE_CACHE_MODE_WT:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_WT));
		break;
	case _PAGE_CACHE_MODE_WB:
		break;
	}

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		goto err_free_memtype;
	area->phys_addr = phys_addr;
	vaddr = (unsigned long) area->addr;

	if (memtype_kernel_map_sync(phys_addr, size, pcm))
		goto err_free_area;

	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot))
		goto err_free_area;

	ret_addr = (void __iomem *) (vaddr + offset);
	mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);

	/*
	 * Check if the request spans more than any BAR in the iomem resource
	 * tree.
	 */
	if (iomem_map_sanity_check(unaligned_phys_addr, unaligned_size))
		pr_warn("caller %pS mapping multiple BARs\n", caller);

	return ret_addr;
err_free_area:
	free_vm_area(area);
err_free_memtype:
	memtype_free(phys_addr, phys_addr + size);
	return NULL;
}

/**
 * ioremap - map bus memory into CPU space
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap(resource_size_t phys_addr, unsigned long size)
{
	/*
	 * Ideally, this should be:
	 *	pat_enabled() ? _PAGE_CACHE_MODE_UC : _PAGE_CACHE_MODE_UC_MINUS;
	 *
	 * Till we fix all X drivers to use ioremap_wc(), we will use
	 * UC MINUS. Drivers that are certain they need or can already
	 * be converted over to strong UC can use ioremap_uc().
	 */
	enum page_cache_mode pcm = _PAGE_CACHE_MODE_UC_MINUS;

	return __ioremap_caller(phys_addr, size, pcm,
				__builtin_return_address(0), false);
}
EXPORT_SYMBOL(ioremap);

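/*
 * Usage sketch (illustrative, not part of the original file): a typical
 * PCI driver maps a BAR and talks to it through the mmio helpers. "pdev"
 * and REG_CTRL below are hypothetical.
 *
 *	void __iomem *regs;
 *
 *	regs = ioremap(pci_resource_start(pdev, 0),
 *		       pci_resource_len(pdev, 0));
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + REG_CTRL);
 *	iounmap(regs);
 */
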
/**
 * ioremap_uc - map bus memory into CPU space as strongly uncachable
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_uc performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked with a strong
 * preference as completely uncachable on the CPU when possible. For non-PAT
 * systems this ends up setting page-attribute flags PCD=1, PWT=1. For PAT
 * systems this will set the PAT entry for the pages as strong UC. This call
 * will honor existing caching rules from things like the PCI bus. Note that
 * there are other caches and buffers on many busses. In particular driver
 * authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_uc(resource_size_t phys_addr, unsigned long size)
{
	enum page_cache_mode pcm = _PAGE_CACHE_MODE_UC;

	return __ioremap_caller(phys_addr, size, pcm,
				__builtin_return_address(0), false);
}
EXPORT_SYMBOL_GPL(ioremap_uc);

/**
 * ioremap_wc - map memory into CPU space write combined
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WC,
				__builtin_return_address(0), false);
}
EXPORT_SYMBOL(ioremap_wc);

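/*
 * Usage sketch (illustrative, not part of the original file): framebuffer
 * drivers commonly map linear framebuffer memory write-combined; "info"
 * is a hypothetical struct fb_info pointer.
 *
 *	info->screen_base = ioremap_wc(info->fix.smem_start,
 *				       info->fix.smem_len);
 */
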
/**
 * ioremap_wt - map memory into CPU space write through
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write through.
 * Write through stores data into memory while keeping the cache up-to-date.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wt(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WT,
				__builtin_return_address(0), false);
}
EXPORT_SYMBOL(ioremap_wt);

void __iomem *ioremap_encrypted(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WB,
				__builtin_return_address(0), true);
}
EXPORT_SYMBOL(ioremap_encrypted);

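/*
 * Usage sketch (illustrative, not part of the original file): the kdump
 * path uses an encrypted write-back mapping to copy out encrypted memory
 * of the crashed kernel, along the lines of:
 *
 *	vaddr = (void __force *)ioremap_encrypted(pfn << PAGE_SHIFT,
 *						  PAGE_SIZE);
 */
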
void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WB,
				__builtin_return_address(0), false);
}
EXPORT_SYMBOL(ioremap_cache);

void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
			   unsigned long prot_val)
{
	return __ioremap_caller(phys_addr, size,
				pgprot2cachemode(__pgprot(prot_val)),
				__builtin_return_address(0), false);
}
EXPORT_SYMBOL(ioremap_prot);

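/*
 * Usage sketch (illustrative, not part of the original file): callers
 * that already hold raw protection bits, e.g. from a VMA, can pass them
 * through unchanged:
 *
 *	maddr = ioremap_prot(phys_addr, PAGE_ALIGN(size),
 *			     pgprot_val(vma->vm_page_prot));
 */
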
/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if (WARN_ON_ONCE(!is_ioremap_addr((void __force *)addr)))
		return;

	/*
	 * The PCI/ISA range special-casing was removed from __ioremap()
	 * so this check, in theory, can be removed. However, there are
	 * cases where iounmap() is called for addresses not obtained via
	 * ioremap() (vga16fb for example). Add a warning so that these
	 * cases can be caught and fixed.
	 */
	if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    (void __force *)addr < phys_to_virt(ISA_END_ADDRESS)) {
		WARN(1, "iounmap() called for ISA range not obtained using ioremap()\n");
		return;
	}

	mmiotrace_iounmap(addr);

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	/*
	 * Use the vm area unlocked, assuming the caller ensures there isn't
	 * another iounmap for the same address in parallel. Reuse of the
	 * virtual address is prevented by leaving it in the global lists
	 * until we're done with it. cpa takes care of the direct mappings.
	 */
	p = find_vm_area((void __force *)addr);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	kmsan_iounmap_page_range((unsigned long)addr,
				 (unsigned long)addr + get_vm_area_size(p));
	memtype_free(p->phys_addr, p->phys_addr + get_vm_area_size(p));

	/* Finally remove it */
	o = remove_vm_area((void __force *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
void *xlate_dev_mem_ptr(phys_addr_t phys)
{
	unsigned long start = phys & PAGE_MASK;
	unsigned long offset = phys & ~PAGE_MASK;
	void *vaddr;

	/* memremap() maps if RAM, otherwise falls back to ioremap() */
	vaddr = memremap(start, PAGE_SIZE, MEMREMAP_WB);

	/* Only add the offset on success and return NULL if memremap() failed */
	if (vaddr)
		vaddr += offset;

	return vaddr;
}

void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
{
	memunmap((void *)((unsigned long)addr & PAGE_MASK));
}

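/*
 * Usage sketch (illustrative, not part of the original file): the
 * /dev/mem read path pairs these two helpers around the copy, roughly:
 *
 *	ptr = xlate_dev_mem_ptr(p);
 *	if (!ptr)
 *		return -EFAULT;
 *	remaining = copy_to_user(buf, ptr, sz);
 *	unxlate_dev_mem_ptr(p, ptr);
 */
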
#ifdef CONFIG_AMD_MEM_ENCRYPT
/*
 * Examine the physical address to determine if it is an area of memory
 * that should be mapped decrypted. If the memory is not part of the
 * kernel usable area, it was accessed and created decrypted, so these
 * areas should be mapped decrypted. And since the encryption key can
 * change across reboots, persistent memory should also be mapped
 * decrypted.
 *
 * If SEV is active, that implies that BIOS/UEFI also ran encrypted so
 * only persistent memory should be mapped decrypted.
 */
static bool memremap_should_map_decrypted(resource_size_t phys_addr,
					  unsigned long size)
{
	int is_pmem;

	/*
	 * Check if the address is part of a persistent memory region.
	 * This check covers areas added by E820, EFI and ACPI.
	 */
	is_pmem = region_intersects(phys_addr, size, IORESOURCE_MEM,
				    IORES_DESC_PERSISTENT_MEMORY);
	if (is_pmem != REGION_DISJOINT)
		return true;

	/*
	 * Check if the non-volatile attribute is set for an EFI
	 * reserved area.
	 */
	if (efi_enabled(EFI_BOOT)) {
		switch (efi_mem_type(phys_addr)) {
		case EFI_RESERVED_TYPE:
			if (efi_mem_attributes(phys_addr) & EFI_MEMORY_NV)
				return true;
			break;
		default:
			break;
		}
	}

	/* Check if the address is outside kernel usable area */
	switch (e820__get_entry_type(phys_addr, phys_addr + size - 1)) {
	case E820_TYPE_RESERVED:
	case E820_TYPE_ACPI:
	case E820_TYPE_NVS:
	case E820_TYPE_UNUSABLE:
		/* For SEV, these areas are encrypted */
		if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
			break;
		fallthrough;

	case E820_TYPE_PRAM:
		return true;
	default:
		break;
	}

	return false;
}

/*
 * Examine the physical address to determine if it is EFI data. Check
 * it against the boot params structure and EFI tables and memory types.
 */
static bool memremap_is_efi_data(resource_size_t phys_addr,
				 unsigned long size)
{
	u64 paddr;

	/* Check if the address is part of EFI boot/runtime data */
	if (!efi_enabled(EFI_BOOT))
		return false;

	paddr = boot_params.efi_info.efi_memmap_hi;
	paddr <<= 32;
	paddr |= boot_params.efi_info.efi_memmap;
	if (phys_addr == paddr)
		return true;

	paddr = boot_params.efi_info.efi_systab_hi;
	paddr <<= 32;
	paddr |= boot_params.efi_info.efi_systab;
	if (phys_addr == paddr)
		return true;

	if (efi_is_table_address(phys_addr))
		return true;

	switch (efi_mem_type(phys_addr)) {
	case EFI_BOOT_SERVICES_DATA:
	case EFI_RUNTIME_SERVICES_DATA:
		return true;
	default:
		break;
	}

	return false;
}

/*
 * Examine the physical address to determine if it is boot data by checking
 * it against the boot params setup_data chain.
 */
static bool memremap_is_setup_data(resource_size_t phys_addr,
				   unsigned long size)
{
	struct setup_indirect *indirect;
	struct setup_data *data;
	u64 paddr, paddr_next;

	paddr = boot_params.hdr.setup_data;
	while (paddr) {
		unsigned int len;

		if (phys_addr == paddr)
			return true;

		data = memremap(paddr, sizeof(*data),
				MEMREMAP_WB | MEMREMAP_DEC);
		if (!data) {
			pr_warn("failed to memremap setup_data entry\n");
			return false;
		}

		paddr_next = data->next;
		len = data->len;

		if ((phys_addr > paddr) && (phys_addr < (paddr + len))) {
			memunmap(data);
			return true;
		}

		if (data->type == SETUP_INDIRECT) {
			memunmap(data);
			data = memremap(paddr, sizeof(*data) + len,
					MEMREMAP_WB | MEMREMAP_DEC);
			if (!data) {
				pr_warn("failed to memremap indirect setup_data\n");
				return false;
			}

			indirect = (struct setup_indirect *)data->data;

			if (indirect->type != SETUP_INDIRECT) {
				paddr = indirect->addr;
				len = indirect->len;
			}
		}

		memunmap(data);

		if ((phys_addr > paddr) && (phys_addr < (paddr + len)))
			return true;

		paddr = paddr_next;
	}

	return false;
}

/*
 * Examine the physical address to determine if it is boot data by checking
 * it against the boot params setup_data chain (early boot version).
 */
static bool __init early_memremap_is_setup_data(resource_size_t phys_addr,
						unsigned long size)
{
	struct setup_indirect *indirect;
	struct setup_data *data;
	u64 paddr, paddr_next;

	paddr = boot_params.hdr.setup_data;
	while (paddr) {
		unsigned int len, size;

		if (phys_addr == paddr)
			return true;

		data = early_memremap_decrypted(paddr, sizeof(*data));
		if (!data) {
			pr_warn("failed to early memremap setup_data entry\n");
			return false;
		}

		size = sizeof(*data);

		paddr_next = data->next;
		len = data->len;

		if ((phys_addr > paddr) && (phys_addr < (paddr + len))) {
			early_memunmap(data, sizeof(*data));
			return true;
		}

		if (data->type == SETUP_INDIRECT) {
			size += len;
			early_memunmap(data, sizeof(*data));
			data = early_memremap_decrypted(paddr, size);
			if (!data) {
				pr_warn("failed to early memremap indirect setup_data\n");
				return false;
			}

			indirect = (struct setup_indirect *)data->data;

			if (indirect->type != SETUP_INDIRECT) {
				paddr = indirect->addr;
				len = indirect->len;
			}
		}

		early_memunmap(data, size);

		if ((phys_addr > paddr) && (phys_addr < (paddr + len)))
			return true;

		paddr = paddr_next;
	}

	return false;
}

/*
 * Architecture function to determine if RAM remap is allowed. By default, a
 * RAM remap will map the data as encrypted. Determine if a RAM remap should
 * not be done so that the data will be mapped decrypted.
 */
bool arch_memremap_can_ram_remap(resource_size_t phys_addr, unsigned long size,
				 unsigned long flags)
{
	if (!cc_platform_has(CC_ATTR_MEM_ENCRYPT))
		return true;

	if (flags & MEMREMAP_ENC)
		return true;

	if (flags & MEMREMAP_DEC)
		return false;

	if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) {
		if (memremap_is_setup_data(phys_addr, size) ||
		    memremap_is_efi_data(phys_addr, size))
			return false;
	}

	return !memremap_should_map_decrypted(phys_addr, size);
}

/*
 * Architecture override of __weak function to adjust the protection attributes
 * used when remapping memory. By default, early_memremap() will map the data
 * as encrypted. Determine if an encrypted mapping should not be done and set
 * the appropriate protection attributes.
 */
pgprot_t __init early_memremap_pgprot_adjust(resource_size_t phys_addr,
					     unsigned long size,
					     pgprot_t prot)
{
	bool encrypted_prot;

	if (!cc_platform_has(CC_ATTR_MEM_ENCRYPT))
		return prot;

	encrypted_prot = true;

	if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) {
		if (early_memremap_is_setup_data(phys_addr, size) ||
		    memremap_is_efi_data(phys_addr, size))
			encrypted_prot = false;
	}

	if (encrypted_prot && memremap_should_map_decrypted(phys_addr, size))
		encrypted_prot = false;

	return encrypted_prot ? pgprot_encrypted(prot)
			      : pgprot_decrypted(prot);
}

bool phys_mem_access_encrypted(unsigned long phys_addr, unsigned long size)
{
	return arch_memremap_can_ram_remap(phys_addr, size, 0);
}

/* Remap memory with encryption */
void __init *early_memremap_encrypted(resource_size_t phys_addr,
				      unsigned long size)
{
	return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_ENC);
}

/*
 * Remap memory with encryption and write-protected - cannot be called
 * before pat_init() is called
 */
void __init *early_memremap_encrypted_wp(resource_size_t phys_addr,
					 unsigned long size)
{
	if (!x86_has_pat_wp())
		return NULL;
	return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_ENC_WP);
}

/* Remap memory without encryption */
void __init *early_memremap_decrypted(resource_size_t phys_addr,
				      unsigned long size)
{
	return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_NOENC);
}

/*
 * Remap memory without encryption and write-protected - cannot be called
 * before pat_init() is called
 */
void __init *early_memremap_decrypted_wp(resource_size_t phys_addr,
					 unsigned long size)
{
	if (!x86_has_pat_wp())
		return NULL;
	return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_NOENC_WP);
}
#endif	/* CONFIG_AMD_MEM_ENCRYPT */

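/*
 * Usage sketch (illustrative, not part of the original file): early boot
 * code that must read firmware data written unencrypted, as
 * early_memremap_is_setup_data() above does:
 *
 *	data = early_memremap_decrypted(paddr, len);
 *	if (data) {
 *		... inspect data ...
 *		early_memunmap(data, len);
 *	}
 */
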
static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
	/* Don't assume we're using swapper_pg_dir at this point */
	pgd_t *base = __va(read_cr3_pa());
	pgd_t *pgd = &base[pgd_index(addr)];
	p4d_t *p4d = p4d_offset(pgd, addr);
	pud_t *pud = pud_offset(p4d, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}

bool __init is_early_ioremap_ptep(pte_t *ptep)
{
	return ptep >= &bm_pte[0] && ptep < &bm_pte[PAGE_SIZE/sizeof(pte_t)];
}

void __init early_ioremap_init(void)
{
	pmd_t *pmd;

#ifdef CONFIG_X86_64
	BUILD_BUG_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
#else
	WARN_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
#endif

	early_ioremap_setup();

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	memset(bm_pte, 0, sizeof(bm_pte));
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
#define __FIXADDR_TOP (-PAGE_SIZE)
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
#undef __FIXADDR_TOP
	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pmd %p != %p\n",
		       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
		       fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END): %08lx\n",
		       fix_to_virt(FIX_BTMAP_END));

		printk(KERN_WARNING "FIX_BTMAP_END: %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN: %d\n",
		       FIX_BTMAP_BEGIN);
	}
}

void __init __early_set_fixmap(enum fixed_addresses idx,
			       phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);

	/* Sanitize 'prot' against any unsupported bits: */
	pgprot_val(flags) &= __supported_pte_mask;

	if (pgprot_val(flags))
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	else
		pte_clear(&init_mm, addr, pte);
	flush_tlb_one_kernel(addr);
}