xref: /linux/arch/x86/mm/ioremap.c (revision e9d1d2bb75b2d5d4b426769c5aae0ce8cef3558f)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PCs.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmiotrace.h>
#include <linux/cc_platform.h>
#include <linux/efi.h>
#include <linux/pgtable.h>

#include <asm/set_memory.h>
#include <asm/e820/api.h>
#include <asm/efi.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/memtype.h>
#include <asm/setup.h>

#include "physaddr.h"

/*
 * Descriptor controlling ioremap() behavior.
 */
struct ioremap_desc {
	unsigned int flags;
};

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
			enum page_cache_mode pcm)
{
	unsigned long nrpages = size >> PAGE_SHIFT;
	int err;

	switch (pcm) {
	case _PAGE_CACHE_MODE_UC:
	default:
		err = _set_memory_uc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WC:
		err = _set_memory_wc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WT:
		err = _set_memory_wt(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WB:
		err = _set_memory_wb(vaddr, nrpages);
		break;
	}

	return err;
}
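
/*
 * ioremap_change_attr() is, for example, what memtype_kernel_map_sync()
 * uses to keep the direct-map (__va()) alias of a range consistent with
 * a new ioremap() mapping, so one physical range never ends up with
 * conflicting cache attributes.
 */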

/* Does the range (or a subset of it) contain normal RAM? */
static unsigned int __ioremap_check_ram(struct resource *res)
{
	unsigned long start_pfn, stop_pfn;
	unsigned long i;

	if ((res->flags & IORESOURCE_SYSTEM_RAM) != IORESOURCE_SYSTEM_RAM)
		return 0;

	start_pfn = (res->start + PAGE_SIZE - 1) >> PAGE_SHIFT;
	stop_pfn = (res->end + 1) >> PAGE_SHIFT;
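	/*
	 * start_pfn rounds the resource start up and stop_pfn rounds the
	 * (inclusive) end down, so only pages fully contained in the
	 * resource are examined below.
	 */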
	if (stop_pfn > start_pfn) {
		for (i = 0; i < (stop_pfn - start_pfn); ++i)
			if (pfn_valid(start_pfn + i) &&
			    !PageReserved(pfn_to_page(start_pfn + i)))
				return IORES_MAP_SYSTEM_RAM;
	}

	return 0;
}

/*
 * In a SEV guest, NONE and RESERVED should not be mapped encrypted because
 * in such guests the whole memory is already encrypted.
 */
static unsigned int __ioremap_check_encrypted(struct resource *res)
{
	if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
		return 0;

	switch (res->desc) {
	case IORES_DESC_NONE:
	case IORES_DESC_RESERVED:
		break;
	default:
		return IORES_MAP_ENCRYPTED;
	}

	return 0;
}

/*
 * The EFI runtime services data area is not covered by walk_mem_res(), but must
 * be mapped encrypted when SEV is active.
 */
static void __ioremap_check_other(resource_size_t addr, struct ioremap_desc *desc)
{
	if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
		return;

	if (!IS_ENABLED(CONFIG_EFI))
		return;

	if (efi_mem_type(addr) == EFI_RUNTIME_SERVICES_DATA ||
	    (efi_mem_type(addr) == EFI_BOOT_SERVICES_DATA &&
	     efi_mem_attributes(addr) & EFI_MEMORY_RUNTIME))
		desc->flags |= IORES_MAP_ENCRYPTED;
}

static int __ioremap_collect_map_flags(struct resource *res, void *arg)
{
	struct ioremap_desc *desc = arg;

	if (!(desc->flags & IORES_MAP_SYSTEM_RAM))
		desc->flags |= __ioremap_check_ram(res);

	if (!(desc->flags & IORES_MAP_ENCRYPTED))
		desc->flags |= __ioremap_check_encrypted(res);

	return ((desc->flags & (IORES_MAP_SYSTEM_RAM | IORES_MAP_ENCRYPTED)) ==
			       (IORES_MAP_SYSTEM_RAM | IORES_MAP_ENCRYPTED));
}

/*
 * To avoid multiple resource walks, this function walks resources marked as
 * IORESOURCE_MEM and IORESOURCE_BUSY, looking for system RAM and/or a
 * resource not described as IORES_DESC_NONE (e.g. IORES_DESC_ACPI_TABLES).
 *
 * After that, deal with misc other ranges in __ioremap_check_other() which do
 * not fall into the above category.
 */
static void __ioremap_check_mem(resource_size_t addr, unsigned long size,
				struct ioremap_desc *desc)
{
	u64 start, end;

	start = (u64)addr;
	end = start + size - 1;
	memset(desc, 0, sizeof(struct ioremap_desc));

	walk_mem_res(start, end, desc, __ioremap_collect_map_flags);

	__ioremap_check_other(addr, desc);
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. It transparently creates a kernel huge I/O mapping when
 * the physical address is aligned to a huge page size (1GB or 2MB) and
 * the requested size is at least the huge page size.
 *
 * NOTE: MTRRs can override PAT memory types with a 4KB granularity.
 * Therefore, the mapping code falls back to using smaller pages, down to
 * 4KB, when a mapping range is covered by non-WB type MTRRs.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *
__ioremap_caller(resource_size_t phys_addr, unsigned long size,
		 enum page_cache_mode pcm, void *caller, bool encrypted)
{
	unsigned long offset, vaddr;
	resource_size_t last_addr;
	const resource_size_t unaligned_phys_addr = phys_addr;
	const unsigned long unaligned_size = size;
	struct ioremap_desc io_desc;
	struct vm_struct *area;
	enum page_cache_mode new_pcm;
	pgprot_t prot;
	int retval;
	void __iomem *ret_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	if (!phys_addr_valid(phys_addr)) {
		printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
		       (unsigned long long)phys_addr);
		WARN_ON_ONCE(1);
		return NULL;
	}

	__ioremap_check_mem(phys_addr, size, &io_desc);

	/*
	 * Don't allow anybody to remap normal RAM that we're using.
	 */
	if (io_desc.flags & IORES_MAP_SYSTEM_RAM) {
		WARN_ONCE(1, "ioremap on RAM at %pa - %pa\n",
			  &phys_addr, &last_addr);
		return NULL;
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PHYSICAL_PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;
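
	/*
	 * Worked example (illustrative values): for phys_addr 0x12345678
	 * and size 0x100, last_addr is 0x12345777; offset becomes 0x678,
	 * phys_addr is rounded down to 0x12345000 and size is rounded up
	 * to 0x1000, so the caller gets back vaddr + 0x678.
	 */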

	retval = memtype_reserve(phys_addr, (u64)phys_addr + size,
						pcm, &new_pcm);
	if (retval) {
		printk(KERN_ERR "ioremap memtype_reserve failed %d\n", retval);
		return NULL;
	}

	if (pcm != new_pcm) {
		if (!is_new_memtype_allowed(phys_addr, size, pcm, new_pcm)) {
			printk(KERN_ERR
		"ioremap error for 0x%llx-0x%llx, requested 0x%x, got 0x%x\n",
				(unsigned long long)phys_addr,
				(unsigned long long)(phys_addr + size),
				pcm, new_pcm);
			goto err_free_memtype;
		}
		pcm = new_pcm;
	}

	/*
	 * If the page being mapped is in memory and SEV is active then
	 * make sure the memory encryption attribute is enabled in the
	 * resulting mapping.
	 */
	prot = PAGE_KERNEL_IO;
	if ((io_desc.flags & IORES_MAP_ENCRYPTED) || encrypted)
		prot = pgprot_encrypted(prot);

	switch (pcm) {
	case _PAGE_CACHE_MODE_UC:
	default:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_UC));
		break;
	case _PAGE_CACHE_MODE_UC_MINUS:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS));
		break;
	case _PAGE_CACHE_MODE_WC:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_WC));
		break;
	case _PAGE_CACHE_MODE_WT:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_WT));
		break;
	case _PAGE_CACHE_MODE_WB:
		break;
	}

	/*
	 * Ok, go for it.
	 */
	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		goto err_free_memtype;
	area->phys_addr = phys_addr;
	vaddr = (unsigned long) area->addr;

	if (memtype_kernel_map_sync(phys_addr, size, pcm))
		goto err_free_area;

	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot))
		goto err_free_area;

	ret_addr = (void __iomem *) (vaddr + offset);
	mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);

	/*
	 * Check if the request spans more than any BAR in the iomem resource
	 * tree.
	 */
	if (iomem_map_sanity_check(unaligned_phys_addr, unaligned_size))
		pr_warn("caller %pS mapping multiple BARs\n", caller);

	return ret_addr;
err_free_area:
	free_vm_area(area);
err_free_memtype:
	memtype_free(phys_addr, phys_addr + size);
	return NULL;
}

/**
 * ioremap - map bus memory into CPU space
 * @phys_addr:	bus address of the memory
 * @size:	size of the resource to map
 *
 * ioremap performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap(resource_size_t phys_addr, unsigned long size)
{
	/*
	 * Ideally, this should be:
	 *	pat_enabled() ? _PAGE_CACHE_MODE_UC : _PAGE_CACHE_MODE_UC_MINUS;
	 *
	 * Till we fix all X drivers to use ioremap_wc(), we will use
	 * UC MINUS. Drivers that are certain they need or can already
	 * be converted over to strong UC can use ioremap_uc().
	 */
	enum page_cache_mode pcm = _PAGE_CACHE_MODE_UC_MINUS;

	return __ioremap_caller(phys_addr, size, pcm,
				__builtin_return_address(0), false);
}
EXPORT_SYMBOL(ioremap);
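
/*
 * Typical usage (illustrative sketch, not taken from this file; 'pdev'
 * and the CTRL_REG offset are made up):
 *
 *	void __iomem *regs;
 *
 *	regs = ioremap(pci_resource_start(pdev, 0),
 *		       pci_resource_len(pdev, 0));
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + CTRL_REG);
 *	...
 *	iounmap(regs);
 */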

/**
 * ioremap_uc - map bus memory into CPU space as strongly uncachable
 * @phys_addr:	bus address of the memory
 * @size:	size of the resource to map
 *
 * ioremap_uc performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked with a strong
 * preference as completely uncachable on the CPU when possible. For non-PAT
 * systems this ends up setting page-attribute flags PCD=1, PWT=1. For PAT
 * systems this will set the PAT entry for the pages as strong UC. This call
 * will honor existing caching rules from things like the PCI bus. Note that
 * there are other caches and buffers on many busses. In particular driver
 * authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_uc(resource_size_t phys_addr, unsigned long size)
{
	enum page_cache_mode pcm = _PAGE_CACHE_MODE_UC;

	return __ioremap_caller(phys_addr, size, pcm,
				__builtin_return_address(0), false);
}
EXPORT_SYMBOL_GPL(ioremap_uc);

/**
 * ioremap_wc	-	map memory into CPU space write combined
 * @phys_addr:	bus address of the memory
 * @size:	size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WC,
					__builtin_return_address(0), false);
}
EXPORT_SYMBOL(ioremap_wc);
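
/*
 * Write combining suits streaming stores to things like framebuffers,
 * where individual writes need not reach the device in order. A brief
 * illustrative sketch ('fb_phys' and 'fb_size' are made-up variables):
 *
 *	void __iomem *fb = ioremap_wc(fb_phys, fb_size);
 *
 *	if (fb)
 *		memset_io(fb, 0, fb_size);
 */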

/**
 * ioremap_wt	-	map memory into CPU space write through
 * @phys_addr:	bus address of the memory
 * @size:	size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write through.
 * Write through stores data into memory while keeping the cache up-to-date.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wt(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WT,
					__builtin_return_address(0), false);
}
EXPORT_SYMBOL(ioremap_wt);

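/*
 * ioremap_encrypted - like ioremap_cache(), but with the encryption bit
 * forced on even for ranges that would otherwise be mapped decrypted.
 * Used, for example, by the kdump kernel to access the encrypted memory
 * of the previous SME/SEV kernel.
 */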
void __iomem *ioremap_encrypted(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WB,
				__builtin_return_address(0), true);
}
EXPORT_SYMBOL(ioremap_encrypted);

void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WB,
				__builtin_return_address(0), false);
}
EXPORT_SYMBOL(ioremap_cache);

void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
				unsigned long prot_val)
{
	return __ioremap_caller(phys_addr, size,
				pgprot2cachemode(__pgprot(prot_val)),
				__builtin_return_address(0), false);
}
EXPORT_SYMBOL(ioremap_prot);

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * The PCI/ISA range special-casing was removed from __ioremap()
	 * so this check, in theory, can be removed. However, there are
	 * cases where iounmap() is called for addresses not obtained via
	 * ioremap() (vga16fb for example). Add a warning so that these
	 * cases can be caught and fixed.
	 */
	if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    (void __force *)addr < phys_to_virt(ISA_END_ADDRESS)) {
		WARN(1, "iounmap() called for ISA range not obtained using ioremap()\n");
		return;
	}

	mmiotrace_iounmap(addr);

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	/*
	 * Use the vm area unlocked, assuming the caller ensures there isn't
	 * another iounmap for the same address in parallel. Reuse of the
	 * virtual address is prevented by leaving it in the global lists
	 * until we're done with it. cpa takes care of the direct mappings.
	 */
	p = find_vm_area((void __force *)addr);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	memtype_free(p->phys_addr, p->phys_addr + get_vm_area_size(p));

	/* Finally remove it */
	o = remove_vm_area((void __force *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
void *xlate_dev_mem_ptr(phys_addr_t phys)
{
	unsigned long start  = phys &  PAGE_MASK;
	unsigned long offset = phys & ~PAGE_MASK;
	void *vaddr;

	/* memremap() maps if RAM, otherwise falls back to ioremap() */
	vaddr = memremap(start, PAGE_SIZE, MEMREMAP_WB);

	/* Only add the offset on success and return NULL if memremap() failed */
	if (vaddr)
		vaddr += offset;

	return vaddr;
}

void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
{
	memunmap((void *)((unsigned long)addr & PAGE_MASK));
}
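
/*
 * The two helpers above are used pairwise by the /dev/mem read/write
 * paths, roughly (illustrative sketch; 'p', 'buf' and 'sz' stand in
 * for the caller's variables):
 *
 *	ptr = xlate_dev_mem_ptr(p);
 *	if (!ptr)
 *		return -EFAULT;
 *	left = copy_to_user(buf, ptr, sz);
 *	unxlate_dev_mem_ptr(p, ptr);
 */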

#ifdef CONFIG_AMD_MEM_ENCRYPT
/*
 * Examine the physical address to determine if it is an area of memory
 * that should be mapped decrypted.  If the memory is not part of the
 * kernel usable area, it was accessed and created decrypted, so these
 * areas should be mapped decrypted. And since the encryption key can
 * change across reboots, persistent memory should also be mapped
 * decrypted.
 *
 * If SEV is active, that implies that BIOS/UEFI also ran encrypted so
 * only persistent memory should be mapped decrypted.
 */
static bool memremap_should_map_decrypted(resource_size_t phys_addr,
					  unsigned long size)
{
	int is_pmem;

	/*
	 * Check if the address is part of a persistent memory region.
	 * This check covers areas added by E820, EFI and ACPI.
	 */
	is_pmem = region_intersects(phys_addr, size, IORESOURCE_MEM,
				    IORES_DESC_PERSISTENT_MEMORY);
	if (is_pmem != REGION_DISJOINT)
		return true;

	/*
	 * Check if the non-volatile attribute is set for an EFI
	 * reserved area.
	 */
	if (efi_enabled(EFI_BOOT)) {
		switch (efi_mem_type(phys_addr)) {
		case EFI_RESERVED_TYPE:
			if (efi_mem_attributes(phys_addr) & EFI_MEMORY_NV)
				return true;
			break;
		default:
			break;
		}
	}

	/* Check if the address is outside kernel usable area */
	switch (e820__get_entry_type(phys_addr, phys_addr + size - 1)) {
	case E820_TYPE_RESERVED:
	case E820_TYPE_ACPI:
	case E820_TYPE_NVS:
	case E820_TYPE_UNUSABLE:
		/* For SEV, these areas are encrypted */
		if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
			break;
		fallthrough;

	case E820_TYPE_PRAM:
		return true;
	default:
		break;
	}

	return false;
}

/*
 * Examine the physical address to determine if it is EFI data. Check
 * it against the boot params structure and EFI tables and memory types.
 */
static bool memremap_is_efi_data(resource_size_t phys_addr,
				 unsigned long size)
{
	u64 paddr;

	/* Check if the address is part of EFI boot/runtime data */
	if (!efi_enabled(EFI_BOOT))
		return false;

	paddr = boot_params.efi_info.efi_memmap_hi;
	paddr <<= 32;
	paddr |= boot_params.efi_info.efi_memmap;
	if (phys_addr == paddr)
		return true;

	paddr = boot_params.efi_info.efi_systab_hi;
	paddr <<= 32;
	paddr |= boot_params.efi_info.efi_systab;
	if (phys_addr == paddr)
		return true;

	if (efi_is_table_address(phys_addr))
		return true;

	switch (efi_mem_type(phys_addr)) {
	case EFI_BOOT_SERVICES_DATA:
	case EFI_RUNTIME_SERVICES_DATA:
		return true;
	default:
		break;
	}

	return false;
}

/*
 * Examine the physical address to determine if it is boot data by checking
 * it against the boot params setup_data chain.
 */
static bool memremap_is_setup_data(resource_size_t phys_addr,
				   unsigned long size)
{
	struct setup_data *data;
	u64 paddr, paddr_next;

	paddr = boot_params.hdr.setup_data;
	while (paddr) {
		unsigned int len;

		if (phys_addr == paddr)
			return true;

		data = memremap(paddr, sizeof(*data),
				MEMREMAP_WB | MEMREMAP_DEC);

		paddr_next = data->next;
		len = data->len;

		if ((phys_addr > paddr) && (phys_addr < (paddr + len))) {
			memunmap(data);
			return true;
		}

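		/*
		 * NOTE: only sizeof(*data) bytes were mapped above, so the
		 * SETUP_INDIRECT dereferences of data->data below read past
		 * the requested mapping length and rely on memremap()
		 * returning page-granular mappings.
		 */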
		if (data->type == SETUP_INDIRECT &&
		    ((struct setup_indirect *)data->data)->type != SETUP_INDIRECT) {
			paddr = ((struct setup_indirect *)data->data)->addr;
			len = ((struct setup_indirect *)data->data)->len;
		}

		memunmap(data);

		if ((phys_addr > paddr) && (phys_addr < (paddr + len)))
			return true;

		paddr = paddr_next;
	}

	return false;
}

/*
 * Examine the physical address to determine if it is boot data by checking
 * it against the boot params setup_data chain (early boot version).
 */
static bool __init early_memremap_is_setup_data(resource_size_t phys_addr,
						unsigned long size)
{
	struct setup_data *data;
	u64 paddr, paddr_next;

	paddr = boot_params.hdr.setup_data;
	while (paddr) {
		unsigned int len;

		if (phys_addr == paddr)
			return true;

		data = early_memremap_decrypted(paddr, sizeof(*data));

		paddr_next = data->next;
		len = data->len;

		early_memunmap(data, sizeof(*data));

		if ((phys_addr > paddr) && (phys_addr < (paddr + len)))
			return true;

		paddr = paddr_next;
	}

	return false;
}

/*
 * Architecture function to determine if RAM remap is allowed. By default, a
 * RAM remap will map the data as encrypted. Determine if a RAM remap should
 * not be done so that the data will be mapped decrypted.
 */
bool arch_memremap_can_ram_remap(resource_size_t phys_addr, unsigned long size,
				 unsigned long flags)
{
	if (!cc_platform_has(CC_ATTR_MEM_ENCRYPT))
		return true;

	if (flags & MEMREMAP_ENC)
		return true;

	if (flags & MEMREMAP_DEC)
		return false;

	if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) {
		if (memremap_is_setup_data(phys_addr, size) ||
		    memremap_is_efi_data(phys_addr, size))
			return false;
	}

	return !memremap_should_map_decrypted(phys_addr, size);
}

/*
 * Architecture override of __weak function to adjust the protection attributes
 * used when remapping memory. By default, early_memremap() will map the data
 * as encrypted. Determine if an encrypted mapping should not be done and set
 * the appropriate protection attributes.
 */
pgprot_t __init early_memremap_pgprot_adjust(resource_size_t phys_addr,
					     unsigned long size,
					     pgprot_t prot)
{
	bool encrypted_prot;

	if (!cc_platform_has(CC_ATTR_MEM_ENCRYPT))
		return prot;

	encrypted_prot = true;

	if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) {
		if (early_memremap_is_setup_data(phys_addr, size) ||
		    memremap_is_efi_data(phys_addr, size))
			encrypted_prot = false;
	}

	if (encrypted_prot && memremap_should_map_decrypted(phys_addr, size))
		encrypted_prot = false;

	return encrypted_prot ? pgprot_encrypted(prot)
			      : pgprot_decrypted(prot);
}

bool phys_mem_access_encrypted(unsigned long phys_addr, unsigned long size)
{
	return arch_memremap_can_ram_remap(phys_addr, size, 0);
}

/* Remap memory with encryption */
void __init *early_memremap_encrypted(resource_size_t phys_addr,
				      unsigned long size)
{
	return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_ENC);
}

/*
 * Remap memory encrypted and write-protected - cannot be called
 * before pat_init() is called
 */
void __init *early_memremap_encrypted_wp(resource_size_t phys_addr,
					 unsigned long size)
{
	if (!x86_has_pat_wp())
		return NULL;
	return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_ENC_WP);
}

/* Remap memory without encryption */
void __init *early_memremap_decrypted(resource_size_t phys_addr,
				      unsigned long size)
{
	return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_NOENC);
}
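
/*
 * early_memremap_decrypted() is what early_memremap_is_setup_data()
 * above uses to read the setup_data chain before the regular memremap()
 * machinery is available.
 */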

/*
 * Remap memory decrypted and write-protected - cannot be called
 * before pat_init() is called
 */
void __init *early_memremap_decrypted_wp(resource_size_t phys_addr,
					 unsigned long size)
{
	if (!x86_has_pat_wp())
		return NULL;
	return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_NOENC_WP);
}
#endif	/* CONFIG_AMD_MEM_ENCRYPT */

static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
	/* Don't assume we're using swapper_pg_dir at this point */
	pgd_t *base = __va(read_cr3_pa());
	pgd_t *pgd = &base[pgd_index(addr)];
	p4d_t *p4d = p4d_offset(pgd, addr);
	pud_t *pud = pud_offset(p4d, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}

bool __init is_early_ioremap_ptep(pte_t *ptep)
{
	return ptep >= &bm_pte[0] && ptep < &bm_pte[PAGE_SIZE/sizeof(pte_t)];
}

void __init early_ioremap_init(void)
{
	pmd_t *pmd;

#ifdef CONFIG_X86_64
	BUILD_BUG_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
#else
	WARN_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
#endif

	early_ioremap_setup();

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	memset(bm_pte, 0, sizeof(bm_pte));
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
#define __FIXADDR_TOP (-PAGE_SIZE)
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
#undef __FIXADDR_TOP
	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pmd %p != %p\n",
		       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
			fix_to_virt(FIX_BTMAP_END));

		printk(KERN_WARNING "FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN:     %d\n",
		       FIX_BTMAP_BEGIN);
	}
}

void __init __early_set_fixmap(enum fixed_addresses idx,
			       phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);

	/* Sanitize 'prot' against any unsupported bits: */
	pgprot_val(flags) &= __supported_pte_mask;

	if (pgprot_val(flags))
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	else
		pte_clear(&init_mm, addr, pte);
	flush_tlb_one_kernel(addr);
}
873