// SPDX-License-Identifier: GPL-2.0-only
/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmiotrace.h>
#include <linux/mem_encrypt.h>
#include <linux/efi.h>

#include <asm/set_memory.h>
#include <asm/e820/api.h>
#include <asm/efi.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/memtype.h>
#include <asm/setup.h>

#include "physaddr.h"

/*
 * Descriptor controlling ioremap() behavior.
 */
struct ioremap_desc {
	unsigned int flags;
};
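/*
 * Note: @flags above is filled in by __ioremap_check_mem() and is a bitwise
 * OR of IORES_MAP_SYSTEM_RAM and IORES_MAP_ENCRYPTED, collected while
 * walking the iomem resources that intersect the requested range.
 */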

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
			enum page_cache_mode pcm)
{
	unsigned long nrpages = size >> PAGE_SHIFT;
	int err;

	switch (pcm) {
	case _PAGE_CACHE_MODE_UC:
	default:
		err = _set_memory_uc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WC:
		err = _set_memory_wc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WT:
		err = _set_memory_wt(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WB:
		err = _set_memory_wb(vaddr, nrpages);
		break;
	}

	return err;
}
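/*
 * Usage sketch (illustrative only, not a call site in this file): the memtype
 * code uses this helper to keep the direct map consistent with an ioremap'd
 * alias, along the lines of
 *
 *	if (ioremap_change_attr((unsigned long)__va(paddr), size,
 *				_PAGE_CACHE_MODE_WC))
 *		pr_warn("could not update direct map attributes\n");
 *
 * where paddr/size describe the region whose linear-map attributes must match
 * the attributes of the new mapping.
 */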

/* Does the range (or a subset of) contain normal RAM? */
static unsigned int __ioremap_check_ram(struct resource *res)
{
	unsigned long start_pfn, stop_pfn;
	unsigned long i;

	if ((res->flags & IORESOURCE_SYSTEM_RAM) != IORESOURCE_SYSTEM_RAM)
		return 0;

	start_pfn = (res->start + PAGE_SIZE - 1) >> PAGE_SHIFT;
	stop_pfn = (res->end + 1) >> PAGE_SHIFT;
	if (stop_pfn > start_pfn) {
		for (i = 0; i < (stop_pfn - start_pfn); ++i)
			if (pfn_valid(start_pfn + i) &&
			    !PageReserved(pfn_to_page(start_pfn + i)))
				return IORES_MAP_SYSTEM_RAM;
	}

	return 0;
}

/*
 * In a SEV guest, NONE and RESERVED should not be mapped encrypted because
 * the whole memory is already encrypted there.
 */
static unsigned int __ioremap_check_encrypted(struct resource *res)
{
	if (!sev_active())
		return 0;

	switch (res->desc) {
	case IORES_DESC_NONE:
	case IORES_DESC_RESERVED:
		break;
	default:
		return IORES_MAP_ENCRYPTED;
	}

	return 0;
}

/*
 * The EFI runtime services data area is not covered by walk_mem_res(), but must
 * be mapped encrypted when SEV is active.
 */
static void __ioremap_check_other(resource_size_t addr, struct ioremap_desc *desc)
{
	if (!sev_active())
		return;

	if (efi_mem_type(addr) == EFI_RUNTIME_SERVICES_DATA)
		desc->flags |= IORES_MAP_ENCRYPTED;
}

static int __ioremap_collect_map_flags(struct resource *res, void *arg)
{
	struct ioremap_desc *desc = arg;

	if (!(desc->flags & IORES_MAP_SYSTEM_RAM))
		desc->flags |= __ioremap_check_ram(res);

	if (!(desc->flags & IORES_MAP_ENCRYPTED))
		desc->flags |= __ioremap_check_encrypted(res);

	return ((desc->flags & (IORES_MAP_SYSTEM_RAM | IORES_MAP_ENCRYPTED)) ==
			       (IORES_MAP_SYSTEM_RAM | IORES_MAP_ENCRYPTED));
}

/*
 * To avoid multiple resource walks, this function walks resources marked as
 * IORESOURCE_MEM and IORESOURCE_BUSY, looking for system RAM and/or a
 * resource whose descriptor is not IORES_DESC_NONE (e.g. IORES_DESC_ACPI_TABLES).
 *
 * After that, misc other ranges which do not fall into the above category
 * are dealt with in __ioremap_check_other().
 */
static void __ioremap_check_mem(resource_size_t addr, unsigned long size,
				struct ioremap_desc *desc)
{
	u64 start, end;

	start = (u64)addr;
	end = start + size - 1;
	memset(desc, 0, sizeof(struct ioremap_desc));

	walk_mem_res(start, end, desc, __ioremap_collect_map_flags);

	__ioremap_check_other(addr, desc);
}
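/*
 * Worked example (hypothetical request, for illustration only): in a SEV
 * guest, a range overlapping an ACPI table region ends up with
 * desc->flags == IORES_MAP_ENCRYPTED and is mapped encrypted by
 * __ioremap_caller() below; a range overlapping usable system RAM sets
 * IORES_MAP_SYSTEM_RAM instead and the remap request is refused.
 */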

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. It transparently creates kernel huge I/O mappings when
 * the physical address is aligned to a huge page size (1GB or 2MB) and
 * the requested size is at least one huge page.
 *
 * NOTE: MTRRs can override PAT memory types with 4KB granularity.
 * Therefore, the mapping code falls back to smaller pages (down to 4KB)
 * when a mapping range is covered by non-WB MTRR types.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *
__ioremap_caller(resource_size_t phys_addr, unsigned long size,
		 enum page_cache_mode pcm, void *caller, bool encrypted)
{
	unsigned long offset, vaddr;
	resource_size_t last_addr;
	const resource_size_t unaligned_phys_addr = phys_addr;
	const unsigned long unaligned_size = size;
	struct ioremap_desc io_desc;
	struct vm_struct *area;
	enum page_cache_mode new_pcm;
	pgprot_t prot;
	int retval;
	void __iomem *ret_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	if (!phys_addr_valid(phys_addr)) {
		printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
		       (unsigned long long)phys_addr);
		WARN_ON_ONCE(1);
		return NULL;
	}

	__ioremap_check_mem(phys_addr, size, &io_desc);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (io_desc.flags & IORES_MAP_SYSTEM_RAM) {
		WARN_ONCE(1, "ioremap on RAM at %pa - %pa\n",
			  &phys_addr, &last_addr);
		return NULL;
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PHYSICAL_PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	retval = memtype_reserve(phys_addr, (u64)phys_addr + size,
						pcm, &new_pcm);
	if (retval) {
		printk(KERN_ERR "ioremap memtype_reserve failed %d\n", retval);
		return NULL;
	}

	if (pcm != new_pcm) {
		if (!is_new_memtype_allowed(phys_addr, size, pcm, new_pcm)) {
			printk(KERN_ERR
		"ioremap error for 0x%llx-0x%llx, requested 0x%x, got 0x%x\n",
				(unsigned long long)phys_addr,
				(unsigned long long)(phys_addr + size),
				pcm, new_pcm);
			goto err_free_memtype;
		}
		pcm = new_pcm;
	}

	/*
	 * If the page being mapped is in memory and SEV is active then
	 * make sure the memory encryption attribute is enabled in the
	 * resulting mapping.
	 */
	prot = PAGE_KERNEL_IO;
	if ((io_desc.flags & IORES_MAP_ENCRYPTED) || encrypted)
		prot = pgprot_encrypted(prot);

	switch (pcm) {
	case _PAGE_CACHE_MODE_UC:
	default:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_UC));
		break;
	case _PAGE_CACHE_MODE_UC_MINUS:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS));
		break;
	case _PAGE_CACHE_MODE_WC:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_WC));
		break;
	case _PAGE_CACHE_MODE_WT:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_WT));
		break;
	case _PAGE_CACHE_MODE_WB:
		break;
	}

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		goto err_free_memtype;
	area->phys_addr = phys_addr;
	vaddr = (unsigned long) area->addr;

	if (memtype_kernel_map_sync(phys_addr, size, pcm))
		goto err_free_area;

	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot))
		goto err_free_area;

	ret_addr = (void __iomem *) (vaddr + offset);
	mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);

	/*
	 * Check if the request spans more than any BAR in the iomem resource
	 * tree.
	 */
	if (iomem_map_sanity_check(unaligned_phys_addr, unaligned_size))
		pr_warn("caller %pS mapping multiple BARs\n", caller);

	return ret_addr;
err_free_area:
	free_vm_area(area);
err_free_memtype:
	memtype_free(phys_addr, phys_addr + size);
	return NULL;
}

/**
 * ioremap     -   map bus memory into CPU space
 * @phys_addr:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable:
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap(resource_size_t phys_addr, unsigned long size)
{
	/*
	 * Ideally, this should be:
	 *	pat_enabled() ? _PAGE_CACHE_MODE_UC : _PAGE_CACHE_MODE_UC_MINUS;
	 *
	 * Till we fix all X drivers to use ioremap_wc(), we will use
	 * UC MINUS. Drivers that are certain they need or can already
	 * be converted over to strong UC can use ioremap_uc().
	 */
	enum page_cache_mode pcm = _PAGE_CACHE_MODE_UC_MINUS;

	return __ioremap_caller(phys_addr, size, pcm,
				__builtin_return_address(0), false);
}
EXPORT_SYMBOL(ioremap);
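/*
 * Typical driver usage (illustrative sketch only; the BAR index and register
 * offsets below are made up):
 *
 *	void __iomem *regs;
 *
 *	regs = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(0x1, regs + 0x10);
 *	val = readl(regs + 0x14);
 *	iounmap(regs);
 */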

/**
 * ioremap_uc     -   map bus memory into CPU space as strongly uncachable
 * @phys_addr:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap_uc performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked with a strong
 * preference as completely uncachable on the CPU when possible. For non-PAT
 * systems this ends up setting page-attribute flags PCD=1, PWT=1. For PAT
 * systems this will set the PAT entry for the pages as strong UC.  This call
 * will honor existing caching rules from things like the PCI bus. Note that
 * there are other caches and buffers on many busses. In particular driver
 * authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable:
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_uc(resource_size_t phys_addr, unsigned long size)
{
	enum page_cache_mode pcm = _PAGE_CACHE_MODE_UC;

	return __ioremap_caller(phys_addr, size, pcm,
				__builtin_return_address(0), false);
}
EXPORT_SYMBOL_GPL(ioremap_uc);

/**
 * ioremap_wc	-	map memory into CPU space write combined
 * @phys_addr:	bus address of the memory
 * @size:	size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WC,
					__builtin_return_address(0), false);
}
EXPORT_SYMBOL(ioremap_wc);
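/*
 * Illustrative sketch (not from this file): a framebuffer-style driver that
 * streams pixel data would typically prefer the write-combined variant:
 *
 *	info->screen_base = ioremap_wc(info->fix.smem_start,
 *				       info->fix.smem_len);
 *
 * Reads from a WC mapping are still uncached, so this mainly helps
 * write-mostly buffers.
 */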

/**
 * ioremap_wt	-	map memory into CPU space write through
 * @phys_addr:	bus address of the memory
 * @size:	size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write through.
 * Write through stores data into memory while keeping the cache up-to-date.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wt(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WT,
					__builtin_return_address(0), false);
}
EXPORT_SYMBOL(ioremap_wt);

void __iomem *ioremap_encrypted(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WB,
				__builtin_return_address(0), true);
}
EXPORT_SYMBOL(ioremap_encrypted);

void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WB,
				__builtin_return_address(0), false);
}
EXPORT_SYMBOL(ioremap_cache);

void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
				unsigned long prot_val)
{
	return __ioremap_caller(phys_addr, size,
				pgprot2cachemode(__pgprot(prot_val)),
				__builtin_return_address(0), false);
}
EXPORT_SYMBOL(ioremap_prot);
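/*
 * Summary of the variants above (all of them end up in __ioremap_caller()):
 *
 *	ioremap()		_PAGE_CACHE_MODE_UC_MINUS
 *	ioremap_uc()		_PAGE_CACHE_MODE_UC
 *	ioremap_wc()		_PAGE_CACHE_MODE_WC
 *	ioremap_wt()		_PAGE_CACHE_MODE_WT
 *	ioremap_cache()		_PAGE_CACHE_MODE_WB
 *	ioremap_encrypted()	_PAGE_CACHE_MODE_WB, forced encrypted mapping
 *	ioremap_prot()		cache mode derived from the raw prot_val
 */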

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * The PCI/ISA range special-casing was removed from __ioremap()
	 * so this check, in theory, can be removed. However, there are
	 * cases where iounmap() is called for addresses not obtained via
	 * ioremap() (vga16fb for example). Add a warning so that these
	 * cases can be caught and fixed.
	 */
	if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    (void __force *)addr < phys_to_virt(ISA_END_ADDRESS)) {
		WARN(1, "iounmap() called for ISA range not obtained using ioremap()\n");
		return;
	}

	mmiotrace_iounmap(addr);

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	/* Use the vm area unlocked, assuming the caller
	   ensures there isn't another iounmap for the same address
	   in parallel. Reuse of the virtual address is prevented by
	   leaving it in the global lists until we're done with it.
	   cpa takes care of the direct mappings. */
	p = find_vm_area((void __force *)addr);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	memtype_free(p->phys_addr, p->phys_addr + get_vm_area_size(p));

	/* Finally remove it */
	o = remove_vm_area((void __force *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);

int __init arch_ioremap_p4d_supported(void)
{
	return 0;
}

int __init arch_ioremap_pud_supported(void)
{
#ifdef CONFIG_X86_64
	return boot_cpu_has(X86_FEATURE_GBPAGES);
#else
	return 0;
#endif
}

int __init arch_ioremap_pmd_supported(void)
{
	return boot_cpu_has(X86_FEATURE_PSE);
}

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
void *xlate_dev_mem_ptr(phys_addr_t phys)
{
	unsigned long start  = phys &  PAGE_MASK;
	unsigned long offset = phys & ~PAGE_MASK;
	void *vaddr;

	/* memremap() maps if RAM, otherwise falls back to ioremap() */
	vaddr = memremap(start, PAGE_SIZE, MEMREMAP_WB);

	/* Only add the offset on success and return NULL if memremap() failed */
	if (vaddr)
		vaddr += offset;

	return vaddr;
}

void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
{
	memunmap((void *)((unsigned long)addr & PAGE_MASK));
}
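/*
 * Illustrative sketch (simplified, not code in this file): the /dev/mem read
 * path typically translates, copies from, and releases one page at a time:
 *
 *	ptr = xlate_dev_mem_ptr(p);
 *	if (!ptr)
 *		return -EFAULT;
 *	remaining = copy_to_user(buf, ptr, sz);
 *	unxlate_dev_mem_ptr(p, ptr);
 */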

/*
 * Examine the physical address to determine if it is an area of memory
 * that should be mapped decrypted.  If the memory is not part of the
 * kernel usable area it was accessed and created decrypted, so these
 * areas should be mapped decrypted. And since the encryption key can
 * change across reboots, persistent memory should also be mapped
 * decrypted.
 *
 * If SEV is active, that implies that BIOS/UEFI also ran encrypted so
 * only persistent memory should be mapped decrypted.
 */
static bool memremap_should_map_decrypted(resource_size_t phys_addr,
					  unsigned long size)
{
	int is_pmem;

	/*
	 * Check if the address is part of a persistent memory region.
	 * This check covers areas added by E820, EFI and ACPI.
	 */
	is_pmem = region_intersects(phys_addr, size, IORESOURCE_MEM,
				    IORES_DESC_PERSISTENT_MEMORY);
	if (is_pmem != REGION_DISJOINT)
		return true;

	/*
	 * Check if the non-volatile attribute is set for an EFI
	 * reserved area.
	 */
	if (efi_enabled(EFI_BOOT)) {
		switch (efi_mem_type(phys_addr)) {
		case EFI_RESERVED_TYPE:
			if (efi_mem_attributes(phys_addr) & EFI_MEMORY_NV)
				return true;
			break;
		default:
			break;
		}
	}

	/* Check if the address is outside kernel usable area */
	switch (e820__get_entry_type(phys_addr, phys_addr + size - 1)) {
	case E820_TYPE_RESERVED:
	case E820_TYPE_ACPI:
	case E820_TYPE_NVS:
	case E820_TYPE_UNUSABLE:
		/* For SEV, these areas are encrypted */
		if (sev_active())
			break;
		/* Fallthrough */

	case E820_TYPE_PRAM:
		return true;
	default:
		break;
	}

	return false;
}
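/*
 * Worked example (hypothetical range): persistent memory registered as
 * E820_TYPE_PRAM returns true above and is therefore mapped decrypted, since
 * its contents must survive a reboot that changes the SME/SEV key; ordinary
 * usable RAM falls through to the default case and stays encrypted.
 */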

/*
 * Examine the physical address to determine if it is EFI data. Check
 * it against the boot params structure and EFI tables and memory types.
 */
static bool memremap_is_efi_data(resource_size_t phys_addr,
				 unsigned long size)
{
	u64 paddr;

	/* Check if the address is part of EFI boot/runtime data */
	if (!efi_enabled(EFI_BOOT))
		return false;

	paddr = boot_params.efi_info.efi_memmap_hi;
	paddr <<= 32;
	paddr |= boot_params.efi_info.efi_memmap;
	if (phys_addr == paddr)
		return true;

	paddr = boot_params.efi_info.efi_systab_hi;
	paddr <<= 32;
	paddr |= boot_params.efi_info.efi_systab;
	if (phys_addr == paddr)
		return true;

	if (efi_is_table_address(phys_addr))
		return true;

	switch (efi_mem_type(phys_addr)) {
	case EFI_BOOT_SERVICES_DATA:
	case EFI_RUNTIME_SERVICES_DATA:
		return true;
	default:
		break;
	}

	return false;
}

/*
 * Examine the physical address to determine if it is boot data by checking
 * it against the boot params setup_data chain.
 */
static bool memremap_is_setup_data(resource_size_t phys_addr,
				   unsigned long size)
{
	struct setup_data *data;
	u64 paddr, paddr_next;

	paddr = boot_params.hdr.setup_data;
	while (paddr) {
		unsigned int len;

		if (phys_addr == paddr)
			return true;

		data = memremap(paddr, sizeof(*data),
				MEMREMAP_WB | MEMREMAP_DEC);

		paddr_next = data->next;
		len = data->len;

		if ((phys_addr > paddr) && (phys_addr < (paddr + len))) {
			memunmap(data);
			return true;
		}

		if (data->type == SETUP_INDIRECT &&
		    ((struct setup_indirect *)data->data)->type != SETUP_INDIRECT) {
			paddr = ((struct setup_indirect *)data->data)->addr;
			len = ((struct setup_indirect *)data->data)->len;
		}

		memunmap(data);

		if ((phys_addr > paddr) && (phys_addr < (paddr + len)))
			return true;

		paddr = paddr_next;
	}

	return false;
}
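/*
 * Note on the walk above: boot_params.hdr.setup_data is the head of a singly
 * linked list of physical addresses. A hit is either the setup_data header
 * itself (phys_addr == paddr) or a byte of its payload inside
 * (paddr, paddr + len); SETUP_INDIRECT entries are followed one level to the
 * region they describe.
 */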

/*
 * Examine the physical address to determine if it is boot data by checking
 * it against the boot params setup_data chain (early boot version).
 */
static bool __init early_memremap_is_setup_data(resource_size_t phys_addr,
						unsigned long size)
{
	struct setup_data *data;
	u64 paddr, paddr_next;

	paddr = boot_params.hdr.setup_data;
	while (paddr) {
		unsigned int len;

		if (phys_addr == paddr)
			return true;

		data = early_memremap_decrypted(paddr, sizeof(*data));

		paddr_next = data->next;
		len = data->len;

		early_memunmap(data, sizeof(*data));

		if ((phys_addr > paddr) && (phys_addr < (paddr + len)))
			return true;

		paddr = paddr_next;
	}

	return false;
}

/*
 * Architecture function to determine if RAM remap is allowed. By default, a
 * RAM remap will map the data as encrypted. Determine if a RAM remap should
 * not be done so that the data will be mapped decrypted.
 */
bool arch_memremap_can_ram_remap(resource_size_t phys_addr, unsigned long size,
				 unsigned long flags)
{
	if (!mem_encrypt_active())
		return true;

	if (flags & MEMREMAP_ENC)
		return true;

	if (flags & MEMREMAP_DEC)
		return false;

	if (sme_active()) {
		if (memremap_is_setup_data(phys_addr, size) ||
		    memremap_is_efi_data(phys_addr, size))
			return false;
	}

	return !memremap_should_map_decrypted(phys_addr, size);
}
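/*
 * Illustrative note: a caller that knows a range was written unencrypted can
 * bypass these heuristics explicitly, as memremap_is_setup_data() above does:
 *
 *	data = memremap(paddr, sizeof(*data), MEMREMAP_WB | MEMREMAP_DEC);
 *
 * A plain MEMREMAP_WB request, by contrast, is routed through this function
 * and normally stays encrypted while SME/SEV is active.
 */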

/*
 * Architecture override of __weak function to adjust the protection attributes
 * used when remapping memory. By default, early_memremap() will map the data
 * as encrypted. Determine if an encrypted mapping should not be done and set
 * the appropriate protection attributes.
 */
pgprot_t __init early_memremap_pgprot_adjust(resource_size_t phys_addr,
					     unsigned long size,
					     pgprot_t prot)
{
	bool encrypted_prot;

	if (!mem_encrypt_active())
		return prot;

	encrypted_prot = true;

	if (sme_active()) {
		if (early_memremap_is_setup_data(phys_addr, size) ||
		    memremap_is_efi_data(phys_addr, size))
			encrypted_prot = false;
	}

	if (encrypted_prot && memremap_should_map_decrypted(phys_addr, size))
		encrypted_prot = false;

	return encrypted_prot ? pgprot_encrypted(prot)
			      : pgprot_decrypted(prot);
}

bool phys_mem_access_encrypted(unsigned long phys_addr, unsigned long size)
{
	return arch_memremap_can_ram_remap(phys_addr, size, 0);
}

#ifdef CONFIG_AMD_MEM_ENCRYPT
/* Remap memory with encryption */
void __init *early_memremap_encrypted(resource_size_t phys_addr,
				      unsigned long size)
{
	return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_ENC);
}

/*
 * Remap memory with encryption and write-protected - cannot be called
 * before pat_init() is called
 */
void __init *early_memremap_encrypted_wp(resource_size_t phys_addr,
					 unsigned long size)
{
	/* Be sure the write-protect PAT entry is set for write-protect */
	if (__pte2cachemode_tbl[_PAGE_CACHE_MODE_WP] != _PAGE_CACHE_MODE_WP)
		return NULL;

	return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_ENC_WP);
}

/* Remap memory without encryption */
void __init *early_memremap_decrypted(resource_size_t phys_addr,
				      unsigned long size)
{
	return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_NOENC);
}

/*
 * Remap memory without encryption and write-protected - cannot be called
 * before pat_init() is called
 */
void __init *early_memremap_decrypted_wp(resource_size_t phys_addr,
					 unsigned long size)
{
	/* Be sure the write-protect PAT entry is set for write-protect */
	if (__pte2cachemode_tbl[_PAGE_CACHE_MODE_WP] != _PAGE_CACHE_MODE_WP)
		return NULL;

	return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_NOENC_WP);
}
#endif	/* CONFIG_AMD_MEM_ENCRYPT */

static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
	/* Don't assume we're using swapper_pg_dir at this point */
	pgd_t *base = __va(read_cr3_pa());
	pgd_t *pgd = &base[pgd_index(addr)];
	p4d_t *p4d = p4d_offset(pgd, addr);
	pud_t *pud = pud_offset(p4d, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}

bool __init is_early_ioremap_ptep(pte_t *ptep)
{
	return ptep >= &bm_pte[0] && ptep < &bm_pte[PAGE_SIZE/sizeof(pte_t)];
}

void __init early_ioremap_init(void)
{
	pmd_t *pmd;

#ifdef CONFIG_X86_64
	BUILD_BUG_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
#else
	WARN_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
#endif

	early_ioremap_setup();

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	memset(bm_pte, 0, sizeof(bm_pte));
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
#define __FIXADDR_TOP (-PAGE_SIZE)
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
#undef __FIXADDR_TOP
	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pmd %p != %p\n",
		       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
			fix_to_virt(FIX_BTMAP_END));

		printk(KERN_WARNING "FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN:     %d\n",
		       FIX_BTMAP_BEGIN);
	}
}

void __init __early_set_fixmap(enum fixed_addresses idx,
			       phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);

	/* Sanitize 'prot' against any unsupported bits: */
	pgprot_val(flags) &= __supported_pte_mask;

	if (pgprot_val(flags))
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	else
		pte_clear(&init_mm, addr, pte);
	__flush_tlb_one_kernel(addr);
}
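/*
 * Usage sketch (illustrative only; the fixmap slot and flags are examples):
 * the early ioremap machinery maps one fixmap slot at a time with something
 * like
 *
 *	__early_set_fixmap(FIX_BTMAP_BEGIN, phys_addr & PAGE_MASK,
 *			   PAGE_KERNEL_IO);
 *
 * and tears the slot down again by passing an empty pgprot, which takes the
 * pte_clear() path above.
 */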