xref: /linux/arch/powerpc/mm/ioremap_64.c (revision fbf46565c67c626849c7ce2a326972d3008d2a91)
// SPDX-License-Identifier: GPL-2.0-or-later

#include <linux/io.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

void __iomem *__ioremap_caller(phys_addr_t addr, unsigned long size,
			       pgprot_t prot, void *caller)
{
	phys_addr_t paligned, offset;
	void __iomem *ret;
	int err;

	/* We don't support the 4K PFN hack with ioremap */
	if (pgprot_val(prot) & H_PAGE_4K_PFN)
		return NULL;

	/*
	 * Choose an address to map it to. Once the vmalloc system is running,
	 * we use it. Before that, we map using addresses going up from
	 * ioremap_bot.  vmalloc will use the addresses from IOREMAP_BASE
	 * through ioremap_bot.
	 */
	paligned = addr & PAGE_MASK;
	offset = addr & ~PAGE_MASK;
	size = PAGE_ALIGN(addr + size) - paligned;

	if (size == 0 || paligned == 0)
		return NULL;

	if (slab_is_available())
		return do_ioremap(paligned, offset, size, prot, caller);

	pr_warn("ioremap() called early from %pS. Use early_ioremap() instead\n", caller);

	err = early_ioremap_range(ioremap_bot, paligned, size, prot);
	if (err)
		return NULL;

	ret = (void __iomem *)ioremap_bot + offset;
	ioremap_bot += size + PAGE_SIZE;

	return ret;
}
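
/*
 * Hedged sketch, not part of this file: roughly how a front end such as
 * ioremap() reaches __ioremap_caller().  The real wrapper lives in
 * arch/powerpc/mm/ioremap.c; the body below is illustrative only.
 */
#if 0	/* illustration, not built */
void __iomem *example_ioremap(phys_addr_t addr, unsigned long size)
{
	/* MMIO defaults to a non-cacheable, guarded protection. */
	pgprot_t prot = pgprot_noncached(PAGE_KERNEL);

	/*
	 * Pass the call site so the vmalloc area can be attributed to it,
	 * e.g. in /proc/vmallocinfo.
	 */
	return __ioremap_caller(addr, size, prot, __builtin_return_address(0));
}
#endif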

/*
 * Unmap an IO region and remove it from vmalloc'd list.
 * Access to IO memory should be serialized by driver.
 */
void iounmap(volatile void __iomem *token)
{
	void *addr;

	if (!slab_is_available())
		return;

	addr = (void *)((unsigned long __force)PCI_FIX_ADDR(token) & PAGE_MASK);

	if ((unsigned long)addr < ioremap_bot) {
		pr_warn("Attempt to iounmap early bolted mapping at 0x%p\n", addr);
		return;
	}
	vunmap(addr);
}
EXPORT_SYMBOL(iounmap);
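
/*
 * Hedged usage sketch, not part of this file: a driver maps a device's
 * MMIO registers, accesses them with the usual accessors, and releases
 * the mapping with iounmap().  The physical address and register
 * offsets below are hypothetical.
 */
#if 0	/* illustration, not built */
#include <linux/errno.h>
#include <linux/io.h>

static int example_probe(void)
{
	void __iomem *regs;

	/* Map one page of registers; NULL means the mapping failed. */
	regs = ioremap(0xc0000000UL, 0x1000);	/* hypothetical address */
	if (!regs)
		return -ENOMEM;

	writel(0x1, regs + 0x10);	/* hypothetical control register */
	(void)readl(regs + 0x14);	/* hypothetical status register */

	/* Tears down the vmalloc-backed area created by do_ioremap(). */
	iounmap(regs);
	return 0;
}
#endif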