xref: /linux/arch/powerpc/mm/ioremap_64.c (revision 8e07e0e3964ca4e23ce7b68e2096fe660a888942)
// SPDX-License-Identifier: GPL-2.0-or-later

#include <linux/io.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

void __iomem *__ioremap_caller(phys_addr_t addr, unsigned long size,
			       pgprot_t prot, void *caller)
{
	phys_addr_t paligned, offset;
	void __iomem *ret;
	int err;

	/* We don't support the 4K PFN hack with ioremap */
	if (pgprot_val(prot) & H_PAGE_4K_PFN)
		return NULL;

	/*
	 * Choose an address to map it to. Once the vmalloc system is running,
	 * we use it. Before that, we map using addresses going up from
	 * ioremap_bot. vmalloc will use the addresses from IOREMAP_BASE
	 * through ioremap_bot.
	 */
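	/*
	 * Split the physical address into a page-aligned base plus the
	 * offset within that page, and round the size up to whole pages.
	 * Illustrative example (4K pages, hypothetical values):
	 * addr = 0x10000004, size = 0x10 gives paligned = 0x10000000,
	 * offset = 0x4 and size = 0x1000.
	 */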
	paligned = addr & PAGE_MASK;
	offset = addr & ~PAGE_MASK;
	size = PAGE_ALIGN(addr + size) - paligned;

	if (size == 0 || paligned == 0)
		return NULL;

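	/*
	 * Once the slab allocator is up, vmalloc is usable too, so the
	 * mapping is delegated to the generic vmalloc-based implementation.
	 */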
	if (slab_is_available())
		return generic_ioremap_prot(addr, size, prot);

	pr_warn("ioremap() called early from %pS. Use early_ioremap() instead\n", caller);

	err = early_ioremap_range(ioremap_bot, paligned, size, prot);
	if (err)
		return NULL;

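	/*
	 * Early boot path: the mapping lives at ioremap_bot, so return that
	 * address plus the caller's offset within the first page, then bump
	 * ioremap_bot past the mapping, leaving one unmapped guard page
	 * between successive early mappings.
	 */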
	ret = (void __iomem *)ioremap_bot + offset;
	ioremap_bot += size + PAGE_SIZE;

	return ret;
}

/*
 * Unmap an IO region and remove it from the vmalloc'd list.
 * Access to IO memory should be serialized by the driver.
 */
void iounmap(volatile void __iomem *token)
{
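	/*
	 * Mappings created before slab was available were never entered in
	 * the vmalloc tracking structures, so there is nothing to tear down.
	 */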
	if (!slab_is_available())
		return;

	generic_iounmap(PCI_FIX_ADDR(token));
}
EXPORT_SYMBOL(iounmap);
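
/*
 * Illustrative sketch (not part of the kernel source): how a driver
 * typically reaches __ioremap_caller() through the generic ioremap()
 * wrapper and releases the mapping with iounmap(). The BAR address,
 * length and register offset below are hypothetical placeholders.
 */
#if 0
static void __iomem *example_map_device(phys_addr_t bar_base,
					resource_size_t bar_len)
{
	void __iomem *regs;

	regs = ioremap(bar_base, bar_len);	/* ends up in __ioremap_caller() */
	if (!regs)
		return NULL;

	writel(0x1, regs + 0x10);		/* hypothetical control register */
	return regs;
}

static void example_unmap_device(void __iomem *regs)
{
	iounmap(regs);	/* generic_iounmap() once slab is available */
}
#endif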