xref: /linux/arch/arm64/mm/ioremap.c (revision c98be0c96db00e9b6b02d31e0fa7590c54cdaaac)
/*
 * Based on arch/arm/mm/ioremap.c
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 * Hacked for ARM by Phil Blundell <philb@gnu.org>
 * Hacked to allow all architectures to build, and various cleanups
 * by Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/export.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/io.h>

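/*
 * Core helper shared by the ioremap variants: validate the requested
 * physical range, reserve a virtual address range for it and install the
 * page table entries.
 */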
static void __iomem *__ioremap_caller(phys_addr_t phys_addr, size_t size,
				      pgprot_t prot, void *caller)
{
	unsigned long last_addr;
	unsigned long offset = phys_addr & ~PAGE_MASK;
	int err;
	unsigned long addr;
	struct vm_struct *area;

	/*
	 * Page align the mapping address and size, taking account of any
	 * offset.
	 */
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(size + offset);

	/*
	 * Don't allow wraparound, a zero size or a range outside PHYS_MASK.
	 */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr || (last_addr & ~PHYS_MASK))
		return NULL;

	/*
	 * Don't allow RAM to be mapped.
	 */
	if (WARN_ON(pfn_valid(__phys_to_pfn(phys_addr))))
		return NULL;

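	/* Reserve a virtual address range in the vmalloc area for the mapping. */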
	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		return NULL;
	addr = (unsigned long)area->addr;

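	/* Create the page table entries for the new mapping; back out on failure. */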
	err = ioremap_page_range(addr, addr + size, phys_addr, prot);
	if (err) {
		vunmap((void *)addr);
		return NULL;
	}

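	/* Re-apply the sub-page offset so the caller sees the exact address asked for. */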
	return (void __iomem *)(offset + addr);
}

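/*
 * Generic entry point used by the ioremap_*() wrappers in <asm/io.h>;
 * __builtin_return_address(0) records the caller so the mapping can be
 * attributed in /proc/vmallocinfo.
 */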
void __iomem *__ioremap(phys_addr_t phys_addr, size_t size, pgprot_t prot)
{
	return __ioremap_caller(phys_addr, size, prot,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(__ioremap);

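/*
 * Tear down a mapping created by __ioremap(); addresses returned by
 * ioremap_cache() for RAM point into the linear map and are left alone.
 */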
void __iounmap(volatile void __iomem *io_addr)
{
	unsigned long addr = (unsigned long)io_addr & PAGE_MASK;

	/*
	 * We could get an address outside vmalloc range in case
	 * of ioremap_cache() reusing a RAM mapping.
	 */
	if (VMALLOC_START <= addr && addr < VMALLOC_END)
		vunmap((void *)addr);
}
EXPORT_SYMBOL(__iounmap);

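/*
 * Map a region with Normal (cacheable) attributes. RAM already has a
 * cacheable linear mapping, which is returned directly instead of creating
 * a second alias.
 */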
void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size)
{
	/* For normal memory we already have a cacheable mapping. */
	if (pfn_valid(__phys_to_pfn(phys_addr)))
		return (void __iomem *)__phys_to_virt(phys_addr);

	return __ioremap_caller(phys_addr, size, __pgprot(PROT_NORMAL),
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);
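#if 0	/* Illustrative sketch, not part of the original file. */
/*
 * Example of how a driver would typically consume these interfaces through
 * the ioremap()/iounmap() wrappers from <asm/io.h>, which on arm64 expand to
 * __ioremap()/__iounmap(). The physical address, size and register offset
 * below are hypothetical.
 */
static int ioremap_example(void)
{
	void __iomem *regs;

	/* Map 4K of (hypothetical) device registers as Device-nGnRE memory. */
	regs = ioremap(0x3f200000, 0x1000);
	if (!regs)
		return -ENOMEM;

	writel(0x1, regs + 0x08);	/* example MMIO register write */

	iounmap(regs);
	return 0;
}
#endif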