xref: /linux/mm/ioremap.c (revision a1c613ae4c322ddd58d5a8539dbfba2a0380a8c0)
// SPDX-License-Identifier: GPL-2.0
/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/export.h>
#include <linux/ioremap.h>

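/*
 * Map a physical (bus) address range into the kernel's ioremap region with
 * the given page protection.  The request is page-aligned internally, the
 * virtual area is reserved with __get_vm_area_caller(), and the returned
 * cookie preserves the sub-page offset of phys_addr.  Returns NULL if the
 * range is invalid, no virtual space is available, or the page tables
 * cannot be populated.
 */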
void __iomem *generic_ioremap_prot(phys_addr_t phys_addr, size_t size,
				   pgprot_t prot)
{
	unsigned long offset, vaddr;
	phys_addr_t last_addr;
	struct vm_struct *area;

	/* An early platform driver might end up here */
	if (WARN_ON_ONCE(!slab_is_available()))
		return NULL;

	/* Disallow wrap-around or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/* Page-align mappings */
	offset = phys_addr & (~PAGE_MASK);
	phys_addr -= offset;
	size = PAGE_ALIGN(size + offset);

	area = __get_vm_area_caller(size, VM_IOREMAP, IOREMAP_START,
				    IOREMAP_END, __builtin_return_address(0));
	if (!area)
		return NULL;
	vaddr = (unsigned long)area->addr;
	area->phys_addr = phys_addr;

	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
		free_vm_area(area);
		return NULL;
	}

	return (void __iomem *)(vaddr + offset);
}

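/*
 * Default ioremap_prot(), used when the architecture does not provide its
 * own implementation: it only wraps the raw protection bits in a pgprot_t
 * and hands off to generic_ioremap_prot().
 */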
#ifndef ioremap_prot
void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
			   unsigned long prot)
{
	return generic_ioremap_prot(phys_addr, size, __pgprot(prot));
}
EXPORT_SYMBOL(ioremap_prot);
#endif

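/*
 * Undo a mapping set up by generic_ioremap_prot().  The sub-page offset is
 * masked off first, and the underlying vmalloc area is only released if the
 * address really lies inside the ioremap region.
 */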
void generic_iounmap(volatile void __iomem *addr)
{
	void *vaddr = (void *)((unsigned long)addr & PAGE_MASK);

	if (is_ioremap_addr(vaddr))
		vunmap(vaddr);
}

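/*
 * Default iounmap(), used when the architecture does not override it.
 */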
#ifndef iounmap
void iounmap(volatile void __iomem *addr)
{
	generic_iounmap(addr);
}
EXPORT_SYMBOL(iounmap);
#endif