xref: /linux/arch/mips/mm/ioremap.c (revision e58e871becec2d3b04ed91c0c16fe8deac9c9dfa)
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 * (C) Copyright 2001, 2002 Ralf Baechle
 */
#include <linux/export.h>
#include <asm/addrspace.h>
#include <asm/byteorder.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm_types.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
#include <asm/tlbflush.h>

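/*
 * Fill in one page table's worth of PTEs for the given range, clamped to
 * the enclosing PMD entry.  Each PTE maps one page starting at phys_addr
 * as a global, present, read/write page combined with the caller's cache
 * flags.  Finding an already-populated PTE here is a bug.
 */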
static inline void remap_area_pte(pte_t *pte, unsigned long address,
	phys_addr_t size, phys_addr_t phys_addr, unsigned long flags)
{
	phys_addr_t end;
	unsigned long pfn;
	pgprot_t pgprot = __pgprot(_PAGE_GLOBAL | _PAGE_PRESENT | __READABLE
				   | __WRITEABLE | flags);

	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	BUG_ON(address >= end);
	pfn = phys_addr >> PAGE_SHIFT;
	do {
		if (!pte_none(*pte)) {
			printk(KERN_ERR "remap_area_pte: page already exists\n");
			BUG();
		}
		set_pte(pte, pfn_pte(pfn, pgprot));
		address += PAGE_SIZE;
		pfn++;
		pte++;
	} while (address && (address < end));
}

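/*
 * Fill in the PMD entries covering the given range, clamped to the
 * enclosing PGD entry.  A PTE table is allocated for each PMD slot as
 * needed and the per-PMD chunk is handed on to remap_area_pte().
 */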
static inline int remap_area_pmd(pmd_t *pmd, unsigned long address,
	phys_addr_t size, phys_addr_t phys_addr, unsigned long flags)
{
	phys_addr_t end;

	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	phys_addr -= address;
	BUG_ON(address >= end);
	do {
		pte_t *pte = pte_alloc_kernel(pmd, address);
		if (!pte)
			return -ENOMEM;
		remap_area_pte(pte, address, end - address, address + phys_addr, flags);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address && (address < end));
	return 0;
}

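/*
 * Walk the kernel page tables from the PGD level down and install
 * mappings for size bytes at address pointing at phys_addr.  The caches
 * are flushed before the tables are touched and the TLB afterwards.
 */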
static int remap_area_pages(unsigned long address, phys_addr_t phys_addr,
	phys_addr_t size, unsigned long flags)
{
	int error;
	pgd_t *dir;
	unsigned long end = address + size;

	phys_addr -= address;
	dir = pgd_offset(&init_mm, address);
	flush_cache_all();
	BUG_ON(address >= end);
	do {
		pud_t *pud;
		pmd_t *pmd;

		error = -ENOMEM;
		pud = pud_alloc(&init_mm, dir, address);
		if (!pud)
			break;
		pmd = pmd_alloc(&init_mm, pud, address);
		if (!pmd)
			break;
		if (remap_area_pmd(pmd, address, end - address,
				   phys_addr + address, flags))
			break;
		error = 0;
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (address && (address < end));
	flush_tlb_all();
	return error;
}

/*
 * Generic mapping function (not visible outside):
 */

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space.  Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */

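/*
 * True if the physical address lies entirely below 512MB, i.e. within
 * the range that the unmapped, uncached KSEG1 segment can reach.
 */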
#define IS_LOW512(addr) (!((phys_addr_t)(addr) & (phys_addr_t) ~0x1fffffffULL))

void __iomem *__ioremap(phys_addr_t phys_addr, phys_addr_t size, unsigned long flags)
{
	struct vm_struct *area;
	unsigned long offset;
	phys_addr_t last_addr;
	void *addr;

	phys_addr = fixup_bigphys_addr(phys_addr, size);

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Map uncached objects in the low 512MB of address space using KSEG1,
	 * otherwise map using page tables.
	 */
	if (IS_LOW512(phys_addr) && IS_LOW512(last_addr) &&
	    flags == _CACHE_UNCACHED)
		return (void __iomem *) CKSEG1ADDR(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (phys_addr < virt_to_phys(high_memory)) {
		char *t_addr, *t_end;
		struct page *page;

		t_addr = __va(phys_addr);
		t_end = t_addr + (size - 1);

		for (page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
			if (!PageReserved(page))
				return NULL;
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	addr = area->addr;
	if (remap_area_pages((unsigned long)addr, phys_addr, size, flags)) {
		vunmap(addr);
		return NULL;
	}

	return (void __iomem *)(offset + (char *)addr);
}

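/*
 * True if the virtual address lies in KSEG1; such mappings were never
 * backed by a vm_struct, so there is nothing to unmap.
 */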
#define IS_KSEG1(addr) (((unsigned long)(addr) & ~0x1fffffffUL) == CKSEG1)

void __iounmap(const volatile void __iomem *addr)
{
	struct vm_struct *p;

	if (IS_KSEG1(addr))
		return;

	p = remove_vm_area((void *)(PAGE_MASK & (unsigned long __force)addr));
	if (!p)
		printk(KERN_ERR "iounmap: bad address %p\n", addr);

	kfree(p);
}
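
/*
 * Example usage (a hypothetical driver, not part of this file; the base
 * address, size and register offset below are made-up constants):
 *
 *	void __iomem *regs;
 *
 *	regs = ioremap(MYDEV_PHYS_BASE, MYDEV_REG_SIZE);
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + MYDEV_CTRL);
 *	...
 *	iounmap(regs);
 *
 * On MIPS the generic ioremap()/iounmap() helpers in <asm/io.h> end up
 * in __ioremap()/__iounmap() above, with ioremap() requesting an
 * uncached (_CACHE_UNCACHED) mapping.
 */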

EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(__iounmap);