/*
 * arch/parisc/mm/ioremap.c
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 * (C) Copyright 2001-2006 Helge Deller <deller@gmx.de>
 * (C) Copyright 2005 Kyle McMartin <kyle@parisc-linux.org>
 */

#include <linux/vmalloc.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <asm/io.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

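/*
 * Fill in the PTEs for one PMD-sized region: map [address, address + size)
 * (clamped to the end of the enclosing PMD) onto the physical pages
 * starting at phys_addr, with the protection bits given in flags.
 */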
static inline void
remap_area_pte(pte_t *pte, unsigned long address, unsigned long size,
	       unsigned long phys_addr, unsigned long flags)
{
	unsigned long end, pfn;
	pgprot_t pgprot = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY |
				   _PAGE_ACCESSED | flags);

	address &= ~PMD_MASK;

	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;

	BUG_ON(address >= end);

	pfn = phys_addr >> PAGE_SHIFT;
	do {
		BUG_ON(!pte_none(*pte));

		set_pte(pte, pfn_pte(pfn, pgprot));

		address += PAGE_SIZE;
		pfn++;
		pte++;
	} while (address && (address < end));
}

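/*
 * Fill in the PMD entries covering one PGDIR-sized region, allocating
 * PTE pages as needed.  After the 'phys_addr -= address' bias below,
 * 'address + phys_addr' always names the physical address to map, even
 * as 'address' advances through the region.
 */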
static inline int
remap_area_pmd(pmd_t *pmd, unsigned long address, unsigned long size,
	       unsigned long phys_addr, unsigned long flags)
{
	unsigned long end;

	address &= ~PGDIR_MASK;

	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;

	BUG_ON(address >= end);

	phys_addr -= address;
	do {
		pte_t *pte = pte_alloc_kernel(pmd, address);
		if (!pte)
			return -ENOMEM;

		remap_area_pte(pte, address, end - address,
			       address + phys_addr, flags);

		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address && (address < end));

	return 0;
}

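/*
 * Walk the kernel page tables for [address, address + size): allocate
 * the PUD and PMD levels as needed and remap one PGDIR-sized chunk per
 * iteration.  Caches are flushed before the tables change and the TLB
 * is flushed afterwards.
 */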
static int
remap_area_pages(unsigned long address, unsigned long phys_addr,
		 unsigned long size, unsigned long flags)
{
	pgd_t *dir;
	int error = 0;
	unsigned long end = address + size;

	BUG_ON(address >= end);

	phys_addr -= address;
	dir = pgd_offset_k(address);

	flush_cache_all();

	do {
		pud_t *pud;
		pmd_t *pmd;

		error = -ENOMEM;
		pud = pud_alloc(&init_mm, dir, address);
		if (!pud)
			break;

		pmd = pmd_alloc(&init_mm, pud, address);
		if (!pmd)
			break;

		if (remap_area_pmd(pmd, address, end - address,
				   phys_addr + address, flags))
			break;

		error = 0;
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (address && (address < end));

	flush_tlb_all();

	return error;
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
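/*
 * For example (hypothetical addresses): __ioremap(0xf0001004, 0x10, 0)
 * maps the whole page at physical 0xf0001000 and returns the vmalloc'd
 * virtual address plus the 0x4 sub-page offset.
 */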
void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
{
	void *addr;
	struct vm_struct *area;
	unsigned long offset, last_addr;

#ifdef CONFIG_EISA
	unsigned long end = phys_addr + size - 1;
	/* Support EISA addresses */
	if ((phys_addr >= 0x00080000 && end < 0x000fffff) ||
	    (phys_addr >= 0x00500000 && end < 0x03bfffff)) {
		phys_addr |= F_EXTEND(0xfc000000);
		flags |= _PAGE_NO_CACHE;
	}
#endif

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (phys_addr < virt_to_phys(high_memory)) {
		char *t_addr, *t_end;
		struct page *page;

		t_addr = __va(phys_addr);
		t_end = t_addr + (size - 1);

		for (page = virt_to_page(t_addr);
		     page <= virt_to_page(t_end); page++) {
			if (!PageReserved(page))
				return NULL;
		}
	}
	/*
	 * Mappings have to be page-aligned.  last_addr is inclusive, so
	 * the rounded-up end of the mapping is PAGE_ALIGN(last_addr + 1).
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;
	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;

	addr = area->addr;
	if (remap_area_pages((unsigned long) addr, phys_addr, size, flags)) {
		vfree(addr);
		return NULL;
	}

	return (void __iomem *) (offset + (char *)addr);
}
EXPORT_SYMBOL(__ioremap);

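/*
 * Tear down a mapping created by __ioremap().  PAGE_MASK strips off the
 * sub-page offset that __ioremap() folded into the returned pointer, so
 * vfree() sees the address that get_vm_area() handed out.
 */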
void iounmap(void __iomem *addr)
{
	if ((void __force *) addr > high_memory)
		return vfree((void *) (PAGE_MASK & (unsigned long __force) addr));
}
EXPORT_SYMBOL(iounmap);
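/*
 * Illustrative driver usage -- a sketch only; the physical base address,
 * length, and register offset below are hypothetical:
 *
 *	void __iomem *regs;
 *
 *	regs = __ioremap(0xf8001000UL, 0x1000, _PAGE_NO_CACHE);
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + 0x10);		(hypothetical enable register)
 *	iounmap(regs);
 */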