xref: /linux/arch/sh/mm/pmb.c (revision 8fa5723aa7e053d498336b48448b292fc2e0458b)
/*
 * arch/sh/mm/pmb.c
 *
 * Privileged Space Mapping Buffer (PMB) Support.
 *
 * Copyright (C) 2005, 2006, 2007 Paul Mundt
 *
 * P1/P2 Section mapping definitions from map32.h, which was:
 *
 *	Copyright 2003 (c) Lineo Solutions,Inc.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/io.h>
#include <asm/mmu_context.h>

#define NR_PMB_ENTRIES	16

static struct kmem_cache *pmb_cache;
static unsigned long pmb_map;

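/*
 * Fixed mappings of low physical memory into the cached (P1) and
 * uncached (P2) segments.  pmb_init() wires these into the first
 * hardware PMB slots at boot.
 */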
static struct pmb_entry pmb_init_map[] = {
	/* vpn         ppn         flags (ub/sz/c/wt) */

	/* P1 Section Mappings */
	{ 0x80000000, 0x00000000, PMB_SZ_64M  | PMB_C, },
	{ 0x84000000, 0x04000000, PMB_SZ_64M  | PMB_C, },
	{ 0x88000000, 0x08000000, PMB_SZ_128M | PMB_C, },
	{ 0x90000000, 0x10000000, PMB_SZ_64M  | PMB_C, },
	{ 0x94000000, 0x14000000, PMB_SZ_64M  | PMB_C, },
	{ 0x98000000, 0x18000000, PMB_SZ_64M  | PMB_C, },

	/* P2 Section Mappings */
	{ 0xa0000000, 0x00000000, PMB_UB | PMB_SZ_64M  | PMB_WT, },
	{ 0xa4000000, 0x04000000, PMB_UB | PMB_SZ_64M  | PMB_WT, },
	{ 0xa8000000, 0x08000000, PMB_UB | PMB_SZ_128M | PMB_WT, },
	{ 0xb0000000, 0x10000000, PMB_UB | PMB_SZ_64M  | PMB_WT, },
	{ 0xb4000000, 0x14000000, PMB_UB | PMB_SZ_64M  | PMB_WT, },
	{ 0xb8000000, 0x18000000, PMB_UB | PMB_SZ_64M  | PMB_WT, },
};

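/*
 * Helpers for computing the memory-mapped register offsets of a given
 * PMB entry in the address and data arrays.
 */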
static inline unsigned long mk_pmb_entry(unsigned int entry)
{
	return (entry & PMB_E_MASK) << PMB_E_SHIFT;
}

static inline unsigned long mk_pmb_addr(unsigned int entry)
{
	return mk_pmb_entry(entry) | PMB_ADDR;
}

static inline unsigned long mk_pmb_data(unsigned int entry)
{
	return mk_pmb_entry(entry) | PMB_DATA;
}

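/*
 * Software bookkeeping: all allocated pmb_entry structures are kept on a
 * singly-linked list protected by pmb_list_lock.
 */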
static DEFINE_SPINLOCK(pmb_list_lock);
static struct pmb_entry *pmb_list;

static inline void pmb_list_add(struct pmb_entry *pmbe)
{
	struct pmb_entry **p, *tmp;

	p = &pmb_list;
	while ((tmp = *p) != NULL)
		p = &tmp->next;

	pmbe->next = tmp;
	*p = pmbe;
}

static inline void pmb_list_del(struct pmb_entry *pmbe)
{
	struct pmb_entry **p, *tmp;

	for (p = &pmb_list; (tmp = *p); p = &tmp->next)
		if (tmp == pmbe) {
			*p = tmp->next;
			return;
		}
}

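/*
 * Allocate a pmb_entry from the slab cache and add it to the software
 * list.  This does not touch the hardware PMB; use set_pmb_entry() to
 * program the mapping into a hardware slot.
 */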
struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
			    unsigned long flags)
{
	struct pmb_entry *pmbe;

	pmbe = kmem_cache_alloc(pmb_cache, GFP_KERNEL);
	if (!pmbe)
		return ERR_PTR(-ENOMEM);

	pmbe->vpn	= vpn;
	pmbe->ppn	= ppn;
	pmbe->flags	= flags;

	spin_lock_irq(&pmb_list_lock);
	pmb_list_add(pmbe);
	spin_unlock_irq(&pmb_list_lock);

	return pmbe;
}

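/*
 * Unlink an entry from the software list and return it to the slab
 * cache.  This only affects the software bookkeeping; it does not
 * clear any hardware slot.
 */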
void pmb_free(struct pmb_entry *pmbe)
{
	spin_lock_irq(&pmb_list_lock);
	pmb_list_del(pmbe);
	spin_unlock_irq(&pmb_list_lock);

	kmem_cache_free(pmb_cache, pmbe);
}

/*
 * Program a single translation into the hardware PMB.  The caller must
 * be running in P2 (uncached), since the PMB array registers are
 * written directly.  On entry, *entry selects a slot (or PMB_NO_ENTRY
 * to have a free one picked); on success the slot used is written back
 * to *entry.
 */
int __set_pmb_entry(unsigned long vpn, unsigned long ppn,
		    unsigned long flags, int *entry)
{
	unsigned int pos = *entry;

	if (unlikely(pos == PMB_NO_ENTRY))
		pos = find_first_zero_bit(&pmb_map, NR_PMB_ENTRIES);

repeat:
	/* slots are 0..NR_PMB_ENTRIES-1; anything beyond means the map is full */
	if (unlikely(pos >= NR_PMB_ENTRIES))
		return -ENOSPC;

	if (test_and_set_bit(pos, &pmb_map)) {
		pos = find_first_zero_bit(&pmb_map, NR_PMB_ENTRIES);
		goto repeat;
	}

	ctrl_outl(vpn | PMB_V, mk_pmb_addr(pos));

#ifdef CONFIG_CACHE_WRITETHROUGH
	/*
	 * When we are in 32-bit address extended mode, CCR.CB becomes
	 * invalid, so care must be taken to manually adjust cacheable
	 * translations.
	 */
	if (likely(flags & PMB_C))
		flags |= PMB_WT;
#endif

	ctrl_outl(ppn | flags | PMB_V, mk_pmb_data(pos));

	*entry = pos;

	return 0;
}

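/*
 * Program an allocated entry into the hardware PMB, switching to the
 * uncached (P2) segment around the update.
 */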
int __uses_jump_to_uncached set_pmb_entry(struct pmb_entry *pmbe)
{
	int ret;

	jump_to_uncached();
	ret = __set_pmb_entry(pmbe->vpn, pmbe->ppn, pmbe->flags, &pmbe->entry);
	back_to_cached();

	return ret;
}

void __uses_jump_to_uncached clear_pmb_entry(struct pmb_entry *pmbe)
{
	unsigned int entry = pmbe->entry;
	unsigned long addr;

	/*
	 * Don't allow clearing of wired init entries; P1 or P2 access
	 * without a corresponding mapping in the PMB will lead to reset
	 * by the TLB.
	 */
	if (unlikely(entry < ARRAY_SIZE(pmb_init_map) ||
		     entry >= NR_PMB_ENTRIES))
		return;

	jump_to_uncached();

	/* Clear V-bit */
	addr = mk_pmb_addr(entry);
	ctrl_outl(ctrl_inl(addr) & ~PMB_V, addr);

	addr = mk_pmb_data(entry);
	ctrl_outl(ctrl_inl(addr) & ~PMB_V, addr);

	back_to_cached();

	clear_bit(entry, &pmb_map);
}

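/*
 * Section sizes supported by the PMB, largest first, so that
 * pmb_remap() can greedily cover a region with the fewest entries.
 */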
static struct {
	unsigned long size;
	int flag;
} pmb_sizes[] = {
	{ .size = 0x20000000, .flag = PMB_SZ_512M, },
	{ .size = 0x08000000, .flag = PMB_SZ_128M, },
	{ .size = 0x04000000, .flag = PMB_SZ_64M,  },
	{ .size = 0x01000000, .flag = PMB_SZ_16M,  },
};

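/*
 * Map 'size' bytes of physical memory at 'phys' to 'vaddr' using one or
 * more PMB entries.  Returns the number of bytes actually covered (any
 * remainder smaller than the smallest PMB section is left unmapped), or
 * a negative error code.  Note that entries already programmed for this
 * request are not torn down if a later set_pmb_entry() fails.
 */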
long pmb_remap(unsigned long vaddr, unsigned long phys,
	       unsigned long size, unsigned long flags)
{
	struct pmb_entry *pmbp;
	unsigned long wanted;
	int pmb_flags, i;

	/* Convert typical pgprot value to the PMB equivalent */
	if (flags & _PAGE_CACHABLE) {
		if (flags & _PAGE_WT)
			pmb_flags = PMB_WT;
		else
			pmb_flags = PMB_C;
	} else
		pmb_flags = PMB_WT | PMB_UB;

	pmbp = NULL;
	wanted = size;

again:
	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) {
		struct pmb_entry *pmbe;
		int ret;

		if (size < pmb_sizes[i].size)
			continue;

		pmbe = pmb_alloc(vaddr, phys, pmb_flags | pmb_sizes[i].flag);
		if (IS_ERR(pmbe))
			return PTR_ERR(pmbe);

		ret = set_pmb_entry(pmbe);
		if (ret != 0) {
			pmb_free(pmbe);
			return -EBUSY;
		}

		phys	+= pmb_sizes[i].size;
		vaddr	+= pmb_sizes[i].size;
		size	-= pmb_sizes[i].size;

		/*
		 * Link adjacent entries that span multiple PMB entries
		 * for easier tear-down.
		 */
		if (likely(pmbp))
			pmbp->link = pmbe;

		pmbp = pmbe;
	}

	if (size >= 0x1000000)
		goto again;

	return wanted - size;
}

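/*
 * Tear down the mapping that pmb_remap() established at 'addr': clear
 * the hardware slot of each linked entry and free the structures.
 */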
void pmb_unmap(unsigned long addr)
{
	struct pmb_entry **p, *pmbe;

	for (p = &pmb_list; (pmbe = *p); p = &pmbe->next)
		if (pmbe->vpn == addr)
			break;

	if (unlikely(!pmbe))
		return;

	WARN_ON(!test_bit(pmbe->entry, &pmb_map));

	do {
		struct pmb_entry *pmblink = pmbe;

		clear_pmb_entry(pmbe);
		pmbe = pmblink->link;

		pmb_free(pmblink);
	} while (pmbe);
}

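/* Slab constructor: entries start zeroed and unbound from any hardware slot. */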
static void pmb_cache_ctor(void *pmb)
{
	struct pmb_entry *pmbe = pmb;

	memset(pmb, 0, sizeof(struct pmb_entry));

	pmbe->entry = PMB_NO_ENTRY;
}

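/*
 * Boot-time setup: create the pmb slab cache, wire the fixed P1/P2
 * mappings from pmb_init_map into the hardware, enable 32-bit address
 * extended mode (PMB.SE) and flush the TLB.
 */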
static int __uses_jump_to_uncached pmb_init(void)
{
	unsigned int nr_entries = ARRAY_SIZE(pmb_init_map);
	unsigned int entry, i;

	BUG_ON(unlikely(nr_entries >= NR_PMB_ENTRIES));

	pmb_cache = kmem_cache_create("pmb", sizeof(struct pmb_entry), 0,
				      SLAB_PANIC, pmb_cache_ctor);

	jump_to_uncached();

	/*
	 * Ordering is important: P2 must be mapped in the PMB before we
	 * can set PMB.SE, and P1 must be mapped before we jump back to
	 * P1 space.
	 */
	for (entry = 0; entry < nr_entries; entry++) {
		struct pmb_entry *pmbe = pmb_init_map + entry;

		__set_pmb_entry(pmbe->vpn, pmbe->ppn, pmbe->flags, &entry);
	}

	ctrl_outl(0, PMB_IRMCR);

	/* PMB.SE and UB[7] */
	ctrl_outl((1 << 31) | (1 << 7), PMB_PASCR);

	/* Flush out the TLB */
	i = ctrl_inl(MMUCR);
	i |= MMUCR_TI;
	ctrl_outl(i, MMUCR);

	back_to_cached();

	return 0;
}
arch_initcall(pmb_init);

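/* debugfs: dump the current state of all hardware PMB entries. */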
static int pmb_seq_show(struct seq_file *file, void *iter)
{
	int i;

	seq_printf(file, "V: Valid, C: Cacheable, WT: Write-Through\n"
			 "CB: Copy-Back, B: Buffered, UB: Unbuffered\n");
	seq_printf(file, "ety   vpn  ppn  size   flags\n");

	for (i = 0; i < NR_PMB_ENTRIES; i++) {
		unsigned long addr, data;
		unsigned int size;
		char *sz_str = NULL;

		addr = ctrl_inl(mk_pmb_addr(i));
		data = ctrl_inl(mk_pmb_data(i));

		size = data & PMB_SZ_MASK;
		sz_str = (size == PMB_SZ_16M)  ? " 16MB":
			 (size == PMB_SZ_64M)  ? " 64MB":
			 (size == PMB_SZ_128M) ? "128MB":
					         "512MB";

		/* 02: V 0x88 0x08 128MB C CB  B */
		seq_printf(file, "%02d: %c 0x%02lx 0x%02lx %s %c %s %s\n",
			   i, ((addr & PMB_V) && (data & PMB_V)) ? 'V' : ' ',
			   (addr >> 24) & 0xff, (data >> 24) & 0xff,
			   sz_str, (data & PMB_C) ? 'C' : ' ',
			   (data & PMB_WT) ? "WT" : "CB",
			   (data & PMB_UB) ? "UB" : " B");
	}

	return 0;
}

static int pmb_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, pmb_seq_show, NULL);
}

static const struct file_operations pmb_debugfs_fops = {
	.owner		= THIS_MODULE,
	.open		= pmb_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

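/* Register the "pmb" debugfs file under sh_debugfs_root. */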
static int __init pmb_debugfs_init(void)
{
	struct dentry *dentry;

	dentry = debugfs_create_file("pmb", S_IFREG | S_IRUGO,
				     sh_debugfs_root, NULL, &pmb_debugfs_fops);
	if (!dentry)
		return -ENOMEM;
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	return 0;
}
postcore_initcall(pmb_debugfs_init);