/*
 * arch/sh/kernel/cpu/sh4/sq.c
 *
 * General management API for SH-4 integrated Store Queues
 *
 * Copyright (C) 2001, 2002, 2003, 2004  Paul Mundt
 * Copyright (C) 2001, 2002  M. R. Brown
 *
 * Some of this code has been adopted directly from the old arch/sh/mm/sq.c
 * hack that was part of the LinuxDC project. For all intents and purposes,
 * this is a completely new interface that really doesn't have much in common
 * with the old zone-based approach at all. In fact, it's only listed here for
 * general completeness.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/proc_fs.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>

#include <asm/io.h>
#include <asm/page.h>
#include <asm/mmu_context.h>
#include <asm/cpu/sq.h>

static LIST_HEAD(sq_mapping_list);
static DEFINE_SPINLOCK(sq_mapping_lock);

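/*
 * For reference: struct sq_mapping itself is defined in <asm/cpu/sq.h>.
 * Reconstructed from the fields used below, it looks roughly like this
 * (a sketch, not the authoritative definition):
 *
 *	struct sq_mapping {
 *		const char *name;	-- user of the mapping
 *		unsigned long sq_addr;	-- P4 store queue address
 *		unsigned long addr;	-- physical address
 *		unsigned int size;	-- mapping size in bytes
 *		struct list_head list;	-- sq_mapping_list linkage
 *	};
 */
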
/**
 * sq_flush - Flush (prefetch) the store queue cache
 * @addr: the store queue address to flush
 *
 * Executes a prefetch instruction on the specified store queue cache,
 * so that the cached data is written to physical memory.
 */
inline void sq_flush(void *addr)
{
	__asm__ __volatile__ ("pref @%0" : : "r" (addr) : "memory");
}

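/*
 * Typical use (a sketch; 'map' is assumed to come from sq_remap()
 * below): fill one 32-byte queue through the SQ address, then kick
 * the burst out to physical memory with sq_flush():
 *
 *	unsigned long *sq = (unsigned long *)map->sq_addr;
 *	int i;
 *
 *	for (i = 0; i < 8; i++)
 *		sq[i] = data[i];
 *	sq_flush(sq);
 */
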
/**
 * sq_flush_range - Flush (prefetch) a specific SQ range
 * @start: the store queue address to start flushing from
 * @len: the length to flush
 *
 * Flushes the store queue cache from @start to @start + @len in a
 * linear fashion.
 */
void sq_flush_range(unsigned long start, unsigned int len)
{
	volatile unsigned long *sq = (unsigned long *)start;
	unsigned long dummy;

	/* Flush the queues; each SQ is 32 bytes (8 longwords) */
	for (len >>= 5; len--; sq += 8)
		sq_flush((void *)sq);

	/* Wait for completion */
	dummy = ctrl_inl(P4SEG_STORE_QUE);

	ctrl_outl(0, P4SEG_STORE_QUE + 0);
	ctrl_outl(0, P4SEG_STORE_QUE + 8);
}

static struct sq_mapping *__sq_alloc_mapping(unsigned long virt,
					     unsigned long phys,
					     unsigned long size,
					     const char *name)
{
	struct sq_mapping *map;

	if (virt + size > SQ_ADDRMAX)
		return ERR_PTR(-ENOSPC);

	map = kmalloc(sizeof(struct sq_mapping), GFP_KERNEL);
	if (!map)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&map->list);

	map->sq_addr	= virt;
	map->addr	= phys;
	map->size	= size;
	map->name	= name;

	list_add(&map->list, &sq_mapping_list);

	return map;
}

static unsigned long __sq_get_next_addr(void)
{
	if (!list_empty(&sq_mapping_list)) {
		struct sq_mapping *entry;

		/*
		 * Read one off the list head, as it will have the highest
		 * mapped allocation. Set the next one up right above it.
		 *
		 * This is somewhat sub-optimal, as we don't look at
		 * gaps between allocations or anything lower than the
		 * highest-level allocation.
		 *
		 * However, in the interest of performance and the general
		 * lack of desire to do constant list rebalancing, we don't
		 * worry about it.
		 */
		entry = list_entry(sq_mapping_list.next, typeof(*entry), list);
		return entry->sq_addr + entry->size;
	}

	return P4SEG_STORE_QUE;
}

/**
 * __sq_remap - Perform a translation from the SQ to a phys addr
 * @map: sq mapping containing phys and store queue addresses.
 *
 * Maps the store queue address specified in the mapping to the physical
 * address specified in the mapping.
 */
static struct sq_mapping *__sq_remap(struct sq_mapping *map)
{
	unsigned long flags, pteh, ptel;
	struct vm_struct *vma;
	pgprot_t pgprot;

	/*
	 * Without an MMU (or with it turned off), this is much more
	 * straightforward, as we can just load up each queue's QACR with
	 * the physical address appropriately masked.
	 */

	ctrl_outl(((map->addr >> 26) << 2) & 0x1c, SQ_QACR0);
	ctrl_outl(((map->addr >> 26) << 2) & 0x1c, SQ_QACR1);
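
	/*
	 * Worked example (illustrative physical address): for
	 * map->addr = 0x0c000000, bits [28:26] are 011, so
	 * ((0x0c000000 >> 26) << 2) & 0x1c = 0x0c, i.e. AREA = 3
	 * in QACR bits [4:2] for both queues.
	 */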

#ifdef CONFIG_MMU
	/*
	 * With an MMU on the other hand, things are slightly more involved.
	 * Namely, we have to have a direct mapping between the SQ addr and
	 * the associated physical address in the UTLB by way of setting up
	 * a virt<->phys translation by hand. We do this by simply specifying
	 * the SQ addr in UTLB.VPN and the associated physical address in
	 * UTLB.PPN.
	 *
	 * Notably, even though this is a special case translation, and some
	 * of the configuration bits are meaningless, we're still required
	 * to have a valid ASID context in PTEH.
	 *
	 * We could also probably get by without explicitly setting PTEA, but
	 * we do it here just for good measure.
	 */
	spin_lock_irqsave(&sq_mapping_lock, flags);

	pteh = map->sq_addr;
	ctrl_outl((pteh & MMU_VPN_MASK) | get_asid(), MMU_PTEH);

	ptel = map->addr & PAGE_MASK;
	ctrl_outl(((ptel >> 28) & 0xe) | (ptel & 0x1), MMU_PTEA);

	pgprot = pgprot_noncached(PAGE_KERNEL);

	ptel &= _PAGE_FLAGS_HARDWARE_MASK;
	ptel |= pgprot_val(pgprot);
	ctrl_outl(ptel, MMU_PTEL);

	__asm__ __volatile__ ("ldtlb" : : : "memory");

	spin_unlock_irqrestore(&sq_mapping_lock, flags);

	/*
	 * Next, we need to map ourselves in the kernel page table, so that
	 * future accesses after a TLB flush will be handled when we take a
	 * page fault.
	 *
	 * Theoretically we could just do this directly and not worry about
	 * setting up the translation by hand ahead of time, but for the
	 * cases where we want a one-shot SQ mapping followed by a quick
	 * writeout before we hit the TLB flush, we do it anyway. This way
	 * we at least save ourselves the initial page fault overhead.
	 */
	vma = __get_vm_area(map->size, VM_ALLOC, map->sq_addr, SQ_ADDRMAX);
	if (!vma)
		return ERR_PTR(-ENOMEM);

	vma->phys_addr = map->addr;

	if (remap_area_pages((unsigned long)vma->addr, vma->phys_addr,
			     map->size, pgprot_val(pgprot))) {
		vunmap(vma->addr);
		return ERR_PTR(-ENOMEM);
	}
#endif /* CONFIG_MMU */

	return map;
}

/**
 * sq_remap - Map a physical address through the Store Queues
 * @phys: Physical address of mapping.
 * @size: Length of mapping.
 * @name: User invoking mapping.
 *
 * Remaps the physical address @phys through the next available store queue
 * address of @size length. @name is logged at boot time as well as through
 * the procfs interface.
 *
 * A pre-allocated and filled sq_mapping pointer is returned, and must be
 * cleaned up with a call to sq_unmap() when the user is done with the
 * mapping.
 */
struct sq_mapping *sq_remap(unsigned long phys, unsigned int size, const char *name)
{
	struct sq_mapping *map;
	unsigned long virt, end;
	unsigned int psz;

	/* Don't allow wraparound or zero size */
	end = phys + size - 1;
	if (!size || end < phys)
		return NULL;
	/* Don't allow anyone to remap normal memory.. */
	if (phys < virt_to_phys(high_memory))
		return NULL;

	phys &= PAGE_MASK;

	size  = PAGE_ALIGN(end + 1) - phys;
	virt  = __sq_get_next_addr();
	psz   = (size + (PAGE_SIZE - 1)) / PAGE_SIZE;
	map   = __sq_alloc_mapping(virt, phys, size, name);
	if (IS_ERR(map))
		return map;

	printk(KERN_INFO "sqremap: %15s  [%4d page%s]  va 0x%08lx   pa 0x%08lx\n",
	       map->name ? map->name : "???",
	       psz, psz == 1 ? " " : "s",
	       map->sq_addr, map->addr);

	return __sq_remap(map);
}

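/*
 * In-kernel usage sketch (SQ_FB_PHYS and SQ_FB_SIZE are made-up values
 * for illustration):
 *
 *	struct sq_mapping *map;
 *
 *	map = sq_remap(SQ_FB_PHYS, SQ_FB_SIZE, "framebuffer");
 *	if (IS_ERR(map))
 *		return PTR_ERR(map);
 *
 *	... burst writes through map->sq_addr, sq_flush_range() to
 *	    push them out ...
 *
 *	sq_unmap(map);
 */
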
/**
 * sq_unmap - Unmap a Store Queue allocation
 * @map: Pre-allocated Store Queue mapping.
 *
 * Unmaps the store queue allocation @map that was previously created by
 * sq_remap(). Also frees up the pte that was previously inserted into
 * the kernel page table and discards the UTLB translation.
 */
void sq_unmap(struct sq_mapping *map)
{
	if (map->sq_addr > (unsigned long)high_memory)
		vfree((void *)(map->sq_addr & PAGE_MASK));

	list_del(&map->list);
	kfree(map);
}

/**
 * sq_clear - Clear a store queue range
 * @addr: Address to start clearing from.
 * @len: Length to clear.
 *
 * A quick zero-fill implementation for clearing out memory that has been
 * remapped through the store queues.
 */
void sq_clear(unsigned long addr, unsigned int len)
{
	int i;

	/* Pre-load both 32-byte queues with zeros, a longword at a time */
	for (i = 0; i < 8; i++) {
		ctrl_outl(0, addr + (i * 4));
		ctrl_outl(0, addr + (i * 4) + 32);
	}

	sq_flush_range(addr, len);
}

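/*
 * For example, sq_clear(map->sq_addr, map->size) zero-fills an entire
 * sq_remap()'d region in 32-byte bursts.
 */
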
/**
 * sq_vma_unmap - Unmap a VMA range
 * @area: VMA containing range.
 * @addr: Start of range.
 * @len: Length of range.
 *
 * Searches the sq_mapping_list for a mapping matching the sq addr @addr,
 * and subsequently frees up the entry. Further cleanup is done by generic
 * code.
 */
static void sq_vma_unmap(struct vm_area_struct *area,
			 unsigned long addr, size_t len)
{
	struct list_head *pos, *tmp;

	list_for_each_safe(pos, tmp, &sq_mapping_list) {
		struct sq_mapping *entry;

		entry = list_entry(pos, typeof(*entry), list);

		if (entry->sq_addr == addr) {
			/*
			 * We could probably get away without doing the tlb flush
			 * here, as generic code should take care of most of this
			 * when unmapping the rest of the VMA range for us. Leave
			 * it in for added sanity for the time being..
			 */
			__flush_tlb_page(get_asid(), entry->sq_addr & PAGE_MASK);

			list_del(&entry->list);
			kfree(entry);

			return;
		}
	}
}

/**
 * sq_vma_sync - Sync a VMA range
 * @area: VMA containing range.
 * @start: Start of range.
 * @len: Length of range.
 * @flags: Additional flags.
 *
 * Synchronizes an sq mapped range by flushing the store queue cache for
 * the duration of the mapping.
 *
 * Used internally for user mappings, which must use msync() to prefetch
 * the store queue cache.
 */
static int sq_vma_sync(struct vm_area_struct *area,
		       unsigned long start, size_t len, unsigned int flags)
{
	sq_flush_range(start, len);

	return 0;
}

static struct vm_operations_struct sq_vma_ops = {
	.unmap	= sq_vma_unmap,
	.sync	= sq_vma_sync,
};

/**
 * sq_mmap - mmap() for /dev/cpu/sq
 * @file: unused.
 * @vma: VMA to remap.
 *
 * Remap the specified vma @vma through the store queues, and setup associated
 * information for the new mapping. Also build up the page tables for the new
 * area.
 */
static int sq_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	unsigned long size = vma->vm_end - vma->vm_start;
	struct sq_mapping *map;

	/*
	 * We're not interested in any arbitrary virtual address that has
	 * been stuck in the VMA, as we already know what addresses we
	 * want. Save off the size, and reposition the VMA to begin at
	 * the next available sq address.
	 */
	vma->vm_start = __sq_get_next_addr();
	vma->vm_end   = vma->vm_start + size;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	vma->vm_flags |= VM_IO | VM_RESERVED;

	map = __sq_alloc_mapping(vma->vm_start, offset, size, "Userspace");
	if (IS_ERR(map))
		return PTR_ERR(map);

	if (io_remap_pfn_range(vma, map->sq_addr, map->addr >> PAGE_SHIFT,
				size, vma->vm_page_prot))
		return -EAGAIN;

	vma->vm_ops = &sq_vma_ops;

	return 0;
}

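/*
 * Userspace usage sketch (the device node path depends on the system's
 * dev setup; /dev/cpu/sq is assumed per the comment above):
 *
 *	int fd = open("/dev/cpu/sq", O_RDWR);
 *	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, phys_offset);
 *
 *	... write through p ...
 *
 *	msync(p, len, MS_SYNC);		-- ends up in sq_vma_sync()
 *	munmap(p, len);			-- ends up in sq_vma_unmap()
 */
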
#ifdef CONFIG_PROC_FS
static int sq_mapping_read_proc(char *buf, char **start, off_t off,
				int len, int *eof, void *data)
{
	struct list_head *pos;
	char *p = buf;

	list_for_each_prev(pos, &sq_mapping_list) {
		struct sq_mapping *entry;

		entry = list_entry(pos, typeof(*entry), list);

		p += sprintf(p, "%08lx-%08lx [%08lx]: %s\n", entry->sq_addr,
			     entry->sq_addr + entry->size - 1, entry->addr,
			     entry->name);
	}

	return p - buf;
}
#endif
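
/*
 * Sample /proc/sq_mapping output (illustrative values only):
 *
 *	e0000000-e000ffff [0c000000]: framebuffer
 */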

static struct file_operations sq_fops = {
	.owner		= THIS_MODULE,
	.mmap		= sq_mmap,
};

static struct miscdevice sq_dev = {
	.minor		= STORE_QUEUE_MINOR,
	.name		= "sq",
	.fops		= &sq_fops,
};

static int __init sq_api_init(void)
{
	printk(KERN_NOTICE "sq: Registering store queue API.\n");

#ifdef CONFIG_PROC_FS
	create_proc_read_entry("sq_mapping", 0, NULL, sq_mapping_read_proc, NULL);
#endif

	return misc_register(&sq_dev);
}

static void __exit sq_api_exit(void)
{
	misc_deregister(&sq_dev);
}

module_init(sq_api_init);
module_exit(sq_api_exit);

MODULE_AUTHOR("Paul Mundt <lethal@linux-sh.org>, M. R. Brown <mrbrown@0xd6.org>");
MODULE_DESCRIPTION("Simple API for SH-4 integrated Store Queues");
MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(STORE_QUEUE_MINOR);

EXPORT_SYMBOL(sq_remap);
EXPORT_SYMBOL(sq_unmap);
EXPORT_SYMBOL(sq_clear);
EXPORT_SYMBOL(sq_flush);
EXPORT_SYMBOL(sq_flush_range);