xref: /linux/arch/sh/include/asm/uncached.h (revision 4b132aacb0768ac1e652cf517097ea6f237214b9)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef __ASM_SH_UNCACHED_H
3 #define __ASM_SH_UNCACHED_H
4 
5 #include <linux/bug.h>
6 
#ifdef CONFIG_UNCACHED_MAPPING
/* Byte offset that converts a cached kernel address into its uncached alias. */
extern unsigned long cached_to_uncached;
/* Size of the uncached mapping, in bytes. */
extern unsigned long uncached_size;
/* Virtual address bounds of the uncached mapping — presumably the
 * half-open range [uncached_start, uncached_end); confirm in mm/init.c. */
extern unsigned long uncached_start, uncached_end;

/* Predicate: non-zero when kaddr lies inside the uncached mapping
 * (the !CONFIG_UNCACHED_MAPPING stub below hard-wires it to 0). */
extern int virt_addr_uncached(unsigned long kaddr);
/* Boot-time setup of the uncached mapping. */
extern void uncached_init(void);
/* Resize the uncached mapping to `size` bytes. */
extern void uncached_resize(unsigned long size);
/*
 * Jump to uncached area.
 * When handling TLB or caches, we need to do it from an uncached area.
 *
 * How it works: mova computes the PC-relative address of local label 1:
 * into r0 (the "z" constraint selects r0, the only destination mova
 * accepts); adding cached_to_uncached turns that into the same
 * instruction's uncached alias, and jmp continues execution there — so
 * control "falls through" to label 1: but via the uncached mapping.
 * The nop fills jmp's branch delay slot; .balign 4 satisfies mova's
 * requirement of a 4-byte-aligned target.  __dummy only exists to give
 * the asm a scratch output register.
 */
#define jump_to_uncached()			\
do {						\
	unsigned long __dummy;			\
						\
	__asm__ __volatile__(			\
		"mova	1f, %0\n\t"		\
		"add	%1, %0\n\t"		\
		"jmp	@%0\n\t"		\
		" nop\n\t"			\
		".balign 4\n"			\
		"1:"				\
		: "=&z" (__dummy)		\
		: "r" (cached_to_uncached));	\
} while (0)
34 
/*
 * Back to cached area.
 *
 * ctrl_barrier() orders any preceding control-register/cache updates
 * before the mapping switch.  mov.l then loads the literal stored at
 * label 1:, which the assembler fills with the link-time (cached)
 * address of label 2:, and jmp resumes execution there through the
 * normal cached mapping.  The nop fills the branch delay slot, and
 * .balign 4 keeps the .long literal naturally aligned for mov.l.
 * __dummy is just a scratch output register for the loaded address.
 */
#define back_to_cached()				\
do {							\
	unsigned long __dummy;				\
	ctrl_barrier();					\
	__asm__ __volatile__(				\
		"mov.l	1f, %0\n\t"			\
		"jmp	@%0\n\t"			\
		" nop\n\t"				\
		".balign 4\n"				\
		"1:	.long 2f\n"			\
		"2:"					\
		: "=&r" (__dummy));			\
} while (0)
#else
/*
 * No uncached mapping configured: the predicate is always false, the
 * mapping switches are no-ops, and attempting a resize is a hard bug.
 */
#define virt_addr_uncached(kaddr)	(0)
#define uncached_init()			do { } while (0)
#define uncached_resize(size)		BUG()
#define jump_to_uncached()		do { } while (0)
#define back_to_cached()		do { } while (0)
#endif
58 
59 #endif /* __ASM_SH_UNCACHED_H */
60