/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _M68K_PAGE_MM_H
#define _M68K_PAGE_MM_H

#ifndef __ASSEMBLY__

#include <linux/compiler.h>
#include <asm/module.h>

/*
 * We don't need to check for alignment etc. here: pages are always page
 * aligned, which satisfies the 16-byte alignment that move16 requires.
 */
#ifdef CPU_M68040_OR_M68060_ONLY
static inline void copy_page(void *to, void *from)
{
	unsigned long tmp;	/* dbra loop counter */

	/*
	 * Each move16 transfers one aligned 16-byte line, so every loop
	 * iteration copies 32 bytes; PAGE_SIZE / 32 iterations in total.
	 */
	__asm__ __volatile__("1:\t"
			     ".chip 68040\n\t"
			     "move16 %1@+,%0@+\n\t"
			     "move16 %1@+,%0@+\n\t"
			     ".chip 68k\n\t"
			     "dbra  %2,1b\n\t"
			     : "=a" (to), "=a" (from), "=d" (tmp)
			     : "0" (to), "1" (from), "2" (PAGE_SIZE / 32 - 1));
}
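
/*
 * Roughly equivalent plain C (illustrative sketch only, not used by the
 * kernel; the asm above exists precisely to get move16 line bursts):
 *
 *	for (i = 0; i < PAGE_SIZE / 32; i++, to += 32, from += 32)
 *		memcpy(to, from, 32);
 */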

static inline void clear_page(void *page)
{
	unsigned long tmp;
	unsigned long *sp = page;

	/* Clear the first 16 bytes (one cache line) by hand... */
	*sp++ = 0;
	*sp++ = 0;
	*sp++ = 0;
	*sp++ = 0;

	/*
	 * ...then let move16 replicate that zeroed line over the rest of
	 * the page.  %2 (the source) is rewound by 16 bytes after every
	 * copy, so it keeps pointing at the zeroed line at the page start.
	 */
	__asm__ __volatile__("1:\t"
			     ".chip 68040\n\t"
			     "move16 %2@+,%0@+\n\t"
			     ".chip 68k\n\t"
			     "subqw  #8,%2\n\t"
			     "subqw  #8,%2\n\t"
			     "dbra   %1,1b\n\t"
			     : "=a" (sp), "=d" (tmp)
			     : "a" (page), "0" (sp),
			       "1" ((PAGE_SIZE - 16) / 16 - 1));
}
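
/*
 * Rough C equivalent (illustrative sketch only): the page is cleared by
 * copying its own first, already-zeroed 16 bytes over every remaining
 * 16-byte line:
 *
 *	memset(page, 0, 16);
 *	for (off = 16; off < PAGE_SIZE; off += 16)
 *		memcpy(page + off, page, 16);
 */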

#else
#define clear_page(page)	memset((page), 0, PAGE_SIZE)
#define copy_page(to,from)	memcpy((to), (from), PAGE_SIZE)
#endif

#define clear_user_page(addr, vaddr, page)	\
	do {	clear_page(addr);		\
		flush_dcache_page(page);	\
	} while (0)
#define copy_user_page(to, from, vaddr, page)	\
	do {	copy_page(to, from);		\
		flush_dcache_page(page);	\
	} while (0)

extern unsigned long m68k_memoffset;

#ifndef CONFIG_SUN3

#define WANT_PAGE_VIRTUAL

static inline unsigned long ___pa(void *vaddr)
{
	unsigned long paddr;
	asm (
		"1:	addl #0,%0\n"
		m68k_fixup(%c2, 1b+2)
		: "=r" (paddr)
		: "0" (vaddr), "i" (m68k_fixup_memoffset));
	return paddr;
}
#define __pa(vaddr)	___pa((void *)(long)(vaddr))
static inline void *__va(unsigned long paddr)
{
	void *vaddr;
	asm (
		"1:	subl #0,%0\n"
		m68k_fixup(%c2, 1b+2)
		: "=r" (vaddr)
		: "0" (paddr), "i" (m68k_fixup_memoffset));
	return vaddr;
}
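
/*
 * A sketch of what the two helpers above boil down to (illustrative only):
 * the "#0" immediates are patched at boot through the m68k_fixup() entries,
 * so after startup they effectively compute
 *
 *	paddr = (unsigned long)vaddr + m68k_memoffset;
 *	vaddr = (void *)(paddr - m68k_memoffset);
 *
 * with a single patched add/sub instruction and no memory access.
 */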

#else	/* !CONFIG_SUN3 */
/* This #define is a horrible hack to suppress lots of warnings. --m */
#define __pa(x) ___pa((unsigned long)(x))
static inline unsigned long ___pa(unsigned long x)
{
	if (x == 0)
		return 0;
	if (x >= PAGE_OFFSET)
		return (x - PAGE_OFFSET);
	else
		return (x + 0x2000000);
}

static inline void *__va(unsigned long x)
{
	if (x == 0)
		return (void *)0;

	if (x < 0x2000000)
		return (void *)(x + PAGE_OFFSET);
	else
		return (void *)(x - 0x2000000);
}
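
/*
 * In other words: on Sun3 the first 0x2000000 bytes of physical memory are
 * assumed to be mapped linearly at PAGE_OFFSET, and any address outside
 * that window is translated by a fixed 0x2000000 offset instead.
 */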
#endif	/* CONFIG_SUN3 */

/*
 * NOTE: "virtual" isn't really correct here; it should actually be the
 * offset into the memory node, but since we have no highmem this works for
 * now.
 * TODO: implement (fast) pfn<->pgdat_idx conversion functions, this makes
 * lots of the shifts unnecessary.
 */
static inline unsigned long virt_to_pfn(const void *kaddr)
{
	return __pa(kaddr) >> PAGE_SHIFT;
}

static inline void *pfn_to_virt(unsigned long pfn)
{
	return __va(pfn << PAGE_SHIFT);
}
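
/*
 * Illustrative round trip (assuming a page-aligned memory offset, which
 * holds for the layouts above):
 *
 *	pfn_to_virt(virt_to_pfn(kaddr))
 *		== (void *)((unsigned long)kaddr & PAGE_MASK)
 */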

extern int m68k_virt_to_node_shift;

#define virt_to_page(addr) ({						\
	pfn_to_page(virt_to_pfn(addr));					\
})
#define page_to_virt(page) ({						\
	pfn_to_virt(page_to_pfn(page));					\
})

#define ARCH_PFN_OFFSET (m68k_memory[0].addr >> PAGE_SHIFT)

#define virt_addr_valid(kaddr)	((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory)
#define pfn_valid(pfn)		virt_addr_valid(pfn_to_virt(pfn))
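
/*
 * pfn_valid() can be routed through virt_addr_valid() here because, on
 * these configurations, all RAM sits in the linear mapping between
 * PAGE_OFFSET and high_memory; a pfn outside RAM maps to a virtual address
 * outside that range and is rejected.
 */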

#endif /* __ASSEMBLY__ */

#endif /* _M68K_PAGE_MM_H */