xref: /freebsd/sys/powerpc/include/cpufunc.h (revision ce6a89e27cd190313be39bb479880aeda4778436)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 1998 Doug Rabson
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  *
28  * $FreeBSD$
29  */
30 
31 #ifndef _MACHINE_CPUFUNC_H_
32 #define	_MACHINE_CPUFUNC_H_
33 
34 #ifdef _KERNEL
35 
36 #include <sys/types.h>
37 
38 #include <machine/psl.h>
39 #include <machine/spr.h>
40 
41 struct thread;
42 
#ifdef KDB
/* Real implementation provided by the kernel debugger code. */
void breakpoint(void);
#else
/*
 * Without KDB there is no debugger to enter; provide an inline no-op
 * so callers do not have to be conditionalized on the KDB option.
 */
static __inline void
breakpoint(void)
{

	return;
}
#endif
53 
54 /* CPU register mangling inlines */
55 
/*
 * Write the Machine State Register, then isync so the context change
 * is complete before any subsequent instruction executes.
 */
static __inline void
mtmsr(register_t value)
{

	__asm __volatile ("mtmsr %0; isync" :: "r"(value));
}
62 
#ifdef __powerpc64__
/*
 * 64-bit form of mtmsr(): writes the full 64-bit MSR, followed by an
 * isync context synchronization.
 */
static __inline void
mtmsrd(register_t value)
{

	__asm __volatile ("mtmsrd %0; isync" :: "r"(value));
}
#endif
71 
/*
 * Read the current Machine State Register.
 */
static __inline register_t
mfmsr(void)
{
	register_t value;

	__asm __volatile ("mfmsr %0" : "=r"(value));

	return (value);
}
81 
82 #ifndef __powerpc64__
/*
 * Load the segment register covering 'va' with 'value' (32-bit
 * segmented MMU only), then isync so the new mapping takes effect.
 */
static __inline void
mtsrin(vm_offset_t va, register_t value)
{

	__asm __volatile ("mtsrin %0,%1; isync" :: "r"(value), "r"(va));
}
89 
/*
 * Read back the segment register covering 'va' (32-bit segmented
 * MMU only).
 */
static __inline register_t
mfsrin(vm_offset_t va)
{
	register_t value;

	__asm __volatile ("mfsrin %0,%1" : "=r"(value) : "r"(va));

	return (value);
}
99 #endif
100 
/*
 * Read SPR 136 -- the CTRL register, going by the function name.
 * Encoded numerically rather than via a mnemonic/spr.h constant;
 * NOTE(review): consider adding a SPR_CTRL define for clarity.
 */
static __inline register_t
mfctrl(void)
{
	register_t value;

	__asm __volatile ("mfspr %0,136" : "=r"(value));

	return (value);
}
110 
111 
/*
 * Write the Decrementer register.
 */
static __inline void
mtdec(register_t value)
{

	__asm __volatile ("mtdec %0" :: "r"(value));
}
118 
/*
 * Read the Decrementer register.
 */
static __inline register_t
mfdec(void)
{
	register_t value;

	__asm __volatile ("mfdec %0" : "=r"(value));

	return (value);
}
128 
/*
 * Read the Processor Version Register, which identifies the CPU
 * model and revision.
 */
static __inline register_t
mfpvr(void)
{
	register_t value;

	__asm __volatile ("mfpvr %0" : "=r"(value));

	return (value);
}
138 
/*
 * Read the full 64-bit time base.
 *
 * On 64-bit CPUs a single mftb suffices.  On 32-bit CPUs the upper
 * (TBR_TBU) and lower (TBR_TBL) halves must be read separately, so we
 * loop: if TBU changed between the first and the final read, TBL
 * wrapped mid-sequence and the pair is re-read.  The code stores TBU
 * into the first 32-bit word of 'tb' -- i.e. it assumes big-endian
 * word order within the u_quad_t.
 */
static __inline u_quad_t
mftb(void)
{
	u_quad_t tb;
      #ifdef __powerpc64__
	__asm __volatile ("mftb %0" : "=r"(tb));
      #else
	uint32_t *tbup = (uint32_t *)&tb;	/* upper 32 bits */
	uint32_t *tblp = tbup + 1;		/* lower 32 bits */

	do {
		*tbup = mfspr(TBR_TBU);
		*tblp = mfspr(TBR_TBL);
	} while (*tbup != mfspr(TBR_TBU));
      #endif

	return (tb);
}
157 
/*
 * Set the time base to 'time'.
 *
 * The lower half is zeroed first so that a carry out of TBL cannot
 * corrupt TBU between the two following writes; TBL is then written
 * last with its real value.
 */
static __inline void
mttb(u_quad_t time)
{

	mtspr(TBR_TBWL, 0);
	mtspr(TBR_TBWU, (uint32_t)(time >> 32));
	mtspr(TBR_TBWL, (uint32_t)(time & 0xffffffff));
}
166 
/*
 * Enforce In-order Execution of I/O: orders accesses to device and
 * write-through memory.  The "memory" clobber also stops the compiler
 * from reordering memory operations across it.
 */
static __inline void
eieio(void)
{

	__asm __volatile ("eieio" : : : "memory");
}
173 
/*
 * Instruction synchronize: discard any prefetched/speculative
 * instructions so preceding context-altering operations are seen by
 * subsequent instructions.  Compiler barrier via the "memory" clobber.
 */
static __inline void
isync(void)
{

	__asm __volatile ("isync" : : : "memory");
}
180 
/*
 * Heavyweight 'sync' memory barrier; also a compiler barrier through
 * the "memory" clobber.
 */
static __inline void
powerpc_sync(void)
{

	__asm __volatile ("sync" : : : "memory");
}
187 
/*
 * Disable external interrupts by clearing PSL_EE in the MSR.
 * Returns the previous MSR value, suitable for a later
 * intr_restore().
 */
static __inline register_t
intr_disable(void)
{
	register_t msr;

	msr = mfmsr();
	mtmsr(msr & ~PSL_EE);
	return (msr);
}
197 
/*
 * Restore an MSR value previously saved by intr_disable(),
 * re-enabling interrupts iff they were enabled at save time.
 */
static __inline void
intr_restore(register_t msr)
{

	mtmsr(msr);
}
204 
/*
 * Fetch this CPU's per-CPU data pointer, which is kept in SPRG0.
 */
static __inline struct pcpu *
get_pcpu(void)
{
	struct pcpu *ret;

	__asm __volatile("mfsprg %0, 0" : "=r"(ret));

	return (ret);
}
214 
215 #define	HAVE_INLINE_FLS
216 static __inline __pure2 int
217 fls(int mask)
218 {
219 	return (mask ? 32 - __builtin_clz(mask) : 0);
220 }
221 
222 #define HAVE_INLINE_FLSL
223 static __inline __pure2 int
224 flsl(long mask)
225 {
226 	return (mask ? (8 * sizeof(long) - __builtin_clzl(mask)) : 0);
227 }
228 
229 /* "NOP" operations to signify priorities to the kernel. */
/* "or 31,31,31": nop encoding hinting very-low thread priority. */
static __inline void
nop_prio_vlow(void)
{
	__asm __volatile("or 31,31,31");
}
235 
/* "or 1,1,1": nop encoding hinting low thread priority. */
static __inline void
nop_prio_low(void)
{
	__asm __volatile("or 1,1,1");
}
241 
/* "or 6,6,6": nop encoding hinting medium-low thread priority. */
static __inline void
nop_prio_mlow(void)
{
	__asm __volatile("or 6,6,6");
}
247 
/* "or 2,2,2": nop encoding hinting medium (normal) thread priority. */
static __inline void
nop_prio_medium(void)
{
	__asm __volatile("or 2,2,2");
}
253 
/* "or 5,5,5": nop encoding hinting medium-high thread priority. */
static __inline void
nop_prio_mhigh(void)
{
	__asm __volatile("or 5,5,5");
}
259 
/* "or 3,3,3": nop encoding hinting high thread priority. */
static __inline void
nop_prio_high(void)
{
	__asm __volatile("or 3,3,3");
}
265 
266 #endif /* _KERNEL */
267 
268 #endif /* !_MACHINE_CPUFUNC_H_ */
269