1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
3 *
4 * Copyright (c) 1998 Doug Rabson
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 */
28
29 #ifndef _MACHINE_CPUFUNC_H_
30 #define _MACHINE_CPUFUNC_H_
31
32 #ifdef _KERNEL
33
34 #include <sys/types.h>
35
36 #include <machine/psl.h>
37 #include <machine/spr.h>
38
39 struct thread;
40
41 #ifdef KDB
42 void breakpoint(void);
43 #else
/*
 * Debugger entry stub: with no KDB in the kernel config there is no
 * debugger to trap into, so breakpoint() collapses to a no-op and
 * callers need not be conditionalized on KDB themselves.
 */
static __inline void
breakpoint(void)
{
}
50 #endif
51
52 /* CPU register mangling inlines */
53
/*
 * Write the Machine State Register, followed by an isync so the new
 * machine state (e.g. a changed PSL_EE) is in effect before any
 * subsequent instruction executes.
 */
static __inline void
mtmsr(register_t value)
{

	__asm __volatile ("mtmsr %0; isync" :: "r"(value));
}
60
61 #ifdef __powerpc64__
/*
 * 64-bit form of mtmsr: write the full MSR with mtmsrd, then isync to
 * make the new state effective.  Only compiled on powerpc64.
 */
static __inline void
mtmsrd(register_t value)
{

	__asm __volatile ("mtmsrd %0; isync" :: "r"(value));
}
68 #endif
69
/*
 * Return the current contents of the Machine State Register.
 */
static __inline register_t
mfmsr(void)
{
	register_t value;

	__asm __volatile ("mfmsr %0" : "=r"(value));

	return (value);
}
79
80 #ifndef __powerpc64__
/*
 * Write the segment register selected by va (mtsrin uses the upper
 * bits of the effective address to pick the SR), then isync so the
 * new translation context takes effect.  32-bit only.
 */
static __inline void
mtsrin(vm_offset_t va, register_t value)
{

	__asm __volatile ("mtsrin %0,%1; isync" :: "r"(value), "r"(va));
}
87
/*
 * Read the segment register selected by va.  32-bit only.
 */
static __inline register_t
mfsrin(vm_offset_t va)
{
	register_t value;

	__asm __volatile ("mfsrin %0,%1" : "=r"(value) : "r"(va));

	return (value);
}
97 #endif
98
/*
 * Read SPR 136 -- the CTRL register, per the function's name.  The raw
 * SPR number is used because not all assemblers know a mnemonic for it.
 */
static __inline register_t
mfctrl(void)
{
	register_t value;

	__asm __volatile ("mfspr %0,136" : "=r"(value));

	return (value);
}
108
/*
 * Load the decrementer register with a new count.
 */
static __inline void
mtdec(register_t value)
{

	__asm __volatile ("mtdec %0" :: "r"(value));
}
115
/*
 * Return the current value of the decrementer register.
 */
static __inline register_t
mfdec(void)
{
	register_t value;

	__asm __volatile ("mfdec %0" : "=r"(value));

	return (value);
}
125
/*
 * Return the 32-bit Processor Version Register, identifying the CPU
 * model and revision.
 */
static __inline uint32_t
mfpvr(void)
{
	uint32_t value;

	__asm __volatile ("mfpvr %0" : "=r"(value));

	return (value);
}
135
/*
 * Read the 64-bit time base.
 *
 * On powerpc64 a single mftb returns the whole value.  On 32-bit, TBU
 * and TBL are separate SPRs read back to back; the loop re-reads TBU
 * afterwards and retries if it changed, which catches the case where
 * TBL wrapped (carrying into TBU) between the two reads.  Writing the
 * halves through tbup/tblp (high word at the lower address) assumes
 * big-endian storage order -- NOTE(review): true for 32-bit PowerPC
 * kernels, but worth confirming if little-endian 32-bit is ever added.
 */
static __inline u_quad_t
mftb(void)
{
	u_quad_t tb;
#ifdef __powerpc64__
	__asm __volatile ("mftb %0" : "=r"(tb));
#else
	uint32_t *tbup = (uint32_t *)&tb;
	uint32_t *tblp = tbup + 1;

	do {
		*tbup = mfspr(TBR_TBU);
		*tblp = mfspr(TBR_TBL);
	} while (*tbup != mfspr(TBR_TBU));
#endif

	return (tb);
}
154
/*
 * Write the 64-bit time base as two 32-bit halves.  TBL is zeroed
 * first so the time base cannot tick over and carry into TBU while
 * the upper half is being written; the real low word goes in last.
 */
static __inline void
mttb(u_quad_t time)
{

	mtspr(TBR_TBWL, 0);
	mtspr(TBR_TBWU, (uint32_t)(time >> 32));
	mtspr(TBR_TBWL, (uint32_t)(time & 0xffffffff));
}
163
/*
 * Read the FPSCR: mffs places it in f0 and stfd spills the full
 * 64 bits to the local.  The result is narrowed to register_t, so on
 * 32-bit only the low word (where the FPSCR status/control bits live)
 * is returned.  Clobbers f0 -- NOTE(review): f0 is not listed in the
 * asm constraints; callers are assumed not to care.
 */
static __inline register_t
mffs(void)
{
	uint64_t value;

	__asm __volatile ("mffs 0; stfd 0,0(%0)"
			:: "b"(&value));

	return ((register_t)value);
}
174
/*
 * Write the FPSCR: lfd loads the caller's 64-bit image into f0 and
 * mtfsf with field mask 0xff updates all eight FPSCR fields from it.
 * Clobbers f0 (not declared in the constraints; see mffs above).
 */
static __inline void
mtfsf(uint64_t value)
{

	__asm __volatile ("lfd 0,0(%0); mtfsf 0xff,0"
			:: "b"(&value));
}
182
/*
 * Enforce In-order Execution of I/O: orders storage accesses in
 * hardware; the "memory" clobber also stops the compiler from
 * reordering memory operations across the barrier.
 */
static __inline void
eieio(void)
{

	__asm __volatile ("eieio" : : : "memory");
}
189
/*
 * Instruction synchronize: wait for preceding instructions to complete
 * and discard any prefetched instructions, so prior context-altering
 * operations are seen by everything that follows.  Compiler barrier
 * via the "memory" clobber.
 */
static __inline void
isync(void)
{

	__asm __volatile ("isync" : : : "memory");
}
196
/*
 * Heavyweight sync: full memory barrier ordering all prior storage
 * accesses before all subsequent ones.  Compiler barrier as well.
 */
static __inline void
powerpc_sync(void)
{

	__asm __volatile ("sync" : : : "memory");
}
203
/*
 * Count leading zeros of a 64-bit word.  The instruction is emitted as
 * a hand-encoded .long (base opcode 0x7c000074) so assemblers that
 * lack the cntlzd mnemonic can still build this file; %1 and %0 expand
 * to raw register numbers dropped into the RS (bit 21) and RA (bit 16)
 * fields respectively.
 */
static __inline int
cntlzd(uint64_t word)
{
	uint64_t result;
	/* cntlzd %0, %1 */
	__asm __volatile(".long 0x7c000074 | (%1 << 21) | (%0 << 16)" :
	    "=r"(result) : "r"(word));

	return (int)result;
}
214
/*
 * Count trailing zeros of a 64-bit word, hand-encoded like cntlzd
 * above (base opcode 0x7c000474, RS in bit 21, RA in bit 16).
 * NOTE(review): cnttzd is an ISA 3.0 (POWER9-era) instruction --
 * callers are presumed to gate on CPU features; confirm.
 */
static __inline int
cnttzd(uint64_t word)
{
	uint64_t result;
	/* cnttzd %0, %1 */
	__asm __volatile(".long 0x7c000474 | (%1 << 21) | (%0 << 16)" :
	    "=r"(result) : "r"(word));

	return (int)result;
}
225
/*
 * Page-table-entry synchronization barrier (heavyweight sync variant
 * ordering PTE updates against subsequent accesses).  The "memory"
 * clobber makes it a compiler barrier too, matching the other barrier
 * inlines in this file (eieio, isync, powerpc_sync) -- without it the
 * compiler could reorder or cache memory accesses across the barrier,
 * defeating its purpose.
 */
static __inline void
ptesync(void)
{
	__asm __volatile("ptesync" : : : "memory");
}
231
232 static __inline register_t
intr_disable(void)233 intr_disable(void)
234 {
235 register_t msr;
236
237 msr = mfmsr();
238 mtmsr(msr & ~PSL_EE);
239 return (msr);
240 }
241
/*
 * Restore the MSR -- and with it the external-interrupt enable state --
 * previously saved by intr_disable().
 */
static __inline void
intr_restore(register_t msr)
{

	mtmsr(msr);
}
248
/*
 * Return the per-CPU data pointer, fetched from SPRG0 where the kernel
 * keeps the current CPU's struct pcpu.
 */
static __inline struct pcpu *
get_pcpu(void)
{
	struct pcpu *ret;

	__asm __volatile("mfsprg %0, 0" : "=r"(ret));

	return (ret);
}
258
259 /* "NOP" operations to signify priorities to the kernel. */
/* Very-low SMT thread priority hint ("or 31,31,31" no-op form). */
static __inline void
nop_prio_vlow(void)
{
	__asm __volatile("or 31,31,31");
}
265
/* Low SMT thread priority hint ("or 1,1,1" no-op form). */
static __inline void
nop_prio_low(void)
{
	__asm __volatile("or 1,1,1");
}
271
/* Medium-low SMT thread priority hint ("or 6,6,6" no-op form). */
static __inline void
nop_prio_mlow(void)
{
	__asm __volatile("or 6,6,6");
}
277
/* Medium (default) SMT thread priority hint ("or 2,2,2" no-op form). */
static __inline void
nop_prio_medium(void)
{
	__asm __volatile("or 2,2,2");
}
283
/* Medium-high SMT thread priority hint ("or 5,5,5" no-op form). */
static __inline void
nop_prio_mhigh(void)
{
	__asm __volatile("or 5,5,5");
}
289
/* High SMT thread priority hint ("or 3,3,3" no-op form). */
static __inline void
nop_prio_high(void)
{
	__asm __volatile("or 3,3,3");
}
295
296 #endif /* _KERNEL */
297
298 #endif /* !_MACHINE_CPUFUNC_H_ */
299