xref: /freebsd/sys/powerpc/include/cpufunc.h (revision a3cf0ef5a295c885c895fabfd56470c0d1db322d)
1 /*-
2  * Copyright (c) 1998 Doug Rabson
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  *
26  * $FreeBSD$
27  */
28 
29 #ifndef _MACHINE_CPUFUNC_H_
30 #define	_MACHINE_CPUFUNC_H_
31 
32 /*
33  * Required for user-space atomic.h includes
34  */
/*
 * Full memory barrier (eieio followed by sync), provided outside the
 * _KERNEL section so that user-space includes of atomic.h have a
 * barrier primitive available.
 */
static __inline void
powerpc_mb(void)
{

	__asm __volatile("eieio; sync" : : : "memory");
}
41 
42 #ifdef _KERNEL
43 
44 #include <sys/types.h>
45 
46 #include <machine/psl.h>
47 #include <machine/spr.h>
48 
49 struct thread;
50 
#ifdef KDB
/* Trap into the kernel debugger; implemented in the KDB support code. */
void breakpoint(void);
#else
/* No debugger configured: breakpoint() becomes a no-op. */
static __inline void
breakpoint(void)
{

	return;
}
#endif
61 
62 /* CPU register mangling inlines */
63 
/*
 * Write the Machine State Register.  The trailing isync context-
 * synchronizes so the new MSR value (e.g. an interrupt-enable change)
 * takes effect before any subsequent instruction.
 */
static __inline void
mtmsr(register_t value)
{

	__asm __volatile ("mtmsr %0; isync" :: "r"(value));
}
70 
#ifdef __powerpc64__
/*
 * 64-bit MSR write (mtmsrd updates the full 64-bit MSR), followed by
 * isync to context-synchronize, as in mtmsr() above.
 */
static __inline void
mtmsrd(register_t value)
{

	__asm __volatile ("mtmsrd %0; isync" :: "r"(value));
}
#endif
79 
/* Read and return the current Machine State Register. */
static __inline register_t
mfmsr(void)
{
	register_t value;

	__asm __volatile ("mfmsr %0" : "=r"(value));

	return (value);
}
89 
#ifndef __powerpc64__
/*
 * Segment-register access, 32-bit only: mtsrin/mfsrin select the
 * segment register indirectly via the upper bits of the effective
 * address 'va'.
 */
static __inline void
mtsrin(vm_offset_t va, register_t value)
{

	__asm __volatile ("mtsrin %0,%1" :: "r"(value), "r"(va));
}

/* Read the segment register covering effective address 'va'. */
static __inline register_t
mfsrin(vm_offset_t va)
{
	register_t value;

	__asm __volatile ("mfsrin %0,%1" : "=r"(value) : "r"(va));

	return (value);
}
#endif
108 
/* Load the decrementer register (the CPU's count-down timer). */
static __inline void
mtdec(register_t value)
{

	__asm __volatile ("mtdec %0" :: "r"(value));
}
115 
/* Read the current value of the decrementer register. */
static __inline register_t
mfdec(void)
{
	register_t value;

	__asm __volatile ("mfdec %0" : "=r"(value));

	return (value);
}
125 
/* Read the Processor Version Register (CPU model/revision). */
static __inline register_t
mfpvr(void)
{
	register_t value;

	__asm __volatile ("mfpvr %0" : "=r"(value));

	return (value);
}
135 
/*
 * Read the 64-bit timebase.  On 64-bit CPUs a single mftb returns the
 * whole value.  On 32-bit CPUs the upper (TBU) and lower (TBL) halves
 * are read separately; the loop re-reads TBU afterwards and retries if
 * it changed, so a carry out of TBL between the two reads can never
 * produce a torn result.
 */
static __inline u_quad_t
mftb(void)
{
	u_quad_t tb;
      #ifdef __powerpc64__
	__asm __volatile ("mftb %0" : "=r"(tb));
      #else
	/*
	 * Word layout assumes big-endian: the first 32-bit word of 'tb'
	 * is the high half (TBU), the second is the low half (TBL).
	 */
	uint32_t *tbup = (uint32_t *)&tb;
	uint32_t *tblp = tbup + 1;

	do {
		*tbup = mfspr(TBR_TBU);
		*tblp = mfspr(TBR_TBL);
	} while (*tbup != mfspr(TBR_TBU));
      #endif

	return (tb);
}
154 
/*
 * Write the 64-bit timebase.  TBL is zeroed first so that a carry out
 * of the low word cannot increment TBU between the TBU and final TBL
 * writes; the real low word is written last.
 */
static __inline void
mttb(u_quad_t time)
{

	mtspr(TBR_TBWL, 0);
	mtspr(TBR_TBWU, (uint32_t)(time >> 32));
	mtspr(TBR_TBWL, (uint32_t)(time & 0xffffffff));
}
163 
/* Enforce In-order Execution of I/O: orders loads/stores to device memory. */
static __inline void
eieio(void)
{

	__asm __volatile ("eieio");
}
170 
/* Instruction synchronize: discard prefetched instructions and refetch. */
static __inline void
isync(void)
{

	__asm __volatile ("isync");
}
177 
/* Heavyweight sync: full barrier for all preceding storage accesses. */
static __inline void
powerpc_sync(void)
{

	__asm __volatile ("sync");
}
184 
185 static __inline register_t
186 intr_disable(void)
187 {
188 	register_t msr;
189 
190 	msr = mfmsr();
191 	mtmsr(msr & ~PSL_EE);
192 	return (msr);
193 }
194 
/*
 * Restore the MSR (and thus the interrupt-enable state) previously
 * saved by intr_disable().
 */
static __inline void
intr_restore(register_t msr)
{

	mtmsr(msr);
}
201 
/*
 * Return this CPU's per-CPU data pointer, which the kernel keeps in
 * SPRG0 (set up during CPU initialization elsewhere — see the pcpu
 * machine-dependent code).
 */
static __inline struct pcpu *
powerpc_get_pcpup(void)
{
	struct pcpu *ret;

	__asm __volatile("mfsprg %0, 0" : "=r"(ret));

	return (ret);
}
211 
212 #endif /* _KERNEL */
213 
214 #endif /* !_MACHINE_CPUFUNC_H_ */
215