/*-
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _MACHINE_CPUFUNC_H_
#define	_MACHINE_CPUFUNC_H_

#ifdef _KERNEL

#include <sys/types.h>

#include <machine/psl.h>

#define	CRITICAL_FORK	(mfmsr() | PSL_EE)

#ifdef __GNUC__

/*
 * breakpoint() is currently a no-op stub; it does not issue a trap
 * instruction.
 */
static __inline void
breakpoint(void)
{

	return;
}

#endif

/* CPU register mangling inlines */

/* Write the Machine State Register. */
static __inline void
mtmsr(unsigned int value)
{

	__asm __volatile ("mtmsr %0" :: "r"(value));
}

/* Read the Machine State Register. */
static __inline unsigned int
mfmsr(void)
{
	unsigned int	value;

	__asm __volatile ("mfmsr %0" : "=r"(value));

	return (value);
}

/* Write the decrementer. */
static __inline void
mtdec(unsigned int value)
{

	__asm __volatile ("mtdec %0" :: "r"(value));
}

/* Read the decrementer. */
static __inline unsigned int
mfdec(void)
{
	unsigned int	value;

	__asm __volatile ("mfdec %0" : "=r"(value));

	return (value);
}
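
/*
 * Example (sketch, not part of this header's API): a decrementer interrupt
 * handler would typically reload DEC with the tick period.  "ticks_per_intr"
 * below is a hypothetical value, assumed to have been derived from the
 * timebase frequency elsewhere:
 *
 *	mtdec(ticks_per_intr);
 */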

/*
 * Bogus interrupt manipulation
 */
static __inline void
disable_intr(void)
{
	unsigned int	msr;

	msr = mfmsr();
	mtmsr(msr & ~PSL_EE);
}

static __inline void
enable_intr(void)
{
	unsigned int	msr;

	msr = mfmsr();
	mtmsr(msr | PSL_EE);
}
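
/*
 * Example (sketch): disable_intr()/enable_intr() bracket a short region that
 * must not take external interrupts.  Note that enable_intr() sets PSL_EE
 * unconditionally; when interrupts may already be disabled, the
 * save_intr()/restore_intr() pair defined below is the safer pattern:
 *
 *	unsigned int msr;
 *
 *	msr = save_intr();
 *	disable_intr();
 *	... interrupt-sensitive work ...
 *	restore_intr(msr);
 */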

static __inline unsigned int
save_intr(void)
{
	unsigned int	msr;

	msr = mfmsr();

	return (msr);
}

static __inline critical_t
cpu_critical_enter(void)
{

	return ((critical_t)save_intr());
}

static __inline void
restore_intr(unsigned int msr)
{

	mtmsr(msr);
}

static __inline void
cpu_critical_exit(critical_t msr)
{

	restore_intr((unsigned int)msr);
}
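
/*
 * Example (sketch, not the machine-independent implementation): a caller
 * could bracket a region with the cpu_critical_*() pair, disabling
 * interrupts itself, since cpu_critical_enter() only saves the MSR:
 *
 *	critical_t savecrit;
 *
 *	savecrit = cpu_critical_enter();
 *	disable_intr();
 *	... code that must not take interrupts ...
 *	cpu_critical_exit(savecrit);
 */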

/* Enforce ordering of memory accesses (eieio + sync memory barrier). */
static __inline void
powerpc_mb(void)
{

	__asm __volatile("eieio; sync" : : : "memory");
}
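
/*
 * Example (sketch): powerpc_mb() can order a device register write against
 * a later read.  "reg", "cmd" and "status" below are hypothetical names for
 * a memory-mapped device register and its values:
 *
 *	*reg = cmd;
 *	powerpc_mb();
 *	status = *reg;
 */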

/* Return the per-CPU data pointer, which is kept in SPRG0. */
static __inline struct pcpu *
powerpc_get_pcpup(void)
{
	struct pcpu	*ret;

	__asm ("mfsprg %0, 0" : "=r"(ret));

	return (ret);
}
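
/*
 * Example (sketch): the pointer read from SPRG0 gives access to the current
 * CPU's pcpu area; pc_curthread is a field of struct pcpu declared in
 * <sys/pcpu.h>:
 *
 *	struct pcpu *pc = powerpc_get_pcpup();
 *	struct thread *td = pc->pc_curthread;
 */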

#endif /* _KERNEL */

#endif /* !_MACHINE_CPUFUNC_H_ */