/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#ifndef _ASM_CPU_H
#define	_ASM_CPU_H

#include <sys/ccompile.h>

#ifdef	__cplusplus
extern "C" {
#endif

#if !defined(__lint) && defined(__GNUC__)

#if defined(__i386) || defined(__amd64)

extern __GNU_INLINE void
ht_pause(void)
{
	__asm__ __volatile__(
	    "pause");
}
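
/*
 * Usage sketch (illustrative only; spin_until_set() and flagp are
 * hypothetical, not part of this header): a busy-wait loop should call
 * ht_pause() between polls so it does not starve a hyperthread sibling
 * sharing the core:
 *
 *	static void
 *	spin_until_set(volatile uint32_t *flagp)
 *	{
 *		while (*flagp == 0)
 *			ht_pause();
 *	}
 */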

/*
 * prefetch 64 bytes
 *
 * prefetch is an SSE extension which is not supported on
 * older 32-bit processors, so define this as a no-op for now
 */

extern __GNU_INLINE void
prefetch_read_many(void *addr)
{
#if defined(__amd64)
	__asm__(
	    "prefetcht0 (%0);"
	    "prefetcht0 32(%0);"
	    : /* no output */
	    : "r" (addr));
#endif	/* __amd64 */
}

extern __GNU_INLINE void
prefetch_read_once(void *addr)
{
#if defined(__amd64)
	__asm__(
	    "prefetchnta (%0);"
	    "prefetchnta 32(%0);"
	    : /* no output */
	    : "r" (addr));
#endif	/* __amd64 */
}

extern __GNU_INLINE void
prefetch_write_many(void *addr)
{
#if defined(__amd64)
	__asm__(
	    "prefetcht0 (%0);"
	    "prefetcht0 32(%0);"
	    : /* no output */
	    : "r" (addr));
#endif	/* __amd64 */
}

extern __GNU_INLINE void
prefetch_write_once(void *addr)
{
#if defined(__amd64)
	__asm__(
	    "prefetcht0 (%0);"
	    "prefetcht0 32(%0);"
	    : /* no output */
	    : "r" (addr));
#endif	/* __amd64 */
}

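/*
 * Usage sketch (illustrative only; the list node type and process() are
 * hypothetical): prefetching the next element one iteration ahead lets the
 * cache fill overlap with the work on the current element:
 *
 *	for (p = head; p != NULL; p = p->next) {
 *		if (p->next != NULL)
 *			prefetch_read_many(p->next);
 *		process(p);
 *	}
 */
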
#if !defined(__xpv)

extern __GNU_INLINE void
cli(void)
{
	__asm__ __volatile__(
	    "cli" : : : "memory");
}

extern __GNU_INLINE void
sti(void)
{
	__asm__ __volatile__(
	    "sti");
}

extern __GNU_INLINE void
i86_halt(void)
{
	__asm__ __volatile__(
	    "sti; hlt");
}
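
/*
 * Usage sketch (illustrative only; work_pending() is hypothetical): an idle
 * loop can test for work with interrupts disabled and then halt.  Because
 * "sti" only takes effect after the following instruction, the "sti; hlt"
 * pair in i86_halt() cannot lose an interrupt that arrives between the test
 * and the halt:
 *
 *	cli();
 *	if (work_pending())
 *		sti();
 *	else
 *		i86_halt();
 */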

#endif /* !__xpv */

#endif	/* __i386 || __amd64 */

#if defined(__amd64)

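/*
 * The __set_*() routines below load a 16-bit segment selector into the
 * named segment register (%ds, %es, %fs or %gs).  A hypothetical caller
 * reloading the kernel data selector might look like:
 *
 *	__set_ds(KDS_SEL);
 *
 * (KDS_SEL is used here purely for illustration.)
 */
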
extern __GNU_INLINE void
__set_ds(selector_t value)
{
	__asm__ __volatile__(
	    "movw	%0, %%ds"
	    : /* no output */
	    : "r" (value));
}

extern __GNU_INLINE void
__set_es(selector_t value)
{
	__asm__ __volatile__(
	    "movw	%0, %%es"
	    : /* no output */
	    : "r" (value));
}

extern __GNU_INLINE void
__set_fs(selector_t value)
{
	__asm__ __volatile__(
	    "movw	%0, %%fs"
	    : /* no output */
	    : "r" (value));
}

extern __GNU_INLINE void
__set_gs(selector_t value)
{
	__asm__ __volatile__(
	    "movw	%0, %%gs"
	    : /* no output */
	    : "r" (value));
}

#if !defined(__xpv)

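/*
 * swapgs exchanges the current GS base with the value held in the
 * IA32_KERNEL_GS_BASE MSR; it is a privileged instruction used when
 * crossing between user and kernel context.
 */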
extern __GNU_INLINE void
__swapgs(void)
{
	__asm__ __volatile__(
	    "mfence; swapgs");
}

#endif /* !__xpv */

#endif	/* __amd64 */

#endif	/* !__lint && __GNUC__ */

#ifdef	__cplusplus
}
#endif

#endif	/* _ASM_CPU_H */