xref: /freebsd/sys/i386/include/xen/xen-os.h (revision bd81e07d2761cf1c13063eb49a5c0cb4a6951318)
/*****************************************************************************
 * i386/xen/xen-os.h
 *
 * Random collection of macros and definitions
 *
 * Copyright (c) 2003, 2004 Keir Fraser (on behalf of the Xen team)
 * All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * $FreeBSD$
 */

#ifndef _MACHINE_XEN_XEN_OS_H_
#define _MACHINE_XEN_XEN_OS_H_

#ifdef PAE
#define CONFIG_X86_PAE
#endif

/* Everything below this point is not included by assembler (.S) files. */
#ifndef __ASSEMBLY__

/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static inline void rep_nop(void)
{
    __asm__ __volatile__ ( "rep;nop" : : : "memory" );
}
#define cpu_relax() rep_nop()

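/*
 * Usage sketch (illustrative only; example_spin_until_set() and its flag
 * argument are hypothetical, not part of the original header): a typical
 * busy-wait loop inserts PAUSE on every iteration via cpu_relax().
 */
static inline void example_spin_until_set(volatile int *flag)
{
        while (*flag == 0)
                cpu_relax();    /* rep;nop (PAUSE) plus a compiler barrier */
}
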
/* This is a barrier for the compiler only, NOT the processor! */
#define barrier() __asm__ __volatile__("": : :"memory")

#define LOCK_PREFIX ""
#define LOCK ""
#define ADDR (*(volatile long *) addr)
/*
 * Make sure gcc doesn't try to be clever and move things around
 * on us. We need to use _exactly_ the address the user gave us,
 * not some alias that contains the same information.
 */
typedef struct { volatile int counter; } atomic_t;

#define xen_xchg(ptr,v) \
        ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((volatile struct __xchg_dummy *)(x))
static __inline unsigned long __xchg(unsigned long x, volatile void * ptr,
                                   int size)
{
    switch (size) {
    case 1:
        __asm__ __volatile__("xchgb %b0,%1"
                             :"=q" (x)
                             :"m" (*__xg(ptr)), "0" (x)
                             :"memory");
        break;
    case 2:
        __asm__ __volatile__("xchgw %w0,%1"
                             :"=r" (x)
                             :"m" (*__xg(ptr)), "0" (x)
                             :"memory");
        break;
    case 4:
        __asm__ __volatile__("xchgl %0,%1"
                             :"=r" (x)
                             :"m" (*__xg(ptr)), "0" (x)
                             :"memory");
        break;
    }
    return x;
}

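/*
 * Usage sketch (illustrative; example_test_and_set() is hypothetical):
 * xen_xchg() atomically swaps in a new value and returns the old one,
 * selecting the 1-, 2- or 4-byte xchg variant from sizeof(*(ptr)).
 */
static __inline int example_test_and_set(volatile int *lock)
{
        /* Old value: nonzero means the lock was already held. */
        return xen_xchg(lock, 1);
}
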
/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline int test_and_clear_bit(int nr, volatile void * addr)
{
        int oldbit;

        __asm__ __volatile__( LOCK_PREFIX
                "btrl %2,%1\n\tsbbl %0,%0"
                :"=r" (oldbit),"=m" (ADDR)
                :"Ir" (nr) : "memory");
        return oldbit;
}

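/*
 * Usage sketch (illustrative; example_claim_event() and the pending bitmap
 * are hypothetical): test_and_clear_bit() atomically consumes one pending
 * bit, so only the caller that saw it set goes on to handle the event.
 */
static __inline int example_claim_event(int nr, volatile unsigned long *pending)
{
        return test_and_clear_bit(nr, pending); /* 1 iff this caller cleared it */
}
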
static __inline int constant_test_bit(int nr, const volatile void * addr)
{
    return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
}

static __inline int variable_test_bit(int nr, volatile void * addr)
{
    int oldbit;

    __asm__ __volatile__(
        "btl %2,%1\n\tsbbl %0,%0"
        :"=r" (oldbit)
        :"m" (ADDR),"Ir" (nr));
    return oldbit;
}

#define test_bit(nr,addr) \
(__builtin_constant_p(nr) ? \
 constant_test_bit((nr),(addr)) : \
 variable_test_bit((nr),(addr)))


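/*
 * Usage sketch (illustrative; example_event_pending() and the meaning of
 * bit 0 as a "masked" flag are hypothetical): test_bit() dispatches on
 * __builtin_constant_p(), so a literal index compiles to the C shift/mask
 * form while a run-time index uses the btl instruction.
 */
static __inline int example_event_pending(volatile unsigned long *map, int nr)
{
        /* test_bit(0, ...) -> constant_test_bit(); test_bit(nr, ...) -> variable_test_bit() */
        return test_bit(nr, map) && !test_bit(0, map);
}
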
/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void set_bit(int nr, volatile void * addr)
{
        __asm__ __volatile__( LOCK_PREFIX
                "btsl %1,%0"
                :"=m" (ADDR)
                :"Ir" (nr));
}

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void clear_bit(int nr, volatile void * addr)
{
        __asm__ __volatile__( LOCK_PREFIX
                "btrl %1,%0"
                :"=m" (ADDR)
                :"Ir" (nr));
}

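/*
 * Usage sketch (illustrative; example_mark_port() and its arguments are
 * hypothetical): set_bit() and clear_bit() pair naturally for publishing
 * and retiring flags in a shared bitmap; note that clear_bit() by itself
 * does not order surrounding memory accesses.
 */
static __inline void example_mark_port(int port, volatile unsigned long *map, int on)
{
        if (on)
                set_bit(port, map);
        else
                clear_bit(port, map);
}
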
/**
 * atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
static __inline__ void atomic_inc(atomic_t *v)
{
        __asm__ __volatile__(
                LOCK "incl %0"
                :"=m" (v->counter)
                :"m" (v->counter));
}


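/*
 * Usage sketch (illustrative; example_count_event() is hypothetical):
 * atomic_inc() bumps a shared counter in place; with LOCK defined as ""
 * above, the incl carries no lock prefix.
 */
static __inline void example_count_event(atomic_t *counter)
{
        atomic_inc(counter);
}
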
#define rdtscll(val) \
     __asm__ __volatile__("rdtsc" : "=A" (val))

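/*
 * Usage sketch (illustrative; example_read_tsc() is hypothetical): rdtscll()
 * reads the 64-bit time-stamp counter; the "=A" constraint pairs EDX:EAX
 * with a 64-bit lvalue on i386.
 */
static __inline unsigned long long example_read_tsc(void)
{
        unsigned long long now;

        rdtscll(now);
        return now;
}
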
#endif /* !__ASSEMBLY__ */

#endif /* _MACHINE_XEN_XEN_OS_H_ */