/******************************************************************************
 * os.h
 *
 * random collection of macros and definitions
 *
 * $FreeBSD$
 */

#ifndef _XEN_OS_H_
#define _XEN_OS_H_

#ifdef PAE
#define CONFIG_X86_PAE
#endif

#if !defined(__XEN_INTERFACE_VERSION__)
/*
 * Can update to a more recent version when we implement
 * the hypercall page.
 */
#define __XEN_INTERFACE_VERSION__ 0x00030204
#endif

#include <xen/interface/xen.h>

/* Force a proper event-channel callback from Xen. */
void force_evtchn_callback(void);

extern int gdtset;

extern shared_info_t *HYPERVISOR_shared_info;

/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static inline void rep_nop(void)
{
        __asm__ __volatile__ ( "rep;nop" : : : "memory" );
}
#define cpu_relax() rep_nop()

/*
 * Crude memory allocator for allocations early in boot.
 */
void *bootmem_alloc(unsigned int size);
void bootmem_free(void *ptr, unsigned int size);

/* Everything below this point is not included by assembler (.S) files. */
#ifndef __ASSEMBLY__

void printk(const char *fmt, ...);

/* some function prototypes */
void trap_init(void);

#define likely(x)   __builtin_expect((x),1)
#define unlikely(x) __builtin_expect((x),0)

#ifndef XENHVM

/*
 * STI/CLI equivalents.  These basically set and clear the virtual
 * event_enable flag in the shared_info structure.  Note that when
 * the enable bit is set, there may be pending events to be handled.
 * We may therefore call into do_hypervisor_callback() directly.
 */

#define __cli() \
do { \
        vcpu_info_t *_vcpu; \
        _vcpu = &HYPERVISOR_shared_info->vcpu_info[PCPU_GET(cpuid)]; \
        _vcpu->evtchn_upcall_mask = 1; \
        barrier(); \
} while (0)

#define __sti() \
do { \
        vcpu_info_t *_vcpu; \
        barrier(); \
        _vcpu = &HYPERVISOR_shared_info->vcpu_info[PCPU_GET(cpuid)]; \
        _vcpu->evtchn_upcall_mask = 0; \
        barrier(); /* unmask then check (avoid races) */ \
        if (unlikely(_vcpu->evtchn_upcall_pending)) \
                force_evtchn_callback(); \
} while (0)

#define __restore_flags(x) \
do { \
        vcpu_info_t *_vcpu; \
        barrier(); \
        _vcpu = &HYPERVISOR_shared_info->vcpu_info[PCPU_GET(cpuid)]; \
        if ((_vcpu->evtchn_upcall_mask = (x)) == 0) { \
                barrier(); /* unmask then check (avoid races) */ \
                if (unlikely(_vcpu->evtchn_upcall_pending)) \
                        force_evtchn_callback(); \
        } \
} while (0)
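
/*
 * Illustrative sketch only, not part of the original interface: the
 * mask/unmask macros above typically bracket a short critical section,
 * masking event-channel upcalls on the current vcpu and then unmasking
 * (which delivers any pending events).  The macro name below is
 * hypothetical, e.g. EXAMPLE_EVTCHN_MASKED(pending_count++);
 */
#define EXAMPLE_EVTCHN_MASKED(stmt) \
do { \
        __cli();        /* mask upcalls on this vcpu */ \
        stmt;           /* work that must not race with an upcall */ \
        __sti();        /* unmask; fires the callback if events are pending */ \
} while (0)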
/*
 * Add critical_{enter, exit}?
 */
#define __save_and_cli(x) \
do { \
        vcpu_info_t *_vcpu; \
        _vcpu = &HYPERVISOR_shared_info->vcpu_info[PCPU_GET(cpuid)]; \
        (x) = _vcpu->evtchn_upcall_mask; \
        _vcpu->evtchn_upcall_mask = 1; \
        barrier(); \
} while (0)

#define cli()                   __cli()
#define sti()                   __sti()
#define save_flags(x)           __save_flags(x)
#define restore_flags(x)        __restore_flags(x)
#define save_and_cli(x)         __save_and_cli(x)

#define local_irq_save(x)       __save_and_cli(x)
#define local_irq_restore(x)    __restore_flags(x)
#define local_irq_disable()     __cli()
#define local_irq_enable()      __sti()

#define mtx_lock_irqsave(lock, x)       { local_irq_save((x)); mtx_lock_spin((lock)); }
#define mtx_unlock_irqrestore(lock, x)  { mtx_unlock_spin((lock)); local_irq_restore((x)); }
#define spin_lock_irqsave               mtx_lock_irqsave
#define spin_unlock_irqrestore          mtx_unlock_irqrestore

#endif /* !XENHVM */

#ifndef mb
#define mb()    __asm__ __volatile__("mfence":::"memory")
#endif
#ifndef rmb
#define rmb()   __asm__ __volatile__("lfence":::"memory")
#endif
#ifndef wmb
#define wmb()   barrier()
#endif

#ifdef SMP
#define smp_mb()        mb()
#define smp_rmb()       rmb()
#define smp_wmb()       wmb()
#define smp_read_barrier_depends()      read_barrier_depends()
#define set_mb(var, value) do { xchg(&var, value); } while (0)
#else
#define smp_mb()        barrier()
#define smp_rmb()       barrier()
#define smp_wmb()       barrier()
#define smp_read_barrier_depends()      do { } while (0)
#define set_mb(var, value) do { var = value; barrier(); } while (0)
#endif

/* This is a barrier for the compiler only, NOT the processor! */
#define barrier() __asm__ __volatile__("": : :"memory")

#define LOCK_PREFIX ""
#define LOCK ""
#define ADDR (*(volatile long *) addr)

/*
 * Make sure gcc doesn't try to be clever and move things around
 * on us.  We need to use _exactly_ the address the user gave us,
 * not some alias that contains the same information.
 */
typedef struct { volatile int counter; } atomic_t;

#define xen_xchg(ptr,v) \
        ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))

struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((volatile struct __xchg_dummy *)(x))

static __inline unsigned long __xchg(unsigned long x, volatile void *ptr,
    int size)
{
        switch (size) {
        case 1:
                __asm__ __volatile__("xchgb %b0,%1"
                        :"=q" (x)
                        :"m" (*__xg(ptr)), "0" (x)
                        :"memory");
                break;
        case 2:
                __asm__ __volatile__("xchgw %w0,%1"
                        :"=r" (x)
                        :"m" (*__xg(ptr)), "0" (x)
                        :"memory");
                break;
        case 4:
                __asm__ __volatile__("xchgl %0,%1"
                        :"=r" (x)
                        :"m" (*__xg(ptr)), "0" (x)
                        :"memory");
                break;
        }
        return x;
}
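
/*
 * Illustrative sketch only, not part of the original interface: xen_xchg()
 * atomically stores a new value and returns the previous contents, e.g. to
 * claim a flag word.  The function and variable names below are hypothetical.
 */
static __inline unsigned long
example_xchg_claim(volatile unsigned long *flagp)
{
        /* Returns the old value; 0 means the caller took ownership. */
        return xen_xchg(flagp, 1UL);
}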
/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline int test_and_clear_bit(int nr, volatile void *addr)
{
        int oldbit;

        __asm__ __volatile__( LOCK_PREFIX
                "btrl %2,%1\n\tsbbl %0,%0"
                :"=r" (oldbit),"=m" (ADDR)
                :"Ir" (nr) : "memory");
        return oldbit;
}

static __inline int constant_test_bit(int nr, const volatile void *addr)
{
        return ((1UL << (nr & 31)) &
            (((const volatile unsigned int *)addr)[nr >> 5])) != 0;
}

static __inline int variable_test_bit(int nr, volatile void *addr)
{
        int oldbit;

        __asm__ __volatile__(
                "btl %2,%1\n\tsbbl %0,%0"
                :"=r" (oldbit)
                :"m" (ADDR),"Ir" (nr));
        return oldbit;
}

#define test_bit(nr,addr) \
        (__builtin_constant_p(nr) ? \
         constant_test_bit((nr),(addr)) : \
         variable_test_bit((nr),(addr)))

/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void set_bit(int nr, volatile void *addr)
{
        __asm__ __volatile__( LOCK_PREFIX
                "btsl %1,%0"
                :"=m" (ADDR)
                :"Ir" (nr));
}

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void clear_bit(int nr, volatile void *addr)
{
        __asm__ __volatile__( LOCK_PREFIX
                "btrl %1,%0"
                :"=m" (ADDR)
                :"Ir" (nr));
}

/**
 * atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
static __inline__ void atomic_inc(atomic_t *v)
{
        __asm__ __volatile__(
                LOCK "incl %0"
                :"=m" (v->counter)
                :"m" (v->counter));
}

#define rdtscll(val) \
        __asm__ __volatile__("rdtsc" : "=A" (val))

#endif /* !__ASSEMBLY__ */

#endif /* _XEN_OS_H_ */
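
/*
 * Illustrative sketch only, not part of the original header: the atomic bit
 * operations above are typically paired as a producer/consumer pending flag,
 * where a producer posts work with set_bit() and a consumer claims it with
 * test_and_clear_bit().  The macro and counter argument below are hypothetical.
 */
#define EXAMPLE_CLAIM_PENDING(bit, pendingp, cnt) \
do { \
        if (test_and_clear_bit((bit), (pendingp))) \
                atomic_inc(cnt); /* claimed: account for the work */ \
} while (0)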