/*****************************************************************************
 * i386/xen/xen-os.h
 *
 * Random collection of macros and definitions
 *
 * Copyright (c) 2003, 2004 Keir Fraser (on behalf of the Xen team)
 * All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * $FreeBSD$
 */

#ifndef _MACHINE_XEN_XEN_OS_H_
#define _MACHINE_XEN_XEN_OS_H_

#ifdef PAE
#define CONFIG_X86_PAE
#endif

/* Everything below this point is not included by assembler (.S) files. */
#ifndef __ASSEMBLY__

/* Force a proper event-channel callback from Xen. */
void force_evtchn_callback(void);

/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static inline void rep_nop(void)
{
	__asm__ __volatile__ ("rep;nop" : : : "memory");
}
#define cpu_relax()	rep_nop()

#ifndef XENHVM
void xc_printf(const char *fmt, ...);

#ifdef SMP
extern int gdtset;

#include <sys/time.h>	/* XXX for pcpu.h */
#include <sys/pcpu.h>	/* XXX for PCPU_GET */
static inline int
smp_processor_id(void)
{
	if (__predict_true(gdtset))
		return PCPU_GET(cpuid);
	return 0;
}

#else
#define smp_processor_id()	0
#endif

#ifndef PANIC_IF
#define PANIC_IF(exp)							\
	if (__predict_false(exp)) {					\
		printf("panic - %s: %s:%d\n", #exp, __FILE__, __LINE__);\
		panic("%s: %s:%d", #exp, __FILE__, __LINE__);		\
	}
#endif

/*
 * Crude memory allocator for memory allocation early in boot.
 */
void *bootmem_alloc(unsigned int size);
void bootmem_free(void *ptr, unsigned int size);
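
/*
 * Illustrative sketch (not part of the original header): how the busy-wait
 * and sanity-check helpers above are typically combined.  The flag
 * "example_ready" and the iteration bound are hypothetical.
 *
 *	static volatile int example_ready;
 *
 *	static void
 *	example_wait_ready(void)
 *	{
 *		int spins = 0;
 *
 *		while (!example_ready) {
 *			cpu_relax();			// PAUSE inside the busy-wait loop
 *			PANIC_IF(++spins > 100000000);	// give up rather than hang silently
 *		}
 *	}
 */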

/*
 * STI/CLI equivalents. These basically set and clear the virtual
 * event_enable flag in the shared_info structure. Note that when
 * the enable bit is set, there may be pending events to be handled.
 * We may therefore call into do_hypervisor_callback() directly.
 */

#define __cli()								\
do {									\
	vcpu_info_t *_vcpu;						\
	_vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()];	\
	_vcpu->evtchn_upcall_mask = 1;					\
	barrier();							\
} while (0)

#define __sti()								\
do {									\
	vcpu_info_t *_vcpu;						\
	barrier();							\
	_vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()];	\
	_vcpu->evtchn_upcall_mask = 0;					\
	barrier(); /* unmask then check (avoid races) */		\
	if (__predict_false(_vcpu->evtchn_upcall_pending))		\
		force_evtchn_callback();				\
} while (0)

#define __restore_flags(x)						\
do {									\
	vcpu_info_t *_vcpu;						\
	barrier();							\
	_vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()];	\
	if ((_vcpu->evtchn_upcall_mask = (x)) == 0) {			\
		barrier(); /* unmask then check (avoid races) */	\
		if (__predict_false(_vcpu->evtchn_upcall_pending))	\
			force_evtchn_callback();			\
	}								\
} while (0)

/*
 * __save_flags() is referenced by save_flags() below but had no definition
 * here; supplied to read the current event mask without changing it,
 * mirroring __save_and_cli().
 */
#define __save_flags(x)							\
do {									\
	vcpu_info_t *_vcpu;						\
	_vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()];	\
	(x) = _vcpu->evtchn_upcall_mask;				\
} while (0)

/*
 * Add critical_{enter, exit}?
 */
#define __save_and_cli(x)						\
do {									\
	vcpu_info_t *_vcpu;						\
	_vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()];	\
	(x) = _vcpu->evtchn_upcall_mask;				\
	_vcpu->evtchn_upcall_mask = 1;					\
	barrier();							\
} while (0)

#define cli()			__cli()
#define sti()			__sti()
#define save_flags(x)		__save_flags(x)
#define restore_flags(x)	__restore_flags(x)
#define save_and_cli(x)		__save_and_cli(x)

#define local_irq_save(x)	__save_and_cli(x)
#define local_irq_restore(x)	__restore_flags(x)
#define local_irq_disable()	__cli()
#define local_irq_enable()	__sti()

#define mtx_lock_irqsave(lock, x)	{local_irq_save((x)); mtx_lock_spin((lock));}
#define mtx_unlock_irqrestore(lock, x)	{mtx_unlock_spin((lock)); local_irq_restore((x));}
#define spin_lock_irqsave		mtx_lock_irqsave
#define spin_unlock_irqrestore		mtx_unlock_irqrestore

#endif /* !XENHVM */

/* This is a barrier for the compiler only, NOT the processor! */
#define barrier() __asm__ __volatile__("" : : : "memory")

#define LOCK_PREFIX ""
#define LOCK ""
#define ADDR (*(volatile long *) addr)
/*
 * Make sure gcc doesn't try to be clever and move things around
 * on us. We need to use _exactly_ the address the user gave us,
 * not some alias that contains the same information.
 */
typedef struct { volatile int counter; } atomic_t;

#define xen_xchg(ptr, v)						\
	((__typeof__(*(ptr)))__xchg((unsigned long)(v), (ptr), sizeof(*(ptr))))
struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((volatile struct __xchg_dummy *)(x))
static __inline unsigned long __xchg(unsigned long x, volatile void *ptr,
    int size)
{
	switch (size) {
	case 1:
		__asm__ __volatile__("xchgb %b0,%1"
		    :"=q" (x)
		    :"m" (*__xg(ptr)), "0" (x)
		    :"memory");
		break;
	case 2:
		__asm__ __volatile__("xchgw %w0,%1"
		    :"=r" (x)
		    :"m" (*__xg(ptr)), "0" (x)
		    :"memory");
		break;
	case 4:
		__asm__ __volatile__("xchgl %0,%1"
		    :"=r" (x)
		    :"m" (*__xg(ptr)), "0" (x)
		    :"memory");
		break;
	}
	return x;
}
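
/*
 * Illustrative sketch (not part of the original header): the event-mask
 * macros above are meant for the usual save/disable ... restore pattern
 * around a short critical section.  "flags" and the work done inside the
 * section are hypothetical.
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);		// mask upcalls, remembering old state
 *	// ... touch state shared with the event-channel upcall handler ...
 *	local_irq_restore(flags);	// unmask; fires callback if events pend
 */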

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline int test_and_clear_bit(int nr, volatile void *addr)
{
	int oldbit;

	__asm__ __volatile__( LOCK_PREFIX
	    "btrl %2,%1\n\tsbbl %0,%0"
	    :"=r" (oldbit),"=m" (ADDR)
	    :"Ir" (nr) : "memory");
	return oldbit;
}

static __inline int constant_test_bit(int nr, const volatile void *addr)
{
	return ((1UL << (nr & 31)) &
	    (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
}

static __inline int variable_test_bit(int nr, volatile void *addr)
{
	int oldbit;

	__asm__ __volatile__(
	    "btl %2,%1\n\tsbbl %0,%0"
	    :"=r" (oldbit)
	    :"m" (ADDR),"Ir" (nr));
	return oldbit;
}

#define test_bit(nr, addr)			\
	(__builtin_constant_p(nr) ?		\
	 constant_test_bit((nr), (addr)) :	\
	 variable_test_bit((nr), (addr)))

/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered. See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void set_bit(int nr, volatile void *addr)
{
	__asm__ __volatile__( LOCK_PREFIX
	    "btsl %1,%0"
	    :"=m" (ADDR)
	    :"Ir" (nr));
}

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered. However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void clear_bit(int nr, volatile void *addr)
{
	__asm__ __volatile__( LOCK_PREFIX
	    "btrl %1,%0"
	    :"=m" (ADDR)
	    :"Ir" (nr));
}

/**
 * atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
static __inline__ void atomic_inc(atomic_t *v)
{
	__asm__ __volatile__(
	    LOCK "incl %0"
	    :"=m" (v->counter)
	    :"m" (v->counter));
}

#define rdtscll(val)					\
	__asm__ __volatile__("rdtsc" : "=A" (val))

#endif /* !__ASSEMBLY__ */

#endif /* _MACHINE_XEN_XEN_OS_H_ */
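
/*
 * Illustrative sketch (not part of the original header): typical use of the
 * Linux-style bit and atomic helpers defined above.  The bitmap, bit index
 * and counter names are hypothetical.
 *
 *	static unsigned long example_pending[2];	// 64-bit-wide bitmap
 *	static atomic_t example_count;
 *
 *	set_bit(37, example_pending);			// atomic set (LOCK btsl)
 *	if (test_and_clear_bit(37, example_pending))	// returns the old value
 *		atomic_inc(&example_count);		// count each bit handled
 */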