/*-
 * Copyright (c) 2010 Max Khon <fjoe@freebsd.org>
 * Copyright (c) 2012 Oleksandr Tymoshenko <gonzo@bluezbox.com>
 * Copyright (c) 2013 Jared D. McNeill <jmcneill@invisible.ca>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef __VCHI_BSD_H__
#define __VCHI_BSD_H__

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/sx.h>
#include <sys/sema.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/types.h>
#include <sys/ioccom.h>

/*
 * Copy from/to user API
 */
#define copy_from_user(to, from, n)	copyin((from), (to), (n))
#define copy_to_user(to, from, n)	copyout((from), (to), (n))

/*
 * Bit API
 */

static __inline int
test_and_set_bit(int nr, volatile void *addr)
{
	int val;

	do {
		val = *(volatile int *)addr;
	} while (atomic_cmpset_int(addr, val, val | (1 << nr)) == 0);
	return (val & (1 << nr));
}

static __inline int
test_and_clear_bit(int nr, volatile void *addr)
{
	int val;

	do {
		val = *(volatile int *)addr;
	} while (atomic_cmpset_int(addr, val, val & ~(1 << nr)) == 0);
	return (val & (1 << nr));
}

/*
 * Atomic API
 */
typedef volatile unsigned atomic_t;

#define atomic_set(p, v)	(*(p) = (v))
#define atomic_read(p)		(*(p))
#define atomic_inc(p)		atomic_add_int(p, 1)
#define atomic_dec(p)		atomic_subtract_int(p, 1)
#define atomic_dec_and_test(p)	(atomic_fetchadd_int(p, -1) == 1)
#define atomic_inc_return(v)	atomic_add_return(1, (v))
#define atomic_dec_return(v)	atomic_sub_return(1, (v))
#define atomic_add(v, p)	atomic_add_int(p, v)
#define atomic_sub(v, p)	atomic_subtract_int(p, v)

#define ATOMIC_INIT(v)		(v)

static inline int
atomic_add_return(int i, atomic_t *v)
{
	return (i + atomic_fetchadd_int(v, i));
}

static inline int
atomic_sub_return(int i, atomic_t *v)
{
	return (atomic_fetchadd_int(v, -i) - i);
}

static inline int
atomic_cmpxchg(atomic_t *v, int oldv, int newv)
{
	if (atomic_cmpset_rel_int(v, oldv, newv))
		return (newv);
	else
		return (*v);
}

static inline int
atomic_xchg(atomic_t *v, int newv)
{
	int oldv;

	if (newv == 0)
		return (atomic_readandclear_int(v));
	else {
		do {
			oldv = atomic_load_acq_int(v);
		} while (!atomic_cmpset_rel_int(v, oldv, newv));
	}
	return (oldv);
}
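
/*
 * Usage sketch (illustrative only, not part of this header): the bit and
 * atomic wrappers above let Linux-style reference counting compile
 * unchanged; atomic_dec_and_test() returns non-zero once the count drops
 * back to zero.  The names vchi_users, vchi_get() and vchi_put() below
 * are hypothetical.
 *
 *	static atomic_t vchi_users = ATOMIC_INIT(0);
 *
 *	static void
 *	vchi_get(void)
 *	{
 *		atomic_inc(&vchi_users);
 *	}
 *
 *	static int
 *	vchi_put(void)
 *	{
 *		return (atomic_dec_and_test(&vchi_users));
 *	}
 */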

/*
 * Spinlock API
 */
typedef struct mtx spinlock_t;

#define DEFINE_SPINLOCK(name)					\
	struct mtx name
#define spin_lock_init(lock)	mtx_init(lock, "VCHI spinlock " #lock, NULL, MTX_DEF)
#define spin_lock_destroy(lock)	mtx_destroy(lock)
#define spin_lock(lock)		mtx_lock(lock)
#define spin_unlock(lock)	mtx_unlock(lock)
#define spin_lock_bh(lock)	spin_lock(lock)
#define spin_unlock_bh(lock)	spin_unlock(lock)

/*
 * Mutex API
 */
struct mutex {
	struct sx	mtx;
};

#define lmutex_init(lock)	sx_init(&(lock)->mtx, #lock)
#define lmutex_lock(lock)	sx_xlock(&(lock)->mtx)
#define lmutex_unlock(lock)	sx_unlock(&(lock)->mtx)
#define lmutex_destroy(lock)	sx_destroy(&(lock)->mtx)

#define lmutex_lock_interruptible(lock)	sx_xlock_sig(&(lock)->mtx)

/*
 * Rwlock API
 */
typedef struct rwlock rwlock_t;

#if defined(SX_ADAPTIVESPIN) && !defined(SX_NOADAPTIVE)
#define SX_NOADAPTIVE	SX_ADAPTIVESPIN
#endif

#define DEFINE_RWLOCK(name)					\
	struct rwlock name;					\
	RW_SYSINIT(name, &name, #name)
#define rwlock_init(rwlock)	rw_init(rwlock, "VCHI rwlock")
#define read_lock(rwlock)	rw_rlock(rwlock)
#define read_unlock(rwlock)	rw_runlock(rwlock)

#define write_lock(rwlock)	rw_wlock(rwlock)
#define write_unlock(rwlock)	rw_wunlock(rwlock)
#define write_lock_irqsave(rwlock, flags)			\
	do {							\
		rw_wlock(rwlock);				\
		(void) &(flags);				\
	} while (0)
#define write_unlock_irqrestore(rwlock, flags)			\
	rw_wunlock(rwlock)

#define read_lock_bh(rwlock)	rw_rlock(rwlock)
#define read_unlock_bh(rwlock)	rw_runlock(rwlock)
#define write_lock_bh(rwlock)	rw_wlock(rwlock)
#define write_unlock_bh(rwlock)	rw_wunlock(rwlock)

/*
 * Timer API
 */
struct timer_list {
	struct mtx	mtx;
	struct callout	callout;

	unsigned long	expires;
	void		(*function)(unsigned long);
	unsigned long	data;
};

void init_timer(struct timer_list *t);
void setup_timer(struct timer_list *t, void (*function)(unsigned long),
    unsigned long data);
void mod_timer(struct timer_list *t, unsigned long expires);
void add_timer(struct timer_list *t);
int del_timer(struct timer_list *t);
int del_timer_sync(struct timer_list *t);

/*
 * Completion API
 */
struct completion {
	struct cv	cv;
	struct mtx	lock;
	int		done;
};

void init_completion(struct completion *c);
void destroy_completion(struct completion *c);
int try_wait_for_completion(struct completion *);
int wait_for_completion_interruptible(struct completion *);
int wait_for_completion_interruptible_timeout(struct completion *,
    unsigned long ticks);
int wait_for_completion_killable(struct completion *);
void wait_for_completion(struct completion *c);
void complete(struct completion *c);
void complete_all(struct completion *c);
void INIT_COMPLETION_locked(struct completion *c);

#define INIT_COMPLETION(x)	INIT_COMPLETION_locked(&(x))
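
/*
 * Usage sketch (illustrative only, not part of this header): a completion
 * is typically initialised once, signalled by one context and awaited by
 * another.  The name msg_done and the -EINTR return convention below are
 * hypothetical.
 *
 *	static struct completion msg_done;
 *
 *	init_completion(&msg_done);
 *
 *	In the notifying context:
 *		complete(&msg_done);
 *
 *	In the waiting context:
 *		if (wait_for_completion_interruptible(&msg_done) != 0)
 *			return (-EINTR);
 */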

/*
 * Semaphore API
 */
struct semaphore {
	struct mtx	mtx;
	struct cv	cv;
	int		value;
	int		waiters;
};

#define DEFINE_SEMAPHORE(name)					\
	struct semaphore name;					\
	SYSINIT(name##_sema_sysinit, SI_SUB_LOCK, SI_ORDER_MIDDLE,	\
	    sema_sysinit, &name);				\
	SYSUNINIT(name##_sema_sysuninit, SI_SUB_LOCK, SI_ORDER_MIDDLE,	\
	    _sema_destroy, __DEVOLATILE(void *, &(name)))

void sema_sysinit(void *arg);
void _sema_init(struct semaphore *s, int value);
void _sema_destroy(struct semaphore *s);
void down(struct semaphore *s);
int down_interruptible(struct semaphore *s);
int down_trylock(struct semaphore *s);
void up(struct semaphore *s);

/*
 * Logging and assertions API
 */
void rlprintf(int pps, const char *fmt, ...)
	__printflike(2, 3);

void device_rlprintf(int pps, device_t dev, const char *fmt, ...)
	__printflike(3, 4);

#define might_sleep()

#define WARN(condition, msg)					\
({								\
	int __ret_warn_on = !!(condition);			\
	if (unlikely(__ret_warn_on))				\
		printf((msg));					\
	unlikely(__ret_warn_on);				\
})

#define WARN_ON(condition)					\
({								\
	int __ret_warn_on = !!(condition);			\
	if (unlikely(__ret_warn_on))				\
		printf("WARN_ON: " #condition "\n");		\
	unlikely(__ret_warn_on);				\
})

#define WARN_ON_ONCE(condition) ({				\
	static int __warned;					\
	int __ret_warn_once = !!(condition);			\
								\
	if (unlikely(__ret_warn_once))				\
		if (WARN_ON(!__warned))				\
			__warned = 1;				\
	unlikely(__ret_warn_once);				\
})

#define BUG_ON(cond)						\
	do {							\
		if (cond)					\
			panic("BUG_ON: " #cond);		\
	} while (0)

#define BUG()							\
	do {							\
		panic("BUG: %s:%d", __FILE__, __LINE__);	\
	} while (0)

#define vchiq_static_assert(cond)	CTASSERT(cond)

#define KERN_EMERG	"<0>"	/* system is unusable */
#define KERN_ALERT	"<1>"	/* action must be taken immediately */
#define KERN_CRIT	"<2>"	/* critical conditions */
#define KERN_ERR	"<3>"	/* error conditions */
#define KERN_WARNING	"<4>"	/* warning conditions */
#define KERN_NOTICE	"<5>"	/* normal but significant condition */
#define KERN_INFO	"<6>"	/* informational */
#define KERN_DEBUG	"<7>"	/* debug-level messages */
#define KERN_CONT	""

#define printk(fmt, args...)	printf(fmt, ##args)
#define vprintk(fmt, args)	vprintf(fmt, args)

/*
 * Malloc API
 */
#define GFP_KERNEL	0
#define GFP_ATOMIC	0

MALLOC_DECLARE(M_VCHI);

#define kmalloc(size, flags)	malloc((size), M_VCHI, M_NOWAIT | M_ZERO)
#define kcalloc(n, size, flags)	malloc((n) * (size), M_VCHI, M_NOWAIT | M_ZERO)
#define kzalloc(a, b)		kcalloc(1, (a), (b))
#define kfree(p)		free(p, M_VCHI)

/*
 * Kernel module API
 */
#define __init
#define __exit
#define __devinit
#define __devexit
#define __devinitdata

/*
 * Time API
 */
#if 1
/* emulate jiffies */
static inline unsigned long
_jiffies(void)
{
	struct timeval tv;

	microuptime(&tv);
	return (tvtohz(&tv));
}

static inline unsigned long
msecs_to_jiffies(unsigned long msecs)
{
	struct timeval tv;

	/* Convert milliseconds (not microseconds) to a timeval. */
	tv.tv_sec = msecs / 1000UL;
	tv.tv_usec = (msecs % 1000UL) * 1000UL;
	return (tvtohz(&tv));
}

#define jiffies			_jiffies()
#else
#define jiffies			ticks
#endif
#define HZ			hz

#define udelay(usec)		DELAY(usec)
#define mdelay(msec)		DELAY((msec) * 1000)

#define schedule_timeout(jiff)	pause("dhdslp", jiff)

#if defined(msleep)
#undef msleep
#endif
#define msleep(msec)		mdelay(msec)

#define time_after(a, b)	((a) > (b))
#define time_after_eq(a, b)	((a) >= (b))
#define time_before(a, b)	time_after((b), (a))
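
/*
 * Usage sketch (illustrative only, not part of this header): the jiffies
 * emulation above lets ported timeout loops run unmodified, e.g. polling
 * a device for at most 100 ms.  device_ready() is a hypothetical helper.
 *
 *	unsigned long timeout = jiffies + msecs_to_jiffies(100);
 *
 *	while (!device_ready() && time_before(jiffies, timeout))
 *		msleep(1);
 */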

/*
 * kthread API (we use proc)
 */
typedef struct proc * VCHIQ_THREAD_T;

VCHIQ_THREAD_T vchiq_thread_create(int (*threadfn)(void *data),
    void *data, const char namefmt[], ...);
void set_user_nice(VCHIQ_THREAD_T p, int nice);
void wake_up_process(VCHIQ_THREAD_T p);

/*
 * Proc APIs
 */
void flush_signals(VCHIQ_THREAD_T);
int fatal_signal_pending(VCHIQ_THREAD_T);

/*
 * mbox API
 */
void bcm_mbox_write(int channel, uint32_t data);

/*
 * Misc API
 */

#define ENODATA			EINVAL

#define __user

#define likely(x)		__builtin_expect(!!(x), 1)
#define unlikely(x)		__builtin_expect(!!(x), 0)
#define current			curproc
#define EXPORT_SYMBOL(x)
#define PAGE_ALIGN(addr)	round_page(addr)

typedef void irqreturn_t;
typedef off_t loff_t;

#define BCM2835_MBOX_CHAN_VCHIQ	3

/* Linux smp_mb() is a full memory barrier. */
#define smp_mb	mb
#define smp_rmb	rmb
#define smp_wmb	wmb

#define device_print_prettyname(dev)	device_printf((dev), "")

#endif /* __VCHI_BSD_H__ */