1 /*- 2 * Copyright (c) 2010 Max Khon <fjoe@freebsd.org> 3 * Copyright (c) 2012 Oleksandr Tymoshenko <gonzo@bluezbox.com> 4 * Copyright (c) 2013 Jared D. McNeill <jmcneill@invisible.ca> 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 
 */

/*
 * Linux-style kernel compatibility shims for the VCHI/VCHIQ driver:
 * maps Linux locking, atomic, timer, completion and semaphore APIs
 * onto native FreeBSD kernel primitives.
 */
#ifndef __VCHI_BSD_H__
#define __VCHI_BSD_H__

#include <sys/systm.h>
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/mutex.h>
#include <sys/sx.h>
#include <sys/sema.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/types.h>
#include <sys/ioccom.h>

/*
 * Copy from/to user API
 *
 * Note the argument-order swap: Linux takes (dst, src, len) while the
 * FreeBSD copyin()/copyout() primitives take (src, dst, len).
 */
#define copy_from_user(to, from, n)	copyin((from), (to), (n))
#define copy_to_user(to, from, n)	copyout((from), (to), (n))

/*
 * Bit API
 */

/*
 * Atomically set bit 'nr' of the int at 'addr' via a compare-and-swap
 * loop; returns non-zero iff the bit was already set.
 * NOTE(review): operates on a single int word, so 'nr' is presumably
 * expected to be 0..31 — larger shifts would be undefined; confirm
 * against callers.
 */
static __inline int
test_and_set_bit(int nr, volatile void *addr)
{
	int val;

	do {
		val = *(volatile int *) addr;
	} while (atomic_cmpset_int(addr, val, val | (1 << nr)) == 0);
	return (val & (1 << nr));
}

/*
 * Atomically clear bit 'nr' of the int at 'addr'; returns non-zero iff
 * the bit was previously set.  Same CAS-loop scheme as above.
 */
static __inline__
int test_and_clear_bit(int nr, volatile void *addr)
{
	int val;

	do {
		val = *(volatile int *) addr;
	} while (atomic_cmpset_int(addr, val, val & ~(1 << nr)) == 0);
	return (val & (1 << nr));
}

/*
 * Atomic API
 *
 * Linux atomic_t emulated as a bare volatile unsigned int manipulated
 * with the FreeBSD atomic_*_int() operations.
 */
typedef volatile unsigned atomic_t;

/* atomic_set()/atomic_read() are plain accesses with no barriers. */
#define atomic_set(p, v)	(*(p) = (v))
#define atomic_read(p)		(*(p))
#define atomic_inc(p)		atomic_add_int(p, 1)
#define atomic_dec(p)		atomic_subtract_int(p, 1)
/*
 * True iff the decrement took the counter to zero
 * (atomic_fetchadd_int() returns the pre-decrement value).
 */
#define atomic_dec_and_test(p)	(atomic_fetchadd_int(p, -1) == 1)
#define atomic_inc_return(v)	atomic_add_return(1, (v))
#define atomic_dec_return(v)	atomic_sub_return(1, (v))
#define atomic_add(v, p)	atomic_add_int(p, v)
#define atomic_sub(v, p)	atomic_subtract_int(p, v)

#define ATOMIC_INIT(v)		(v)

/* Add 'i' to *v and return the post-add value. */
static inline int
atomic_add_return(int i, atomic_t *v)
{
	return i + atomic_fetchadd_int(v, i);
}

/* Subtract 'i' from *v and return the post-subtract value. */
static inline int
atomic_sub_return(int i, atomic_t *v)
{
	return atomic_fetchadd_int(v, -i) - i;
}

/*
 * Compare-and-exchange: if *v == oldv, store newv and return newv;
 * otherwise return the current value of *v.
 * NOTE(review): on failure the value is re-read non-atomically, so the
 * result may differ from the value that made the cmpset fail.
 */
static inline int
atomic_cmpxchg(atomic_t *v, int oldv, int newv)
{
	if (atomic_cmpset_rel_int(v, oldv, newv))
		return newv;
	else
		return *v;
}

/* Atomically replace *v with newv and return the previous value. */
static inline int
atomic_xchg(atomic_t *v, int newv)
{
	int oldv;
	if (newv == 0)
		/* Exchanging in zero has a dedicated primitive. */
		return atomic_readandclear_int(v);
	else {
		do {
			oldv = atomic_load_acq_int(v);
		} while (!atomic_cmpset_rel_int(v, oldv, newv));
	}

	return (oldv);
}

/*
 * Spinlock API
 *
 * Backed by a regular MTX_DEF mutex rather than a spin mutex; the _bh
 * variants are therefore plain aliases of the unsuffixed ones.
 */
typedef struct mtx spinlock_t;

#define DEFINE_SPINLOCK(name)	\
	struct mtx name
#define spin_lock_init(lock)	mtx_init(lock, "VCHI spinlock " # lock, NULL, MTX_DEF)
#define spin_lock_destroy(lock)	mtx_destroy(lock)
#define spin_lock(lock)		mtx_lock(lock)
#define spin_unlock(lock)	mtx_unlock(lock)
#define spin_lock_bh(lock)	spin_lock(lock)
#define spin_unlock_bh(lock)	spin_unlock(lock)

/*
 * Mutex API
 *
 * Named 'lmutex' ("Linux mutex") to avoid clashing with the native
 * mtx_* namespace.
 */
struct mutex {
	struct mtx	mtx;
};

#define lmutex_init(lock)	mtx_init(&(lock)->mtx, #lock, NULL, MTX_DEF)
#define lmutex_lock(lock)	mtx_lock(&(lock)->mtx)
#define lmutex_unlock(lock)	mtx_unlock(&(lock)->mtx)
#define lmutex_destroy(lock)	mtx_destroy(&(lock)->mtx)

/*
 * "Interruptible" acquire: mtx_lock() cannot be interrupted by a
 * signal, so this always acquires the lock and returns 0.
 */
static __inline int
lmutex_lock_interruptible(struct mutex *lock)
{
	mtx_lock(&(lock)->mtx);
	return 0;
}

/*
 * Rwlock API
 *
 * Emulated with a shared/exclusive (sx) lock; the irqsave/bh variants
 * ignore their extra semantics and simply take the sx lock.
 */
typedef struct sx rwlock_t;

/* SX_ADAPTIVESPIN was renamed SX_NOADAPTIVE; cope with either spelling. */
#if defined(SX_ADAPTIVESPIN) && !defined(SX_NOADAPTIVE)
#define SX_NOADAPTIVE SX_ADAPTIVESPIN
#endif

#define DEFINE_RWLOCK(name)	\
	struct sx name;		\
	SX_SYSINIT(name, &name, #name)
#define rwlock_init(rwlock)	sx_init_flags(rwlock, "VCHI rwlock", SX_NOADAPTIVE)
#define read_lock(rwlock)	sx_slock(rwlock)
#define read_unlock(rwlock)	sx_sunlock(rwlock)

#define write_lock(rwlock)	sx_xlock(rwlock)
#define write_unlock(rwlock)	sx_xunlock(rwlock)
/* 'flags' is only referenced to silence unused-variable warnings. */
#define write_lock_irqsave(rwlock, flags)	\
	do {					\
		sx_xlock(rwlock);		\
		(void) &(flags);		\
	} while (0)
#define write_unlock_irqrestore(rwlock, flags) \
	sx_xunlock(rwlock)

#define read_lock_bh(rwlock)	sx_slock(rwlock)
#define read_unlock_bh(rwlock)	sx_sunlock(rwlock)
#define write_lock_bh(rwlock)	sx_xlock(rwlock)
#define write_unlock_bh(rwlock)	sx_xunlock(rwlock)

/*
 * Timer API
 *
 * Linux timer_list emulated with a callout.
 */
struct timer_list {
	struct mtx mtx;			/* protects the callout */
	struct callout callout;

	unsigned long expires;		/* expiry time, in ticks */
	void (*function)(unsigned long); /* handler to run on expiry */
	unsigned long data;		/* argument passed to 'function' */
};

void init_timer(struct timer_list *t);
void setup_timer(struct timer_list *t, void (*function)(unsigned long), unsigned long data);
void mod_timer(struct timer_list *t, unsigned long expires);
void add_timer(struct timer_list *t);
int del_timer(struct timer_list *t);
int del_timer_sync(struct timer_list *t);

/*
 * Completion API
 *
 * Condition-variable based; 'done' counts complete() calls not yet
 * consumed by a waiter.
 */
struct completion {
	struct cv cv;
	struct mtx lock;	/* protects 'done' and the cv */
	int done;
};

void init_completion(struct completion *c);
void destroy_completion(struct completion *c);
int try_wait_for_completion(struct completion *);
int wait_for_completion_interruptible(struct completion *);
int wait_for_completion_interruptible_timeout(struct completion *, unsigned long ticks);
int wait_for_completion_killable(struct completion *);
void wait_for_completion(struct completion *c);
void complete(struct completion *c);
void complete_all(struct completion *c);
void INIT_COMPLETION_locked(struct completion *c);

#define INIT_COMPLETION(x)	INIT_COMPLETION_locked(&(x))

/*
 * Semaphore API
 *
 * Counting semaphore built from a mutex and a condition variable.
 */
struct semaphore {
	struct mtx mtx;		/* protects 'value' and 'waiters' */
	struct cv cv;
	int value;		/* current count */
	int waiters;		/* threads currently blocked */
};

/*
 * Statically define a semaphore that is initialized at SI_SUB_LOCK
 * time and destroyed again on unload.
 */
#define DEFINE_SEMAPHORE(name)						\
	struct semaphore name;						\
	SYSINIT(name##_sema_sysinit, SI_SUB_LOCK, SI_ORDER_MIDDLE,	\
	    sema_sysinit, &name);					\
	SYSUNINIT(name##_sema_sysuninit, SI_SUB_LOCK, SI_ORDER_MIDDLE,	\
	    _sema_destroy, __DEVOLATILE(void *, &(name)))
253 void sema_sysinit(void *arg); 254 void _sema_init(struct semaphore *s, int value); 255 void _sema_destroy(struct semaphore *s); 256 void down(struct semaphore *s); 257 int down_interruptible(struct semaphore *s); 258 int down_trylock(struct semaphore *s); 259 void up(struct semaphore *s); 260 261 /* 262 * Logging and assertions API 263 */ 264 void rlprintf(int pps, const char *fmt, ...) 265 __printflike(2, 3); 266 267 void 268 device_rlprintf(int pps, device_t dev, const char *fmt, ...) 269 __printflike(3, 4); 270 271 #define might_sleep() 272 273 #define WARN(condition, msg) \ 274 ({ \ 275 int __ret_warn_on = !!(condition); \ 276 if (unlikely(__ret_warn_on)) \ 277 printf((msg)); \ 278 unlikely(__ret_warn_on); \ 279 }) 280 281 282 283 #define WARN_ON(condition) \ 284 ({ \ 285 int __ret_warn_on = !!(condition); \ 286 if (unlikely(__ret_warn_on)) \ 287 printf("WARN_ON: " #condition "\n"); \ 288 unlikely(__ret_warn_on); \ 289 }) 290 291 #define WARN_ON_ONCE(condition) ({ \ 292 static int __warned; \ 293 int __ret_warn_once = !!(condition); \ 294 \ 295 if (unlikely(__ret_warn_once)) \ 296 if (WARN_ON(!__warned)) \ 297 __warned = 1; \ 298 unlikely(__ret_warn_once); \ 299 }) 300 301 #define BUG_ON(cond) \ 302 do { \ 303 if (cond) \ 304 panic("BUG_ON: " #cond); \ 305 } while (0) 306 307 #define BUG() \ 308 do { \ 309 panic("BUG: %s:%d", __FILE__, __LINE__); \ 310 } while (0) 311 312 #define vchiq_static_assert(cond) CTASSERT(cond) 313 314 #define KERN_EMERG "<0>" /* system is unusable */ 315 #define KERN_ALERT "<1>" /* action must be taken immediately */ 316 #define KERN_CRIT "<2>" /* critical conditions */ 317 #define KERN_ERR "<3>" /* error conditions */ 318 #define KERN_WARNING "<4>" /* warning conditions */ 319 #define KERN_NOTICE "<5>" /* normal but significant condition */ 320 #define KERN_INFO "<6>" /* informational */ 321 #define KERN_DEBUG "<7>" /* debug-level messages */ 322 #define KERN_CONT "" 323 324 #define printk(fmt, args...) 
printf(fmt, ##args) 325 #define vprintk(fmt, args) vprintf(fmt, args) 326 327 /* 328 * Malloc API 329 */ 330 #define GFP_KERNEL 0 331 #define GFP_ATOMIC 0 332 333 MALLOC_DECLARE(M_VCHI); 334 335 #define kmalloc(size, flags) malloc((size), M_VCHI, M_NOWAIT | M_ZERO) 336 #define kcalloc(n, size, flags) malloc((n) * (size), M_VCHI, M_NOWAIT | M_ZERO) 337 #define kzalloc(a, b) kcalloc(1, (a), (b)) 338 #define kfree(p) free(p, M_VCHI) 339 340 /* 341 * Kernel module API 342 */ 343 #define __init 344 #define __exit 345 #define __devinit 346 #define __devexit 347 #define __devinitdata 348 349 /* 350 * Time API 351 */ 352 #if 1 353 /* emulate jiffies */ 354 static inline unsigned long 355 _jiffies(void) 356 { 357 struct timeval tv; 358 359 microuptime(&tv); 360 return tvtohz(&tv); 361 } 362 363 static inline unsigned long 364 msecs_to_jiffies(unsigned long msecs) 365 { 366 struct timeval tv; 367 368 tv.tv_sec = msecs / 1000000UL; 369 tv.tv_usec = msecs % 1000000UL; 370 return tvtohz(&tv); 371 } 372 373 #define jiffies _jiffies() 374 #else 375 #define jiffies ticks 376 #endif 377 #define HZ hz 378 379 #define udelay(usec) DELAY(usec) 380 #define mdelay(msec) DELAY((msec) * 1000) 381 382 #define schedule_timeout(jiff) pause("dhdslp", jiff) 383 384 #if defined(msleep) 385 #undef msleep 386 #endif 387 #define msleep(msec) mdelay(msec) 388 389 #define time_after(a, b) ((a) > (b)) 390 #define time_after_eq(a, b) ((a) >= (b)) 391 #define time_before(a, b) time_after((b), (a)) 392 393 /* 394 * kthread API (we use proc) 395 */ 396 typedef struct proc * VCHIQ_THREAD_T; 397 398 VCHIQ_THREAD_T vchiq_thread_create(int (*threadfn)(void *data), 399 void *data, 400 const char namefmt[], ...); 401 void set_user_nice(VCHIQ_THREAD_T p, int nice); 402 void wake_up_process(VCHIQ_THREAD_T p); 403 404 /* 405 * Proc APIs 406 */ 407 void flush_signals(VCHIQ_THREAD_T); 408 int fatal_signal_pending(VCHIQ_THREAD_T); 409 410 /* 411 * mbox API 412 */ 413 void bcm_mbox_write(int channel, uint32_t data); 
414 415 /* 416 * Misc API 417 */ 418 419 #define ENODATA EINVAL 420 421 #define __user 422 423 #define likely(x) __builtin_expect(!!(x), 1) 424 #define unlikely(x) __builtin_expect(!!(x), 0) 425 #define current curproc 426 #define EXPORT_SYMBOL(x) 427 #define PAGE_ALIGN(addr) round_page(addr) 428 429 typedef void irqreturn_t; 430 typedef off_t loff_t; 431 432 #define BCM2835_MBOX_CHAN_VCHIQ 3 433 434 #define smp_mb wmb 435 #define smp_rmb rmb 436 #define smp_wmb wmb 437 438 #define device_print_prettyname(dev) device_printf((dev), "") 439 440 #endif /* __VCHI_BSD_H__ */ 441