/*-
 * Copyright (c) 2010 Max Khon <fjoe@freebsd.org>
 * Copyright (c) 2012 Oleksandr Tymoshenko <gonzo@bluezbox.com>
 * Copyright (c) 2013 Jared D. McNeill <jmcneill@invisible.ca>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#ifndef __VCHI_BSD_H__
#define __VCHI_BSD_H__

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/sx.h>
#include <sys/sema.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/types.h>
#include <sys/ioccom.h>

/*
 * Copy from/to user API
 */
#define copy_from_user(to, from, n)	copyin((from), (to), (n))
#define copy_to_user(to, from, n)	copyout((from), (to), (n))
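/*
 * Note the argument-order mapping: the Linux calls take (destination,
 * source, length) while copyin(9)/copyout(9) take (source, destination,
 * length).  The return convention differs as well: these return 0 or
 * EFAULT rather than a count of bytes left uncopied.
 */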

/*
 * Bit API
 */

static __inline int
test_and_set_bit(int nr, volatile void *addr)
{
	int val;

	do {
		val = *(volatile int *) addr;
	} while (atomic_cmpset_int(addr, val, val | (1 << nr)) == 0);
	return (val & (1 << nr));
}

static __inline__
int test_and_clear_bit(int nr, volatile void *addr)
{
	int val;

	do {
		val = *(volatile int *) addr;
	} while (atomic_cmpset_int(addr, val, val & ~(1 << nr)) == 0);
	return (val & (1 << nr));
}
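/*
 * Both helpers emulate the Linux bit operations with a compare-and-set
 * loop on the containing 32-bit word: they atomically set or clear bit
 * 'nr' and return non-zero if that bit was already set beforehand.
 * Only bits 0..31 of an int-aligned word are supported.
 */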

/*
 * Atomic API
 */
typedef volatile unsigned atomic_t;

#define atomic_set(p, v)	(*(p) = (v))
#define atomic_read(p)		(*(p))
#define atomic_inc(p)		atomic_add_int(p, 1)
#define atomic_dec(p)		atomic_subtract_int(p, 1)
#define atomic_dec_and_test(p)	(atomic_fetchadd_int(p, -1) == 1)
#define	atomic_inc_return(v)	atomic_add_return(1, (v))
#define	atomic_dec_return(v)	atomic_sub_return(1, (v))
#define atomic_add(v, p)	atomic_add_int(p, v)
#define atomic_sub(v, p)	atomic_subtract_int(p, v)

#define ATOMIC_INIT(v)		(v)

static inline int
atomic_add_return(int i, atomic_t *v)
{
	return i + atomic_fetchadd_int(v, i);
}

static inline int
atomic_sub_return(int i, atomic_t *v)
{
	return atomic_fetchadd_int(v, -i) - i;
}

static inline int
atomic_cmpxchg(atomic_t *v, int oldv, int newv)
{
	if (atomic_cmpset_rel_int(v, oldv, newv))
		return newv;
	else
		return *v;
}
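/*
 * Unlike the Linux primitive, which always returns the value the atomic
 * held before the operation, this shim returns 'newv' when the swap
 * succeeds and a fresh (unsynchronized) re-read of the variable when it
 * fails.
 */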

static inline int
atomic_xchg(atomic_t *v, int newv)
{
	int oldv;
	if (newv == 0)
		return atomic_readandclear_int(v);
	else {
		do {
			oldv = atomic_load_acq_int(v);
		} while (!atomic_cmpset_rel_int(v, oldv, newv));
	}

	return (oldv);
}

/*
 * Spinlock API
 */
typedef struct mtx spinlock_t;

#define DEFINE_SPINLOCK(name)				\
	struct mtx name
#define spin_lock_init(lock)	mtx_init(lock, "VCHI spinlock " # lock, NULL, MTX_DEF)
#define spin_lock_destroy(lock)	mtx_destroy(lock)
#define spin_lock(lock)		mtx_lock(lock)
#define spin_unlock(lock)	mtx_unlock(lock)
#define spin_lock_bh(lock)	spin_lock(lock)
#define spin_unlock_bh(lock)	spin_unlock(lock)
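/*
 * Linux spinlocks are backed by ordinary (sleepable) MTX_DEF mutexes
 * here rather than spin mutexes, and the _bh variants are plain
 * aliases since there is no bottom-half context to mask.
 */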

/*
 * Mutex API
 */
struct mutex {
	struct sx	mtx;
};

#define	lmutex_init(lock)	sx_init(&(lock)->mtx, #lock)
#define lmutex_lock(lock)	sx_xlock(&(lock)->mtx)
#define	lmutex_unlock(lock)	sx_unlock(&(lock)->mtx)
#define	lmutex_destroy(lock)	sx_destroy(&(lock)->mtx)

#define lmutex_lock_interruptible(lock)	sx_xlock_sig(&(lock)->mtx)
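/*
 * 'struct mutex' wraps an sx(9) lock; the lmutex_* names (rather than
 * mutex_*) presumably avoid colliding with FreeBSD's own mutex
 * namespace.  lmutex_lock_interruptible() returns 0 on success or an
 * error if the sleep was interrupted by a signal, mirroring
 * sx_xlock_sig().
 */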

/*
 * Rwlock API
 */
typedef struct rwlock rwlock_t;

#define DEFINE_RWLOCK(name)				\
	struct rwlock name;				\
	SX_SYSINIT(name, &name, #name)
#define rwlock_init(rwlock)	rw_init(rwlock, "VCHI rwlock")
#define read_lock(rwlock)	rw_rlock(rwlock)
#define read_unlock(rwlock)	rw_unlock(rwlock)

#define write_lock(rwlock)	rw_wlock(rwlock)
#define write_unlock(rwlock)	rw_unlock(rwlock)
#define write_lock_irqsave(rwlock, flags)		\
	do {						\
		rw_wlock(rwlock);			\
		(void) &(flags);			\
	} while (0)
#define write_unlock_irqrestore(rwlock, flags)		\
	rw_unlock(rwlock)

#define read_lock_bh(rwlock)	rw_rlock(rwlock)
#define read_unlock_bh(rwlock)	rw_unlock(rwlock)
#define write_lock_bh(rwlock)	rw_wlock(rwlock)
#define write_unlock_bh(rwlock)	rw_unlock(rwlock)
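/*
 * The _irqsave/_irqrestore and _bh variants map to the plain rwlock(9)
 * operations; 'flags' is only referenced to silence unused-variable
 * warnings and no interrupt state is actually saved or restored.
 */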

/*
 * Timer API
 */
struct timer_list {
	struct mtx mtx;
	struct callout callout;

	unsigned long expires;
	void (*function)(unsigned long);
	unsigned long data;
};

void vchiq_init_timer(struct timer_list *t);
void vchiq_setup_timer(struct timer_list *t, void (*function)(unsigned long), unsigned long data);
void vchiq_mod_timer(struct timer_list *t, unsigned long expires);
void vchiq_add_timer(struct timer_list *t);
int vchiq_del_timer(struct timer_list *t);
int vchiq_del_timer_sync(struct timer_list *t);
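/*
 * callout(9)-backed stand-ins for the Linux timer_list API; the vchiq_*
 * names mirror init_timer()/setup_timer()/mod_timer()/add_timer()/
 * del_timer().  Given the jiffies emulation below, 'expires' is
 * expected to be expressed in ticks.
 */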

/*
 * Completion API
 */
struct completion {
	struct cv cv;
	struct mtx lock;
	int done;
};

void init_completion(struct completion *c);
void destroy_completion(struct completion *c);
int try_wait_for_completion(struct completion *);
int wait_for_completion_interruptible(struct completion *);
int wait_for_completion_interruptible_timeout(struct completion *, unsigned long ticks);
int wait_for_completion_killable(struct completion *);
void wait_for_completion(struct completion *c);
void complete(struct completion *c);
void complete_all(struct completion *c);
void INIT_COMPLETION_locked(struct completion *c);

#define	INIT_COMPLETION(x)	INIT_COMPLETION_locked(&(x))
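/*
 * Completions pair a condition variable with a mutex and a 'done'
 * counter.  A minimal usage sketch (names illustrative):
 *
 *	struct completion event;
 *
 *	init_completion(&event);
 *	...			(* complete(&event) from another context *)
 *	wait_for_completion(&event);
 *	destroy_completion(&event);
 *
 * INIT_COMPLETION() resets an already-initialized completion for reuse,
 * matching the old Linux macro of the same name.
 */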

/*
 * Semaphore API
 */
struct semaphore {
	struct mtx	mtx;
	struct cv	cv;
	int		value;
	int		waiters;
};

#define	DEFINE_SEMAPHORE(name)		\
	struct semaphore name;		\
	SYSINIT(name##_sema_sysinit, SI_SUB_LOCK, SI_ORDER_MIDDLE,	\
	    sema_sysinit, &name);					\
	SYSUNINIT(name##_sema_sysuninit, SI_SUB_LOCK, SI_ORDER_MIDDLE,	\
	    _sema_destroy, __DEVOLATILE(void *, &(name)))

void sema_sysinit(void *arg);
void _sema_init(struct semaphore *s, int value);
void _sema_destroy(struct semaphore *s);
void down(struct semaphore *s);
int down_interruptible(struct semaphore *s);
int down_trylock(struct semaphore *s);
void up(struct semaphore *s);
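/*
 * Counting semaphore built from a mutex and a condition variable.
 * DEFINE_SEMAPHORE() declares one statically and registers its
 * construction and destruction through SYSINIT/SYSUNINIT at
 * SI_SUB_LOCK time.
 */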

/*
 * Logging and assertions API
 */
void rlprintf(int pps, const char *fmt, ...)
	__printflike(2, 3);

void
device_rlprintf(int pps, device_t dev, const char *fmt, ...)
	__printflike(3, 4);

#define might_sleep()

#define WARN(condition, msg)				\
({							\
	int __ret_warn_on = !!(condition);		\
	if (unlikely(__ret_warn_on))			\
		printf((msg));				\
	unlikely(__ret_warn_on);			\
})

#define WARN_ON(condition)				\
({							\
	int __ret_warn_on = !!(condition);		\
	if (unlikely(__ret_warn_on))			\
		printf("WARN_ON: " #condition "\n");	\
	unlikely(__ret_warn_on);			\
})

#define WARN_ON_ONCE(condition) ({			\
	static int __warned;				\
	int __ret_warn_once = !!(condition);		\
							\
	if (unlikely(__ret_warn_once))			\
		if (WARN_ON(!__warned))			\
			__warned = 1;			\
	unlikely(__ret_warn_once);			\
})

#define BUG_ON(cond)					\
	do {						\
		if (cond)				\
			panic("BUG_ON: " #cond);	\
	} while (0)

#define BUG()						\
	do {						\
		panic("BUG: %s:%d", __FILE__, __LINE__);	\
	} while (0)

#define vchiq_static_assert(cond) CTASSERT(cond)

#define KERN_EMERG	"<0>"	/* system is unusable			*/
#define KERN_ALERT	"<1>"	/* action must be taken immediately	*/
#define KERN_CRIT	"<2>"	/* critical conditions			*/
#define KERN_ERR	"<3>"	/* error conditions			*/
#define KERN_WARNING	"<4>"	/* warning conditions			*/
#define KERN_NOTICE	"<5>"	/* normal but significant condition	*/
#define KERN_INFO	"<6>"	/* informational			*/
#define KERN_DEBUG	"<7>"	/* debug-level messages			*/
#define KERN_CONT	""

#define printk(fmt, args...)		printf(fmt, ##args)
#define vprintk(fmt, args)		vprintf(fmt, args)

/*
 * Malloc API
 */
#define GFP_KERNEL	0
#define GFP_ATOMIC	0

MALLOC_DECLARE(M_VCHI);

#define kmalloc(size, flags)	malloc((size), M_VCHI, M_NOWAIT | M_ZERO)
#define kcalloc(n, size, flags)	mallocarray((n), (size), M_VCHI, \
				    M_NOWAIT | M_ZERO)
#define kzalloc(a, b)		kcalloc(1, (a), (b))
#define kfree(p)		free(p, M_VCHI)
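/*
 * The Linux allocation flags are accepted but ignored: every request is
 * issued with M_NOWAIT | M_ZERO, so kmalloc() and friends can return
 * NULL even when the caller passed GFP_KERNEL and expected to sleep.
 */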

/*
 * Kernel module API
 */
#define __init
#define __exit
#define __devinit
#define __devexit
#define __devinitdata

/*
 * Time API
 */
#if 1
/* emulate jiffies */
static inline unsigned long
_jiffies(void)
{
	struct timeval tv;

	microuptime(&tv);
	return tvtohz(&tv);
}

static inline unsigned long
msecs_to_jiffies(unsigned long msecs)
{
	struct timeval tv;

	/* Split the millisecond count into whole seconds and microseconds. */
	tv.tv_sec = msecs / 1000UL;
	tv.tv_usec = (msecs % 1000UL) * 1000UL;
	return tvtohz(&tv);
}

#define jiffies			_jiffies()
#else
#define jiffies			ticks
#endif
#define HZ			hz

#define udelay(usec)		DELAY(usec)
#define mdelay(msec)		DELAY((msec) * 1000)

#define schedule_timeout(jiff)	pause("dhdslp", jiff)

#if defined(msleep)
#undef msleep
#endif
#define msleep(msec)		mdelay(msec)

#define time_after(a, b)	((a) > (b))
#define time_after_eq(a, b)	((a) >= (b))
#define time_before(a, b)	time_after((b), (a))
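/*
 * jiffies is emulated from microuptime(9) converted to ticks, and the
 * time_after()/time_before() comparisons are plain integer compares;
 * unlike their Linux counterparts they are not safe across tick-counter
 * wraparound.
 */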

/*
 * kthread API (we use proc)
 */
typedef struct proc * VCHIQ_THREAD_T;

VCHIQ_THREAD_T vchiq_thread_create(int (*threadfn)(void *data),
                                   void *data,
                                   const char namefmt[], ...);
void set_user_nice(VCHIQ_THREAD_T p, int nice);
void wake_up_process(VCHIQ_THREAD_T p);
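/*
 * Threads are represented by a struct proc pointer rather than a Linux
 * task_struct; vchiq_thread_create() is expected to spawn a kernel
 * process running 'threadfn' and hand back its proc pointer.
 */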

/*
 * Proc APIs
 */
void flush_signals(VCHIQ_THREAD_T);
int fatal_signal_pending(VCHIQ_THREAD_T);

/*
 * mbox API
 */
void bcm_mbox_write(int channel, uint32_t data);

/*
 * Misc API
 */

#define ENODATA EINVAL

#define __user

#define likely(x)		__builtin_expect(!!(x), 1)
#define unlikely(x)		__builtin_expect(!!(x), 0)
#define	current			curproc
#define EXPORT_SYMBOL(x)
#define PAGE_ALIGN(addr)	round_page(addr)

typedef	void	irqreturn_t;
typedef	off_t	loff_t;

#define BCM2835_MBOX_CHAN_VCHIQ	3

#define smp_mb	wmb
#define smp_rmb	rmb
#define smp_wmb	wmb

#define device_print_prettyname(dev)	device_printf((dev), "")

#endif /* __VCHI_BSD_H__ */