xref: /freebsd/sys/compat/linuxkpi/common/include/linux/spinlock.h (revision b4c3e9b5b09c829b4135aff738bd2893ed052377)
1 /*-
2  * Copyright (c) 2010 Isilon Systems, Inc.
3  * Copyright (c) 2010 iX Systems, Inc.
4  * Copyright (c) 2010 Panasas, Inc.
5  * Copyright (c) 2013-2017 Mellanox Technologies, Ltd.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice unmodified, this list of conditions, and the following
13  *    disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28  */
29 #ifndef	_LINUXKPI_LINUX_SPINLOCK_H_
30 #define	_LINUXKPI_LINUX_SPINLOCK_H_
31 
32 #include <asm/atomic.h>
33 #include <sys/param.h>
34 #include <sys/kernel.h>
35 #include <sys/lock.h>
36 #include <sys/mutex.h>
37 #include <sys/kdb.h>
38 
39 #include <linux/cleanup.h>
40 #include <linux/compiler.h>
41 #include <linux/rwlock.h>
42 #include <linux/bottom_half.h>
43 #include <linux/lockdep.h>
44 
/*
 * Linux spinlock_t is mapped onto a FreeBSD mutex (struct mtx); it is
 * initialized as a regular MTX_DEF mutex (see spin_lock_init() below),
 * not as a FreeBSD spin mutex.
 */
typedef struct mtx spinlock_t;
46 
/*
 * By defining CONFIG_SPIN_SKIP LinuxKPI spinlocks and asserts will be
 * skipped during panic(). By default it is disabled due to
 * performance reasons.
 *
 * When enabled, SPIN_SKIP() evaluates true if the scheduler has been
 * stopped (panic in progress) or the kernel debugger is active, so that
 * lock operations become no-ops on those paths.
 */
#ifdef CONFIG_SPIN_SKIP
#define	SPIN_SKIP(void)	unlikely(SCHEDULER_STOPPED() || kdb_active)
#else
#define	SPIN_SKIP(void) 0
#endif
57 
/*
 * spin_lock() - acquire the mutex and disable bottom halves.  Becomes a
 * no-op during panic/KDB when CONFIG_SPIN_SKIP is enabled (see
 * SPIN_SKIP() above).
 */
#define	spin_lock(_l) do {			\
	if (SPIN_SKIP())			\
		break;				\
	mtx_lock(_l);				\
	local_bh_disable();			\
} while (0)
64 
/*
 * spin_lock_bh() - acquire the lock with bottom halves disabled.
 * spin_lock() already calls local_bh_disable() on this platform, so no
 * additional call is needed here; a second, unconditional
 * local_bh_disable() would redundantly double-increment the BH-disable
 * state and would run even when spin_lock() skips itself under
 * SPIN_SKIP().
 */
#define	spin_lock_bh(_l) do {			\
	spin_lock(_l);				\
} while (0)
69 
/*
 * spin_lock_irq() - interrupt state is not modelled by the LinuxKPI
 * (see local_irq_save() below, which only zeroes the flags), so this is
 * simply spin_lock().
 */
#define	spin_lock_irq(_l) do {			\
	spin_lock(_l);				\
} while (0)
73 
/*
 * spin_unlock() - re-enable bottom halves and drop the mutex, the
 * mirror image of spin_lock().  Becomes a no-op during panic/KDB when
 * CONFIG_SPIN_SKIP is enabled.
 */
#define	spin_unlock(_l)	do {			\
	if (SPIN_SKIP())			\
		break;				\
	local_bh_enable();			\
	mtx_unlock(_l);				\
} while (0)
80 
/*
 * spin_unlock_bh() - release a lock taken with spin_lock_bh().
 * spin_unlock() already calls local_bh_enable(), so an extra call here
 * would be redundant; worse, an unconditional extra local_bh_enable()
 * would still run when spin_unlock() skips itself under SPIN_SKIP(),
 * unbalancing the bottom-half disable count on the panic path.
 */
#define	spin_unlock_bh(_l) do {			\
	spin_unlock(_l);			\
} while (0)
85 
/*
 * spin_unlock_irq() - interrupt state is not modelled, so this is
 * simply spin_unlock().
 */
#define	spin_unlock_irq(_l) do {		\
	spin_unlock(_l);			\
} while (0)
89 
/*
 * spin_trylock() - attempt to take the lock without blocking.  Returns
 * non-zero on success (with bottom halves disabled, matching
 * spin_lock()), zero if the lock was contended.  In SPIN_SKIP() mode it
 * reports success without touching the mutex.
 */
#define	spin_trylock(_l) ({			\
	int __ret;				\
	if (SPIN_SKIP()) {			\
		__ret = 1;			\
	} else {				\
		__ret = mtx_trylock(_l);	\
		if (likely(__ret != 0))		\
			local_bh_disable();	\
	}					\
	__ret;					\
})
101 
/* spin_trylock_irq() - identical to spin_trylock(); IRQs are not managed. */
#define	spin_trylock_irq(_l)			\
	spin_trylock(_l)

/*
 * spin_trylock_irqsave() - try-lock variant that also "saves" the
 * interrupt flags; flags are always set to 0 since interrupt state is
 * not tracked by the LinuxKPI.
 */
#define	spin_trylock_irqsave(_l, flags) ({	\
	(flags) = 0;				\
	spin_trylock(_l);			\
})
109 
/*
 * spin_lock_nested() - like spin_lock(), but MTX_DUPOK suppresses the
 * duplicate-lock diagnostics so locks of the same class may be held
 * simultaneously.  The Linux subclass argument (_n) is ignored.
 */
#define	spin_lock_nested(_l, _n) do {		\
	if (SPIN_SKIP())			\
		break;				\
	mtx_lock_flags(_l, MTX_DUPOK);		\
	local_bh_disable();			\
} while (0)
116 
/*
 * The *_irqsave()/*_irqrestore() variants do not actually save or
 * restore an interrupt state: "flags" is set to 0 on lock and ignored
 * on unlock.  They exist so Linux code using this API compiles and
 * behaves like the plain lock/unlock operations.
 */
#define	spin_lock_irqsave(_l, flags) do {	\
	(flags) = 0;				\
	spin_lock(_l);				\
} while (0)

#define	spin_lock_irqsave_nested(_l, flags, _n) do {	\
	(flags) = 0;					\
	spin_lock_nested(_l, _n);			\
} while (0)

#define	spin_unlock_irqrestore(_l, flags) do {		\
	(void)(flags);					\
	spin_unlock(_l);				\
} while (0)
131 
#ifdef WITNESS_ALL
/* NOTE: the maximum WITNESS name is 64 chars */
/*
 * Build a "file:line-name" lock name for WITNESS from a compound string
 * literal.  The pointer offset skips the leading part of long file
 * paths, keeping roughly the last 16 characters of the path so the
 * whole name stays within the WITNESS limit.
 */
#define	__spin_lock_name(name, file, line)		\
	(((const char *){file ":" #line "-" name}) +	\
	(sizeof(file) > 16 ? sizeof(file) - 16 : 0))
#else
/* Without WITNESS_ALL just use the plain name. */
#define	__spin_lock_name(name, file, line)	name
#endif
/* Extra expansion level so __FILE__/__LINE__ are substituted first. */
#define	_spin_lock_name(...)		__spin_lock_name(__VA_ARGS__)
#define	spin_lock_name(name)		_spin_lock_name(name, __FILE__, __LINE__)
142 
/*
 * spin_lock_init() - initialize the lock as a regular MTX_DEF mutex.
 * MTX_NEW permits initializing memory of unknown prior content and
 * MTX_NOWITNESS opts these dynamically created locks out of witness
 * order checking.
 */
#define	spin_lock_init(lock)	mtx_init(lock, spin_lock_name("lnxspin"), \
				  NULL, MTX_DEF | MTX_NOWITNESS | MTX_NEW)

/* spin_lock_destroy() - tear down a lock set up by spin_lock_init(). */
#define	spin_lock_destroy(_l)	mtx_destroy(_l)
147 
/*
 * DEFINE_SPINLOCK() - declare a global spinlock and register a
 * MTX_SYSINIT so it is initialized automatically at boot.
 */
#define	DEFINE_SPINLOCK(lock)					\
	spinlock_t lock;					\
	MTX_SYSINIT(lock, &lock, spin_lock_name("lnxspin"), MTX_DEF)
151 
/*
 * assert_spin_locked() - panic (under INVARIANTS) unless the current
 * thread owns the lock; skipped entirely in SPIN_SKIP() mode.
 */
#define	assert_spin_locked(_l) do {		\
	if (SPIN_SKIP())			\
		break;				\
	mtx_assert(_l, MA_OWNED);		\
} while (0)
157 
/*
 * local_irq_save()/local_irq_restore() - interrupt state is not tracked
 * by the LinuxKPI; flags is set to 0 and later consumed as a no-op.
 */
#define	local_irq_save(flags) do {		\
	(flags) = 0;				\
} while (0)

#define	local_irq_restore(flags) do {		\
	(void)(flags);				\
} while (0)
165 
#define	atomic_dec_and_lock_irqsave(cnt, lock, flags) \
	_atomic_dec_and_lock_irqsave(cnt, lock, &(flags))
/*
 * Decrement "cnt" and, if it reached zero, return 1 with the lock held
 * (caller must unlock).  Otherwise return 0 with the lock released.
 */
static inline int
_atomic_dec_and_lock_irqsave(atomic_t *cnt, spinlock_t *lock,
    unsigned long *flags)
{
	/* Fast path: decrement without the lock unless cnt would hit 0. */
	if (atomic_add_unless(cnt, -1, 1))
		return (0);

	/* Slow path: take the lock for the final decrement to zero. */
	spin_lock_irqsave(lock, *flags);
	if (atomic_dec_and_test(cnt))
		return (1);
	spin_unlock_irqrestore(lock, *flags);
	return (0);
}
181 
/*
 * struct raw_spinlock
 *
 * Linux raw spinlocks are backed by the same MTX_DEF mutex as the
 * regular spinlock_t; the raw_spin_*() wrappers below simply forward to
 * the corresponding spin_*() macros on the embedded mutex.
 */

typedef struct raw_spinlock {
	struct mtx	lock;
} raw_spinlock_t;

#define	raw_spin_lock_init(rlock) \
	mtx_init(&(rlock)->lock, spin_lock_name("lnxspin_raw"), \
	    NULL, MTX_DEF | MTX_NOWITNESS | MTX_NEW)

#define	raw_spin_lock(rl)	spin_lock(&(rl)->lock)
#define	raw_spin_trylock(rl)	spin_trylock(&(rl)->lock)
#define	raw_spin_unlock(rl)	spin_unlock(&(rl)->lock)

#define	raw_spin_lock_irqsave(rl, f)		spin_lock_irqsave(&(rl)->lock, (f))
#define	raw_spin_trylock_irqsave(rl, f)		spin_trylock_irqsave(&(rl)->lock, (f))
#define	raw_spin_unlock_irqrestore(rl, f)	spin_unlock_irqrestore(&(rl)->lock, (f))
201 
/*
 * cleanup.h related pre-defined cases.
 *
 * Defines the guard(spinlock_irqsave)/scoped_guard(spinlock_irqsave, ...)
 * class: the guard acquires the lock with spin_lock_irqsave() on entry,
 * releases it with spin_unlock_irqrestore() when it leaves scope, and
 * keeps the (unused) flags word inside the guard object.
 */
DEFINE_LOCK_GUARD_1(spinlock_irqsave,
    spinlock_t,
    spin_lock_irqsave(_T->lock, _T->flags),
    spin_unlock_irqrestore(_T->lock, _T->flags),
    unsigned long flags)
210 
211 #endif					/* _LINUXKPI_LINUX_SPINLOCK_H_ */
212