xref: /freebsd/sys/netinet/sctp_lock_bsd.h (revision 0b3105a37d7adcadcb720112fed4dc4e8040be99)
/*-
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#ifndef _NETINET_SCTP_LOCK_BSD_H_
#define _NETINET_SCTP_LOCK_BSD_H_

/*
 * General locking concepts: The goal of our locking is to provide
 * consistency while keeping the overhead as small as possible. We use
 * non-recursive locks, which are supposed to be quite inexpensive, and we
 * arrange things so that most functions do not need to be aware of locking
 * at all. Once we have found a TCB we lock it and only unlock it when we
 * are through with it. This makes the TCB lock effectively a "global" lock
 * for any work done on an association. Use caution when taking a TCB_LOCK:
 * if we acquire it recursively we deadlock.
 *
 * Most other locks (INP and INFO) attempt to localize the locking, i.e. we
 * try to contain the lock and unlock within the function that needs the
 * lock. This sometimes means we do extra locks and unlocks and lose a bit
 * of efficiency, but if the performance claims about non-recursive locks
 * hold, that should not be a problem. One issue that arises with this
 * lock-only-when-needed approach is implicit association setup: if the
 * lookup for an association returns a NULL TCB, another processor may have
 * created that association by the time we go to create it ourselves. This
 * is what the CREATE lock on the endpoint is for. Any place that implicitly
 * creates an association, or explicitly creates one (the connect call),
 * takes the CREATE_INP lock. That guarantees that while we look up the INP
 * and INFO, any other creator doing the same lookup is gated against us and
 * the two can synchronize. The CREATE_INP lock is therefore another lock
 * that must be taken with extreme caution to avoid re-entrancy problems.
 *
 * For systems other than FreeBSD 5.x we provide a set of EMPTY lock macros,
 * so locks can be placed liberally throughout the code and still reduce to
 * nothing on NetBSD/OpenBSD and FreeBSD 4.x.
 */

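/*
 * A minimal sketch of the lookup-then-create pattern described above,
 * using the macros defined in this file. This is illustrative only, not a
 * copy of any actual code path: "lookup_assoc" and "create_assoc" are
 * placeholder names for the real lookup/allocation helpers (e.g. the
 * sctp_findassociation_*() family), and it assumes the lookup hands back
 * the TCB already locked.
 *
 *	SCTP_ASOC_CREATE_LOCK(inp);		gate any concurrent creator
 *	SCTP_INP_RLOCK(inp);
 *	stcb = lookup_assoc(inp, addr);		placeholder helper
 *	SCTP_INP_RUNLOCK(inp);
 *	if (stcb == NULL)
 *		stcb = create_assoc(inp, addr);	safe: CREATE lock is held
 *	SCTP_ASOC_CREATE_UNLOCK(inp);
 *	if (stcb != NULL) {
 *		SCTP_TCB_LOCK_ASSERT(stcb);
 *		...work on the association...
 *		SCTP_TCB_UNLOCK(stcb);
 *	}
 */
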
/*
 * When working with the global SCTP lists we lock and unlock the INP_INFO
 * lock. So when we go to look up an association we take a
 * SCTP_INP_INFO_RLOCK(), and when we want to add a new association to one
 * of the SCTP_BASE_INFO() lists we take a SCTP_INP_INFO_WLOCK().
 */

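/*
 * A minimal usage sketch (illustrative only, not lifted from the code):
 * readers of the global lists take the INFO lock shared, while code that
 * inserts or removes entries takes it exclusively.
 *
 *	SCTP_INP_INFO_RLOCK();
 *	...walk the SCTP_BASE_INFO() lists to find the inp/stcb...
 *	SCTP_INP_INFO_RUNLOCK();
 *
 *	SCTP_INP_INFO_WLOCK();
 *	...link a newly created endpoint/association into the lists...
 *	SCTP_INP_INFO_WUNLOCK();
 */
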
extern struct sctp_foo_stuff sctp_logoff[];
extern int sctp_logoff_stuff;

#define SCTP_IPI_COUNT_INIT()

#define SCTP_STATLOG_INIT_LOCK()
#define SCTP_STATLOG_LOCK()
#define SCTP_STATLOG_UNLOCK()
#define SCTP_STATLOG_DESTROY()

#define SCTP_INP_INFO_LOCK_DESTROY() do { \
	if (rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx))) { \
		rw_wunlock(&SCTP_BASE_INFO(ipi_ep_mtx)); \
	} \
	rw_destroy(&SCTP_BASE_INFO(ipi_ep_mtx)); \
} while (0)

#define SCTP_INP_INFO_LOCK_INIT() \
	rw_init(&SCTP_BASE_INFO(ipi_ep_mtx), "sctp-info")

#define SCTP_INP_INFO_RLOCK() do { \
	rw_rlock(&SCTP_BASE_INFO(ipi_ep_mtx)); \
} while (0)

#define SCTP_MCORE_QLOCK_INIT(cpstr) do { \
	mtx_init(&(cpstr)->que_mtx, \
	    "sctp-mcore_queue", "queue_lock", \
	    MTX_DEF | MTX_DUPOK); \
} while (0)

#define SCTP_MCORE_QLOCK(cpstr) do { \
	mtx_lock(&(cpstr)->que_mtx); \
} while (0)

#define SCTP_MCORE_QUNLOCK(cpstr) do { \
	mtx_unlock(&(cpstr)->que_mtx); \
} while (0)

#define SCTP_MCORE_QDESTROY(cpstr) do { \
	if (mtx_owned(&(cpstr)->que_mtx)) { \
		mtx_unlock(&(cpstr)->que_mtx); \
	} \
	mtx_destroy(&(cpstr)->que_mtx); \
} while (0)

#define SCTP_MCORE_LOCK_INIT(cpstr) do { \
	mtx_init(&(cpstr)->core_mtx, \
	    "sctp-cpulck", "cpu_proc_lock", \
	    MTX_DEF | MTX_DUPOK); \
} while (0)

#define SCTP_MCORE_LOCK(cpstr) do { \
	mtx_lock(&(cpstr)->core_mtx); \
} while (0)

#define SCTP_MCORE_UNLOCK(cpstr) do { \
	mtx_unlock(&(cpstr)->core_mtx); \
} while (0)

#define SCTP_MCORE_DESTROY(cpstr) do { \
	if (mtx_owned(&(cpstr)->core_mtx)) { \
		mtx_unlock(&(cpstr)->core_mtx); \
	} \
	mtx_destroy(&(cpstr)->core_mtx); \
} while (0)

#define SCTP_INP_INFO_WLOCK() do { \
	rw_wlock(&SCTP_BASE_INFO(ipi_ep_mtx)); \
} while (0)

#define SCTP_INP_INFO_RUNLOCK()	rw_runlock(&SCTP_BASE_INFO(ipi_ep_mtx))
#define SCTP_INP_INFO_WUNLOCK()	rw_wunlock(&SCTP_BASE_INFO(ipi_ep_mtx))

#define SCTP_IPI_ADDR_INIT() \
	rw_init(&SCTP_BASE_INFO(ipi_addr_mtx), "sctp-addr")

#define SCTP_IPI_ADDR_DESTROY() do { \
	if (rw_wowned(&SCTP_BASE_INFO(ipi_addr_mtx))) { \
		rw_wunlock(&SCTP_BASE_INFO(ipi_addr_mtx)); \
	} \
	rw_destroy(&SCTP_BASE_INFO(ipi_addr_mtx)); \
} while (0)

#define SCTP_IPI_ADDR_RLOCK() do { \
	rw_rlock(&SCTP_BASE_INFO(ipi_addr_mtx)); \
} while (0)

#define SCTP_IPI_ADDR_WLOCK() do { \
	rw_wlock(&SCTP_BASE_INFO(ipi_addr_mtx)); \
} while (0)

#define SCTP_IPI_ADDR_RUNLOCK()	rw_runlock(&SCTP_BASE_INFO(ipi_addr_mtx))
#define SCTP_IPI_ADDR_WUNLOCK()	rw_wunlock(&SCTP_BASE_INFO(ipi_addr_mtx))

#define SCTP_IPI_ITERATOR_WQ_INIT() \
	mtx_init(&sctp_it_ctl.ipi_iterator_wq_mtx, "sctp-it-wq", "sctp_it_wq", MTX_DEF)

#define SCTP_IPI_ITERATOR_WQ_DESTROY() \
	mtx_destroy(&sctp_it_ctl.ipi_iterator_wq_mtx)

#define SCTP_IPI_ITERATOR_WQ_LOCK() do { \
	mtx_lock(&sctp_it_ctl.ipi_iterator_wq_mtx); \
} while (0)

#define SCTP_IPI_ITERATOR_WQ_UNLOCK()	mtx_unlock(&sctp_it_ctl.ipi_iterator_wq_mtx)

#define SCTP_IP_PKTLOG_INIT() \
	mtx_init(&SCTP_BASE_INFO(ipi_pktlog_mtx), "sctp-pktlog", "packetlog", MTX_DEF)

#define SCTP_IP_PKTLOG_LOCK() do { \
	mtx_lock(&SCTP_BASE_INFO(ipi_pktlog_mtx)); \
} while (0)

#define SCTP_IP_PKTLOG_UNLOCK()	mtx_unlock(&SCTP_BASE_INFO(ipi_pktlog_mtx))

#define SCTP_IP_PKTLOG_DESTROY() \
	mtx_destroy(&SCTP_BASE_INFO(ipi_pktlog_mtx))

/*
 * The INP locks are used to lock an SCTP endpoint. For example, if we want
 * to change something at the endpoint level, such as the random_store or
 * the cookie secrets, we take the INP-level lock.
 */

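/*
 * A minimal usage sketch (illustrative only): endpoint-level state such as
 * the cookie secrets is changed under the INP lock. Note that on this
 * platform SCTP_INP_RLOCK() and SCTP_INP_WLOCK() map to the same mutex, so
 * readers are not actually shared; the R/W spelling documents intent.
 *
 *	SCTP_INP_WLOCK(inp);
 *	...update the endpoint, e.g. roll the cookie secret...
 *	SCTP_INP_WUNLOCK(inp);
 */
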
#define SCTP_INP_READ_INIT(_inp) \
	mtx_init(&(_inp)->inp_rdata_mtx, "sctp-read", "inpr", MTX_DEF | MTX_DUPOK)

#define SCTP_INP_READ_DESTROY(_inp) \
	mtx_destroy(&(_inp)->inp_rdata_mtx)

#define SCTP_INP_READ_LOCK(_inp) do { \
	mtx_lock(&(_inp)->inp_rdata_mtx); \
} while (0)

#define SCTP_INP_READ_UNLOCK(_inp)	mtx_unlock(&(_inp)->inp_rdata_mtx)

#define SCTP_INP_LOCK_INIT(_inp) \
	mtx_init(&(_inp)->inp_mtx, "sctp-inp", "inp", MTX_DEF | MTX_DUPOK)
#define SCTP_ASOC_CREATE_LOCK_INIT(_inp) \
	mtx_init(&(_inp)->inp_create_mtx, "sctp-create", "inp_create", \
	    MTX_DEF | MTX_DUPOK)

#define SCTP_INP_LOCK_DESTROY(_inp) \
	mtx_destroy(&(_inp)->inp_mtx)

#define SCTP_INP_LOCK_CONTENDED(_inp) ((_inp)->inp_mtx.mtx_lock & MTX_CONTESTED)

#define SCTP_INP_READ_CONTENDED(_inp) ((_inp)->inp_rdata_mtx.mtx_lock & MTX_CONTESTED)

#define SCTP_ASOC_CREATE_LOCK_CONTENDED(_inp) ((_inp)->inp_create_mtx.mtx_lock & MTX_CONTESTED)

#define SCTP_ASOC_CREATE_LOCK_DESTROY(_inp) \
	mtx_destroy(&(_inp)->inp_create_mtx)

#ifdef SCTP_LOCK_LOGGING
#define SCTP_INP_RLOCK(_inp) do { \
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) \
		sctp_log_lock(_inp, (struct sctp_tcb *)NULL, SCTP_LOG_LOCK_INP); \
	mtx_lock(&(_inp)->inp_mtx); \
} while (0)

#define SCTP_INP_WLOCK(_inp) do { \
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) \
		sctp_log_lock(_inp, (struct sctp_tcb *)NULL, SCTP_LOG_LOCK_INP); \
	mtx_lock(&(_inp)->inp_mtx); \
} while (0)

#else

#define SCTP_INP_RLOCK(_inp) do { \
	mtx_lock(&(_inp)->inp_mtx); \
} while (0)

#define SCTP_INP_WLOCK(_inp) do { \
	mtx_lock(&(_inp)->inp_mtx); \
} while (0)

#endif

#define SCTP_TCB_SEND_LOCK_INIT(_tcb) \
	mtx_init(&(_tcb)->tcb_send_mtx, "sctp-send-tcb", "tcbs", MTX_DEF | MTX_DUPOK)

#define SCTP_TCB_SEND_LOCK_DESTROY(_tcb) mtx_destroy(&(_tcb)->tcb_send_mtx)

#define SCTP_TCB_SEND_LOCK(_tcb) do { \
	mtx_lock(&(_tcb)->tcb_send_mtx); \
} while (0)

#define SCTP_TCB_SEND_UNLOCK(_tcb)	mtx_unlock(&(_tcb)->tcb_send_mtx)

#define SCTP_INP_INCR_REF(_inp)	atomic_add_int(&((_inp)->refcount), 1)
#define SCTP_INP_DECR_REF(_inp)	atomic_add_int(&((_inp)->refcount), -1)

#ifdef SCTP_LOCK_LOGGING
#define SCTP_ASOC_CREATE_LOCK(_inp) do { \
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) \
		sctp_log_lock(_inp, (struct sctp_tcb *)NULL, SCTP_LOG_LOCK_CREATE); \
	mtx_lock(&(_inp)->inp_create_mtx); \
} while (0)
#else
#define SCTP_ASOC_CREATE_LOCK(_inp) do { \
	mtx_lock(&(_inp)->inp_create_mtx); \
} while (0)
#endif

#define SCTP_INP_RUNLOCK(_inp)		mtx_unlock(&(_inp)->inp_mtx)
#define SCTP_INP_WUNLOCK(_inp)		mtx_unlock(&(_inp)->inp_mtx)
#define SCTP_ASOC_CREATE_UNLOCK(_inp)	mtx_unlock(&(_inp)->inp_create_mtx)

/*
 * For the majority of things (once we have found the association) we lock
 * the actual association mutex. This protects all the association-level
 * queues, streams and so on. We still need to lock the socket layer when
 * we push data up into the receiving sb_mb, i.e. we need an extra
 * SOCKBUF_LOCK(&so->so_rcv) even though the association is locked.
 */

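/*
 * A minimal usage sketch (illustrative only): even with the association
 * locked, handing data to the socket requires the socket-buffer lock.
 *
 *	SCTP_TCB_LOCK(stcb);
 *	...figure out what can be delivered...
 *	SOCKBUF_LOCK(&so->so_rcv);
 *	...append the mbufs to so->so_rcv...
 *	SOCKBUF_UNLOCK(&so->so_rcv);
 *	SCTP_TCB_UNLOCK(stcb);
 */
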
#define SCTP_TCB_LOCK_INIT(_tcb) \
	mtx_init(&(_tcb)->tcb_mtx, "sctp-tcb", "tcb", MTX_DEF | MTX_DUPOK)

#define SCTP_TCB_LOCK_DESTROY(_tcb)	mtx_destroy(&(_tcb)->tcb_mtx)

#ifdef SCTP_LOCK_LOGGING
#define SCTP_TCB_LOCK(_tcb) do { \
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) \
		sctp_log_lock(_tcb->sctp_ep, _tcb, SCTP_LOG_LOCK_TCB); \
	mtx_lock(&(_tcb)->tcb_mtx); \
} while (0)
#else
#define SCTP_TCB_LOCK(_tcb) do { \
	mtx_lock(&(_tcb)->tcb_mtx); \
} while (0)
#endif

#define SCTP_TCB_TRYLOCK(_tcb)	mtx_trylock(&(_tcb)->tcb_mtx)

#define SCTP_TCB_UNLOCK(_tcb)	mtx_unlock(&(_tcb)->tcb_mtx)

#define SCTP_TCB_UNLOCK_IFOWNED(_tcb) do { \
	if (mtx_owned(&(_tcb)->tcb_mtx)) \
		mtx_unlock(&(_tcb)->tcb_mtx); \
} while (0)

#ifdef INVARIANTS
#define SCTP_TCB_LOCK_ASSERT(_tcb) do { \
	if (mtx_owned(&(_tcb)->tcb_mtx) == 0) \
		panic("Don't own TCB lock"); \
} while (0)
#else
#define SCTP_TCB_LOCK_ASSERT(_tcb)
#endif

#define SCTP_ITERATOR_LOCK_INIT() \
	mtx_init(&sctp_it_ctl.it_mtx, "sctp-it", "iterator", MTX_DEF)

#ifdef INVARIANTS
#define SCTP_ITERATOR_LOCK() do { \
	if (mtx_owned(&sctp_it_ctl.it_mtx)) \
		panic("Iterator Lock"); \
	mtx_lock(&sctp_it_ctl.it_mtx); \
} while (0)
#else
#define SCTP_ITERATOR_LOCK() do { \
	mtx_lock(&sctp_it_ctl.it_mtx); \
} while (0)
#endif

#define SCTP_ITERATOR_UNLOCK()		mtx_unlock(&sctp_it_ctl.it_mtx)
#define SCTP_ITERATOR_LOCK_DESTROY()	mtx_destroy(&sctp_it_ctl.it_mtx)

#define SCTP_WQ_ADDR_INIT() do { \
	mtx_init(&SCTP_BASE_INFO(wq_addr_mtx), "sctp-addr-wq", "sctp_addr_wq", MTX_DEF); \
} while (0)

#define SCTP_WQ_ADDR_DESTROY() do { \
	if (mtx_owned(&SCTP_BASE_INFO(wq_addr_mtx))) { \
		mtx_unlock(&SCTP_BASE_INFO(wq_addr_mtx)); \
	} \
	mtx_destroy(&SCTP_BASE_INFO(wq_addr_mtx)); \
} while (0)

#define SCTP_WQ_ADDR_LOCK() do { \
	mtx_lock(&SCTP_BASE_INFO(wq_addr_mtx)); \
} while (0)
#define SCTP_WQ_ADDR_UNLOCK() do { \
	mtx_unlock(&SCTP_BASE_INFO(wq_addr_mtx)); \
} while (0)

#define SCTP_INCR_EP_COUNT() do { \
	atomic_add_int(&SCTP_BASE_INFO(ipi_count_ep), 1); \
} while (0)

#define SCTP_DECR_EP_COUNT() do { \
	atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_ep), 1); \
} while (0)

#define SCTP_INCR_ASOC_COUNT() do { \
	atomic_add_int(&SCTP_BASE_INFO(ipi_count_asoc), 1); \
} while (0)

#define SCTP_DECR_ASOC_COUNT() do { \
	atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_asoc), 1); \
} while (0)

#define SCTP_INCR_LADDR_COUNT() do { \
	atomic_add_int(&SCTP_BASE_INFO(ipi_count_laddr), 1); \
} while (0)

#define SCTP_DECR_LADDR_COUNT() do { \
	atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_laddr), 1); \
} while (0)

#define SCTP_INCR_RADDR_COUNT() do { \
	atomic_add_int(&SCTP_BASE_INFO(ipi_count_raddr), 1); \
} while (0)

#define SCTP_DECR_RADDR_COUNT() do { \
	atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_raddr), 1); \
} while (0)

#define SCTP_INCR_CHK_COUNT() do { \
	atomic_add_int(&SCTP_BASE_INFO(ipi_count_chunk), 1); \
} while (0)

#ifdef INVARIANTS
#define SCTP_DECR_CHK_COUNT() do { \
	if (SCTP_BASE_INFO(ipi_count_chunk) == 0) \
		panic("chunk count to 0?"); \
	atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_chunk), 1); \
} while (0)
#else
#define SCTP_DECR_CHK_COUNT() do { \
	if (SCTP_BASE_INFO(ipi_count_chunk) != 0) \
		atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_chunk), 1); \
} while (0)
#endif

#define SCTP_INCR_READQ_COUNT() do { \
	atomic_add_int(&SCTP_BASE_INFO(ipi_count_readq), 1); \
} while (0)

#define SCTP_DECR_READQ_COUNT() do { \
	atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_readq), 1); \
} while (0)

#define SCTP_INCR_STRMOQ_COUNT() do { \
	atomic_add_int(&SCTP_BASE_INFO(ipi_count_strmoq), 1); \
} while (0)

#define SCTP_DECR_STRMOQ_COUNT() do { \
	atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_strmoq), 1); \
} while (0)

#if defined(SCTP_SO_LOCK_TESTING)
#define SCTP_INP_SO(sctpinp)	(sctpinp)->ip_inp.inp.inp_socket
#define SCTP_SOCKET_LOCK(so, refcnt)
#define SCTP_SOCKET_UNLOCK(so, refcnt)
#endif

#endif