xref: /freebsd/sys/netinet/sctp_bsd_addr.c (revision 884a2a699669ec61e2366e3e358342dbc94be24a)
/*-
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2011, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2011, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

/* $KAME: sctp_output.c,v 1.46 2005/03/06 16:04:17 itojun Exp $	 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <netinet/sctp_os.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctp_header.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_bsd_addr.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_timer.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_indata.h>
#include <sys/unistd.h>

/* Declare all of our malloc named types */
MALLOC_DEFINE(SCTP_M_MAP, "sctp_map", "sctp asoc map descriptor");
MALLOC_DEFINE(SCTP_M_STRMI, "sctp_stri", "sctp stream in array");
MALLOC_DEFINE(SCTP_M_STRMO, "sctp_stro", "sctp stream out array");
MALLOC_DEFINE(SCTP_M_ASC_ADDR, "sctp_aadr", "sctp asconf address");
MALLOC_DEFINE(SCTP_M_ASC_IT, "sctp_a_it", "sctp asconf iterator");
MALLOC_DEFINE(SCTP_M_AUTH_CL, "sctp_atcl", "sctp auth chunklist");
MALLOC_DEFINE(SCTP_M_AUTH_KY, "sctp_atky", "sctp auth key");
MALLOC_DEFINE(SCTP_M_AUTH_HL, "sctp_athm", "sctp auth hmac list");
MALLOC_DEFINE(SCTP_M_AUTH_IF, "sctp_athi", "sctp auth info");
MALLOC_DEFINE(SCTP_M_STRESET, "sctp_stre", "sctp stream reset");
MALLOC_DEFINE(SCTP_M_CMSG, "sctp_cmsg", "sctp CMSG buffer");
MALLOC_DEFINE(SCTP_M_COPYAL, "sctp_cpal", "sctp copy all");
MALLOC_DEFINE(SCTP_M_VRF, "sctp_vrf", "sctp vrf struct");
MALLOC_DEFINE(SCTP_M_IFA, "sctp_ifa", "sctp ifa struct");
MALLOC_DEFINE(SCTP_M_IFN, "sctp_ifn", "sctp ifn struct");
MALLOC_DEFINE(SCTP_M_TIMW, "sctp_timw", "sctp time block");
MALLOC_DEFINE(SCTP_M_MVRF, "sctp_mvrf", "sctp mvrf pcb list");
MALLOC_DEFINE(SCTP_M_ITER, "sctp_iter", "sctp iterator control");
MALLOC_DEFINE(SCTP_M_SOCKOPT, "sctp_socko", "sctp socket option");
MALLOC_DEFINE(SCTP_M_MCORE, "sctp_mcore", "sctp mcore queue");

/* Global NON-VNET structure that controls the iterator */
struct iterator_control sctp_it_ctl;
static int __sctp_thread_based_iterator_started = 0;

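/*
 * Drain the pending iterator queue: run each iterator's at-end callback,
 * unlink the entry, and free it.  Used when the iterator thread is told
 * to exit.
 */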
static void
sctp_cleanup_itqueue(void)
{
	struct sctp_iterator *it, *nit;

	TAILQ_FOREACH_SAFE(it, &sctp_it_ctl.iteratorhead, sctp_nxt_itr, nit) {
		if (it->function_atend != NULL) {
			(*it->function_atend) (it->pointer, it->val);
		}
		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
		SCTP_FREE(it, SCTP_M_ITER);
	}
}

void
sctp_wakeup_iterator(void)
{
	wakeup(&sctp_it_ctl.iterator_running);
}

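/*
 * Main loop of the iterator kthread: sleep until sctp_wakeup_iterator()
 * signals work, run the queued iterators via sctp_iterator_worker(), and
 * tear down the iterator locks and queue before exiting once
 * SCTP_ITERATOR_MUST_EXIT is set.
 */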
static void
sctp_iterator_thread(void *v)
{
	SCTP_IPI_ITERATOR_WQ_LOCK();
	while (1) {
		msleep(&sctp_it_ctl.iterator_running,
		    &sctp_it_ctl.ipi_iterator_wq_mtx,
		    0, "waiting_for_work", 0);
		if (sctp_it_ctl.iterator_flags & SCTP_ITERATOR_MUST_EXIT) {
			SCTP_IPI_ITERATOR_WQ_DESTROY();
			SCTP_ITERATOR_LOCK_DESTROY();
			sctp_cleanup_itqueue();
			__sctp_thread_based_iterator_started = 0;
			kthread_exit();
		}
		sctp_iterator_worker();
	}
}

void
sctp_startup_iterator(void)
{
	int ret;

	if (__sctp_thread_based_iterator_started) {
		/* You only get one */
		return;
	}
	/* init the iterator head */
	__sctp_thread_based_iterator_started = 1;
	sctp_it_ctl.iterator_running = 0;
	sctp_it_ctl.iterator_flags = 0;
	sctp_it_ctl.cur_it = NULL;
	SCTP_ITERATOR_LOCK_INIT();
	SCTP_IPI_ITERATOR_WQ_INIT();
	TAILQ_INIT(&sctp_it_ctl.iteratorhead);

	ret = kproc_create(sctp_iterator_thread,
	    (void *)NULL,
	    &sctp_it_ctl.thread_proc,
	    RFPROC,
	    SCTP_KTHREAD_PAGES,
	    SCTP_KTRHEAD_NAME);
}

#ifdef INET6

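/*
 * Copy the kernel's in6_ifaddr flags into the sctp_ifa and mark the
 * address unusable when it is deprecated (unless ip6_use_deprecated is
 * set), detached, anycast, or not ready.
 */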
void
sctp_gather_internal_ifa_flags(struct sctp_ifa *ifa)
{
	struct in6_ifaddr *ifa6;

	ifa6 = (struct in6_ifaddr *)ifa->ifa;
	ifa->flags = ifa6->ia6_flags;
	if (!MODULE_GLOBAL(ip6_use_deprecated)) {
		if (ifa->flags & IN6_IFF_DEPRECATED) {
			ifa->localifa_flags |= SCTP_ADDR_IFA_UNUSEABLE;
		} else {
			ifa->localifa_flags &= ~SCTP_ADDR_IFA_UNUSEABLE;
		}
	} else {
		ifa->localifa_flags &= ~SCTP_ADDR_IFA_UNUSEABLE;
	}
	if (ifa->flags &
	    (IN6_IFF_DETACHED |
	    IN6_IFF_ANYCAST |
	    IN6_IFF_NOTREADY)) {
		ifa->localifa_flags |= SCTP_ADDR_IFA_UNUSEABLE;
	} else {
		ifa->localifa_flags &= ~SCTP_ADDR_IFA_UNUSEABLE;
	}
}

#endif				/* INET6 */


static uint32_t
sctp_is_desired_interface_type(struct ifaddr *ifa)
{
	int result;

	/* check the interface type to see if it's one we care about */
	switch (ifa->ifa_ifp->if_type) {
	case IFT_ETHER:
	case IFT_ISO88023:
	case IFT_ISO88024:
	case IFT_ISO88025:
	case IFT_ISO88026:
	case IFT_STARLAN:
	case IFT_P10:
	case IFT_P80:
	case IFT_HY:
	case IFT_FDDI:
	case IFT_XETHER:
	case IFT_ISDNBASIC:
	case IFT_ISDNPRIMARY:
	case IFT_PTPSERIAL:
	case IFT_OTHER:
	case IFT_PPP:
	case IFT_LOOP:
	case IFT_SLIP:
	case IFT_GIF:
	case IFT_L2VLAN:
	case IFT_IP:
	case IFT_IPOVERCDLC:
	case IFT_IPOVERCLAW:
	case IFT_VIRTUALIPADDRESS:
		result = 1;
		break;
	default:
		result = 0;
		break;
	}

	return (result);
}

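/*
 * Walk the kernel's interface and address lists and register every
 * usable address with the given SCTP VRF.  Called from
 * sctp_init_vrf_list() and once from sctp_addr_change() on the first
 * address event.
 */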
static void
sctp_init_ifns_for_vrf(int vrfid)
{
	/*
	 * Here we must apply ANY locks needed by the IFN we access and also
	 * make sure we lock any IFA that exists as we float through the
	 * list of IFAs.
	 */
	struct ifnet *ifn;
	struct ifaddr *ifa;
	struct sctp_ifa *sctp_ifa;
	uint32_t ifa_flags;

#ifdef INET6
	struct in6_ifaddr *ifa6;
#endif

	IFNET_RLOCK();
	TAILQ_FOREACH(ifn, &MODULE_GLOBAL(ifnet), if_list) {
		IF_ADDR_LOCK(ifn);
		TAILQ_FOREACH(ifa, &ifn->if_addrlist, ifa_list) {
			if (ifa->ifa_addr == NULL) {
				continue;
			}
			switch (ifa->ifa_addr->sa_family) {
#ifdef INET
			case AF_INET:
				if (((struct sockaddr_in *)ifa->ifa_addr)->sin_addr.s_addr == 0) {
					continue;
				}
				break;
#endif
#ifdef INET6
			case AF_INET6:
				if (IN6_IS_ADDR_UNSPECIFIED(&((struct sockaddr_in6 *)ifa->ifa_addr)->sin6_addr)) {
					/* skip unspecified addresses */
					continue;
				}
				break;
#endif
			default:
				continue;
			}
			if (sctp_is_desired_interface_type(ifa) == 0) {
				/* not a desired interface type */
				continue;
			}
			switch (ifa->ifa_addr->sa_family) {
#ifdef INET
			case AF_INET:
				ifa_flags = 0;
				break;
#endif
#ifdef INET6
			case AF_INET6:
				ifa6 = (struct in6_ifaddr *)ifa;
				ifa_flags = ifa6->ia6_flags;
				break;
#endif
			default:
				ifa_flags = 0;
				break;
			}
			sctp_ifa = sctp_add_addr_to_vrf(vrfid,
			    (void *)ifn,
			    ifn->if_index,
			    ifn->if_type,
			    ifn->if_xname,
			    (void *)ifa,
			    ifa->ifa_addr,
			    ifa_flags,
			    0);
			if (sctp_ifa) {
				sctp_ifa->localifa_flags &= ~SCTP_ADDR_DEFER_USE;
			}
		}
		IF_ADDR_UNLOCK(ifn);
	}
	IFNET_RUNLOCK();
}

void
sctp_init_vrf_list(int vrfid)
{
	if (vrfid > SCTP_MAX_VRF_ID)
		/* can't do that */
		return;

	/* Don't care about the return value here */
	(void)sctp_allocate_vrf(vrfid);

	/*
	 * Now we need to build all the ifn's for this vrf and their
	 * addresses.
	 */
	sctp_init_ifns_for_vrf(vrfid);
}

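/*
 * Interface address event hook: called with RTM_ADD or RTM_DELETE when
 * an address is added to or removed from an interface, so the SCTP
 * address list for the default VRF stays in sync with the kernel.
 */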
void
sctp_addr_change(struct ifaddr *ifa, int cmd)
{
	uint32_t ifa_flags = 0;

	/*
	 * BSD only has one VRF.  If this changes, we will need to hook in
	 * the right things here to get the id to pass to the address
	 * management routine.
	 */
	if (SCTP_BASE_VAR(first_time) == 0) {
		/* Special test to see if my ::1 will show up with this */
		SCTP_BASE_VAR(first_time) = 1;
		sctp_init_ifns_for_vrf(SCTP_DEFAULT_VRFID);
	}
	if ((cmd != RTM_ADD) && (cmd != RTM_DELETE)) {
		/* don't know what to do with this */
		return;
	}
	if (ifa->ifa_addr == NULL) {
		return;
	}
	switch (ifa->ifa_addr->sa_family) {
#ifdef INET
	case AF_INET:
		if (((struct sockaddr_in *)ifa->ifa_addr)->sin_addr.s_addr == 0) {
			return;
		}
		break;
#endif
#ifdef INET6
	case AF_INET6:
		ifa_flags = ((struct in6_ifaddr *)ifa)->ia6_flags;
		if (IN6_IS_ADDR_UNSPECIFIED(&((struct sockaddr_in6 *)ifa->ifa_addr)->sin6_addr)) {
			/* skip unspecified addresses */
			return;
		}
		break;
#endif
	default:
		/* not inet/inet6, skip */
		return;
	}

	if (sctp_is_desired_interface_type(ifa) == 0) {
		/* not a desired interface type */
		return;
	}
	if (cmd == RTM_ADD) {
		(void)sctp_add_addr_to_vrf(SCTP_DEFAULT_VRFID, (void *)ifa->ifa_ifp,
		    ifa->ifa_ifp->if_index, ifa->ifa_ifp->if_type,
		    ifa->ifa_ifp->if_xname,
		    (void *)ifa, ifa->ifa_addr, ifa_flags, 1);
	} else {
		sctp_del_addr_from_vrf(SCTP_DEFAULT_VRFID, ifa->ifa_addr,
		    ifa->ifa_ifp->if_index,
		    ifa->ifa_ifp->if_xname);
		/*
		 * We don't bump the refcount here, so when it completes
		 * the final delete will happen.
		 */
	}
}

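/*
 * For every interface accepted by 'pred', replay an RTM_ADD (add != 0)
 * or RTM_DELETE event for each of its addresses through
 * sctp_addr_change().
 */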
void
sctp_add_or_del_interfaces(int (*pred) (struct ifnet *), int add)
{
	struct ifnet *ifn;
	struct ifaddr *ifa;

	IFNET_RLOCK();
	TAILQ_FOREACH(ifn, &MODULE_GLOBAL(ifnet), if_list) {
		if (!(*pred) (ifn)) {
			continue;
		}
		TAILQ_FOREACH(ifa, &ifn->if_addrlist, ifa_list) {
			sctp_addr_change(ifa, add ? RTM_ADD : RTM_DELETE);
		}
	}
	IFNET_RUNLOCK();
}

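/*
 * Allocate an mbuf with room for space_needed bytes.  Any additional
 * mbufs returned by m_getm2() are freed, so the caller always gets a
 * single mbuf.  If allonebuf is set and that single mbuf cannot hold
 * space_needed bytes, fail and return NULL.
 */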
struct mbuf *
sctp_get_mbuf_for_msg(unsigned int space_needed, int want_header,
    int how, int allonebuf, int type)
{
	struct mbuf *m = NULL;

	m = m_getm2(NULL, space_needed, how, type, want_header ? M_PKTHDR : 0);
	if (m == NULL) {
		/* bad, no memory */
		return (m);
	}
	if (allonebuf) {
		int siz;

		if (SCTP_BUF_IS_EXTENDED(m)) {
			siz = SCTP_BUF_EXTEND_SIZE(m);
		} else {
			if (want_header)
				siz = MHLEN;
			else
				siz = MLEN;
		}
		if (siz < space_needed) {
			m_freem(m);
			return (NULL);
		}
	}
	if (SCTP_BUF_NEXT(m)) {
		sctp_m_freem(SCTP_BUF_NEXT(m));
		SCTP_BUF_NEXT(m) = NULL;
	}
#ifdef SCTP_MBUF_LOGGING
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
		if (SCTP_BUF_IS_EXTENDED(m)) {
			sctp_log_mb(m, SCTP_MBUF_IALLOC);
		}
	}
#endif
	return (m);
}

#ifdef SCTP_PACKET_LOGGING
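/*
 * Append a copy of packet 'm' to the circular packet log.  Writers
 * reserve their slice of the buffer with an atomic compare-and-swap on
 * packet_log_end; the pktlog lock is only taken when the writer count
 * exceeds SCTP_PKTLOG_WRITERS_NEED_LOCK, which sctp_copy_out_packet_log()
 * uses to push writers onto the locked path while it copies the buffer.
 */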
void
sctp_packet_log(struct mbuf *m, int length)
{
	int *lenat, thisone;
	void *copyto;
	uint32_t *tick_tock;
	int total_len;
	int grabbed_lock = 0;
	int value, newval, thisend, thisbegin;

	/*
	 * Buffer layout of one log entry:
	 *   - sizeof this entry  (total_len)
	 *   - previous end       (value)
	 *   - ticks of log       (ticks)
	 *   - ip packet, as logged
	 *   - where this started (thisbegin)
	 *   x <-- end points here
	 */
	total_len = SCTP_SIZE32((length + (4 * sizeof(int))));
	/* Log a packet to the buffer. */
	if (total_len > SCTP_PACKET_LOG_SIZE) {
		/* Can't log this packet; the buffer is not big enough. */
		return;
	}
	if (length < (int)(SCTP_MIN_V4_OVERHEAD + sizeof(struct sctp_cookie_ack_chunk))) {
		return;
	}
	atomic_add_int(&SCTP_BASE_VAR(packet_log_writers), 1);
try_again:
	if (SCTP_BASE_VAR(packet_log_writers) > SCTP_PKTLOG_WRITERS_NEED_LOCK) {
		SCTP_IP_PKTLOG_LOCK();
		grabbed_lock = 1;
again_locked:
		value = SCTP_BASE_VAR(packet_log_end);
		newval = SCTP_BASE_VAR(packet_log_end) + total_len;
		if (newval >= SCTP_PACKET_LOG_SIZE) {
			/* we wrapped */
			thisbegin = 0;
			thisend = total_len;
		} else {
			thisbegin = SCTP_BASE_VAR(packet_log_end);
			thisend = newval;
		}
		if (!(atomic_cmpset_int(&SCTP_BASE_VAR(packet_log_end), value, thisend))) {
			goto again_locked;
		}
	} else {
		value = SCTP_BASE_VAR(packet_log_end);
		newval = SCTP_BASE_VAR(packet_log_end) + total_len;
		if (newval >= SCTP_PACKET_LOG_SIZE) {
			/* we wrapped */
			thisbegin = 0;
			thisend = total_len;
		} else {
			thisbegin = SCTP_BASE_VAR(packet_log_end);
			thisend = newval;
		}
		if (!(atomic_cmpset_int(&SCTP_BASE_VAR(packet_log_end), value, thisend))) {
			goto try_again;
		}
	}
	/* Sanity check */
	if (thisend >= SCTP_PACKET_LOG_SIZE) {
		printf("Insanity stops a log thisbegin:%d thisend:%d writers:%d lock:%d end:%d\n",
		    thisbegin,
		    thisend,
		    SCTP_BASE_VAR(packet_log_writers),
		    grabbed_lock,
		    SCTP_BASE_VAR(packet_log_end));
		SCTP_BASE_VAR(packet_log_end) = 0;
		goto no_log;
	}
	lenat = (int *)&SCTP_BASE_VAR(packet_log_buffer)[thisbegin];
	*lenat = total_len;
	lenat++;
	*lenat = value;
	lenat++;
	tick_tock = (uint32_t *) lenat;
	lenat++;
	*tick_tock = sctp_get_tick_count();
	copyto = (void *)lenat;
	thisone = thisend - sizeof(int);
	lenat = (int *)&SCTP_BASE_VAR(packet_log_buffer)[thisone];
	*lenat = thisbegin;
	if (grabbed_lock) {
		SCTP_IP_PKTLOG_UNLOCK();
		grabbed_lock = 0;
	}
	m_copydata(m, 0, length, (caddr_t)copyto);
no_log:
	if (grabbed_lock) {
		SCTP_IP_PKTLOG_UNLOCK();
	}
	atomic_subtract_int(&SCTP_BASE_VAR(packet_log_writers), 1);
}

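/*
 * Copy the packet log out to 'target'.  The first int written is the
 * current end offset within the log buffer, followed by up to
 * length - sizeof(int) bytes of the raw buffer contents.
 */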
int
sctp_copy_out_packet_log(uint8_t * target, int length)
{
	/*
	 * We wind through the packet log, copying out up to length bytes.
	 * We return the number of bytes copied.
	 */
	int tocopy, this_copy;
	int *lenat;
	int did_delay = 0;

	tocopy = length;
	if (length < (int)(2 * sizeof(int))) {
		/* not enough room */
		return (0);
	}
	if (SCTP_PKTLOG_WRITERS_NEED_LOCK) {
		atomic_add_int(&SCTP_BASE_VAR(packet_log_writers), SCTP_PKTLOG_WRITERS_NEED_LOCK);
again:
		if ((did_delay == 0) && (SCTP_BASE_VAR(packet_log_writers) != SCTP_PKTLOG_WRITERS_NEED_LOCK)) {
			/*
			 * We delay here for just a moment, hoping the
			 * writer(s) that were present when we entered will
			 * have left and we only have locking ones that will
			 * contend with us for the lock.  This does not
			 * assure 100% access, but it's good enough for a
			 * logging facility like this.
			 */
			did_delay = 1;
			DELAY(10);
			goto again;
		}
	}
	SCTP_IP_PKTLOG_LOCK();
	lenat = (int *)target;
	*lenat = SCTP_BASE_VAR(packet_log_end);
	lenat++;
	this_copy = min((length - sizeof(int)), SCTP_PACKET_LOG_SIZE);
	memcpy((void *)lenat, (void *)SCTP_BASE_VAR(packet_log_buffer), this_copy);
	if (SCTP_PKTLOG_WRITERS_NEED_LOCK) {
		atomic_subtract_int(&SCTP_BASE_VAR(packet_log_writers),
		    SCTP_PKTLOG_WRITERS_NEED_LOCK);
	}
	SCTP_IP_PKTLOG_UNLOCK();
	return (this_copy + sizeof(int));
}

#endif