xref: /freebsd/sys/netinet/sctp_bsd_addr.c (revision b2db760808f74bb53c232900091c9da801ebbfcc)
/*-
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *   this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

/* $KAME: sctp_output.c,v 1.46 2005/03/06 16:04:17 itojun Exp $	 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <netinet/sctp_os.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctp_header.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_bsd_addr.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_timer.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_indata.h>
#include <sys/unistd.h>

/* Declare all of our malloc named types */
MALLOC_DEFINE(SCTP_M_MAP, "sctp_map", "sctp asoc map descriptor");
MALLOC_DEFINE(SCTP_M_STRMI, "sctp_stri", "sctp stream in array");
MALLOC_DEFINE(SCTP_M_STRMO, "sctp_stro", "sctp stream out array");
MALLOC_DEFINE(SCTP_M_ASC_ADDR, "sctp_aadr", "sctp asconf address");
MALLOC_DEFINE(SCTP_M_ASC_IT, "sctp_a_it", "sctp asconf iterator");
MALLOC_DEFINE(SCTP_M_AUTH_CL, "sctp_atcl", "sctp auth chunklist");
MALLOC_DEFINE(SCTP_M_AUTH_KY, "sctp_atky", "sctp auth key");
MALLOC_DEFINE(SCTP_M_AUTH_HL, "sctp_athm", "sctp auth hmac list");
MALLOC_DEFINE(SCTP_M_AUTH_IF, "sctp_athi", "sctp auth info");
MALLOC_DEFINE(SCTP_M_STRESET, "sctp_stre", "sctp stream reset");
MALLOC_DEFINE(SCTP_M_CMSG, "sctp_cmsg", "sctp CMSG buffer");
MALLOC_DEFINE(SCTP_M_COPYAL, "sctp_cpal", "sctp copy all");
MALLOC_DEFINE(SCTP_M_VRF, "sctp_vrf", "sctp vrf struct");
MALLOC_DEFINE(SCTP_M_IFA, "sctp_ifa", "sctp ifa struct");
MALLOC_DEFINE(SCTP_M_IFN, "sctp_ifn", "sctp ifn struct");
MALLOC_DEFINE(SCTP_M_TIMW, "sctp_timw", "sctp time block");
MALLOC_DEFINE(SCTP_M_MVRF, "sctp_mvrf", "sctp mvrf pcb list");
MALLOC_DEFINE(SCTP_M_ITER, "sctp_iter", "sctp iterator control");
MALLOC_DEFINE(SCTP_M_SOCKOPT, "sctp_socko", "sctp socket option");

/* Global NON-VNET structure that controls the iterator */
struct iterator_control sctp_it_ctl;
static int __sctp_thread_based_iterator_started = 0;


static void
sctp_cleanup_itqueue(void)
{
	struct sctp_iterator *it;

	while ((it = TAILQ_FIRST(&sctp_it_ctl.iteratorhead)) != NULL) {
		if (it->function_atend != NULL) {
			(*it->function_atend) (it->pointer, it->val);
		}
		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
		SCTP_FREE(it, SCTP_M_ITER);
	}
}


void
sctp_wakeup_iterator(void)
{
	wakeup(&sctp_it_ctl.iterator_running);
}

static void
sctp_iterator_thread(void *v)
{
	SCTP_IPI_ITERATOR_WQ_LOCK();
	while (1) {
		msleep(&sctp_it_ctl.iterator_running,
		    &sctp_it_ctl.ipi_iterator_wq_mtx,
		    0, "waiting_for_work", 0);
		if (sctp_it_ctl.iterator_flags & SCTP_ITERATOR_MUST_EXIT) {
			SCTP_IPI_ITERATOR_WQ_DESTROY();
			SCTP_ITERATOR_LOCK_DESTROY();
			sctp_cleanup_itqueue();
			__sctp_thread_based_iterator_started = 0;
			kthread_exit();
		}
		sctp_iterator_worker();
	}
}

void
sctp_startup_iterator(void)
{
	if (__sctp_thread_based_iterator_started) {
		/* You only get one */
		return;
	}
	/* init the iterator head */
	__sctp_thread_based_iterator_started = 1;
	sctp_it_ctl.iterator_running = 0;
	sctp_it_ctl.iterator_flags = 0;
	sctp_it_ctl.cur_it = NULL;
	SCTP_ITERATOR_LOCK_INIT();
	SCTP_IPI_ITERATOR_WQ_INIT();
	TAILQ_INIT(&sctp_it_ctl.iteratorhead);
	/* Don't care about return here */
	(void)kproc_create(sctp_iterator_thread,
	    (void *)NULL,
	    &sctp_it_ctl.thread_proc,
	    RFPROC,
	    SCTP_KTHREAD_PAGES,
	    SCTP_KTRHEAD_NAME);
}
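
/*
 * Illustrative sketch (not part of the original file, not compiled): how a
 * producer is expected to hand work to the iterator thread above.  The real
 * producer lives elsewhere in the SCTP code (sctputil.c); the helper name
 * and the exact queueing shown here are assumptions for illustration only.
 */
#if 0
static void
example_schedule_iterator_work(struct sctp_iterator *it)
{
	/* Queue the prepared iterator entry for the worker thread. */
	SCTP_IPI_ITERATOR_WQ_LOCK();
	TAILQ_INSERT_TAIL(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
	SCTP_IPI_ITERATOR_WQ_UNLOCK();
	/* Kick the msleep() in sctp_iterator_thread(). */
	sctp_wakeup_iterator();
}
#endif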

#ifdef INET6

void
sctp_gather_internal_ifa_flags(struct sctp_ifa *ifa)
{
	struct in6_ifaddr *ifa6;

	ifa6 = (struct in6_ifaddr *)ifa->ifa;
	ifa->flags = ifa6->ia6_flags;
	if (!MODULE_GLOBAL(ip6_use_deprecated)) {
		if (ifa->flags & IN6_IFF_DEPRECATED) {
			ifa->localifa_flags |= SCTP_ADDR_IFA_UNUSEABLE;
		} else {
			ifa->localifa_flags &= ~SCTP_ADDR_IFA_UNUSEABLE;
		}
	} else {
		ifa->localifa_flags &= ~SCTP_ADDR_IFA_UNUSEABLE;
	}
	if (ifa->flags &
	    (IN6_IFF_DETACHED | IN6_IFF_ANYCAST | IN6_IFF_NOTREADY)) {
		ifa->localifa_flags |= SCTP_ADDR_IFA_UNUSEABLE;
	} else {
		ifa->localifa_flags &= ~SCTP_ADDR_IFA_UNUSEABLE;
	}
}

#endif				/* INET6 */


static uint32_t
sctp_is_desired_interface_type(struct ifaddr *ifa)
{
	int result;

	/* check the interface type to see if it's one we care about */
	switch (ifa->ifa_ifp->if_type) {
	case IFT_ETHER:
	case IFT_ISO88023:
	case IFT_ISO88024:
	case IFT_ISO88025:
	case IFT_ISO88026:
	case IFT_STARLAN:
	case IFT_P10:
	case IFT_P80:
	case IFT_HY:
	case IFT_FDDI:
	case IFT_XETHER:
	case IFT_ISDNBASIC:
	case IFT_ISDNPRIMARY:
	case IFT_PTPSERIAL:
	case IFT_OTHER:
	case IFT_PPP:
	case IFT_LOOP:
	case IFT_SLIP:
	case IFT_GIF:
	case IFT_L2VLAN:
	case IFT_IP:
	case IFT_IPOVERCDLC:
	case IFT_IPOVERCLAW:
	case IFT_VIRTUALIPADDRESS:
		result = 1;
		break;
	default:
		result = 0;
	}

	return (result);
}


static void
sctp_init_ifns_for_vrf(int vrfid)
{
	/*
	 * Here we must apply any locks needed by the IFN we access and also
	 * make sure we lock any IFA that exists as we walk the list of
	 * IFAs.
	 */
	struct ifnet *ifn;
	struct ifaddr *ifa;
	struct in6_ifaddr *ifa6;
	struct sctp_ifa *sctp_ifa;
	uint32_t ifa_flags;

	IFNET_RLOCK();
	TAILQ_FOREACH(ifn, &MODULE_GLOBAL(ifnet), if_list) {
		IF_ADDR_LOCK(ifn);
		TAILQ_FOREACH(ifa, &ifn->if_addrlist, ifa_list) {
			if (ifa->ifa_addr == NULL) {
				continue;
			}
			if ((ifa->ifa_addr->sa_family != AF_INET) && (ifa->ifa_addr->sa_family != AF_INET6)) {
				/* not inet/inet6, skip */
				continue;
			}
			if (ifa->ifa_addr->sa_family == AF_INET6) {
				if (IN6_IS_ADDR_UNSPECIFIED(&((struct sockaddr_in6 *)ifa->ifa_addr)->sin6_addr)) {
					/* skip unspecified addresses */
					continue;
				}
			} else {
				if (((struct sockaddr_in *)ifa->ifa_addr)->sin_addr.s_addr == 0) {
					continue;
				}
			}
			if (sctp_is_desired_interface_type(ifa) == 0) {
				/* not a desired interface type, skip */
				continue;
			}
			if (ifa->ifa_addr->sa_family == AF_INET6) {
				ifa6 = (struct in6_ifaddr *)ifa;
				ifa_flags = ifa6->ia6_flags;
			} else {
				ifa_flags = 0;
			}
			sctp_ifa = sctp_add_addr_to_vrf(vrfid,
			    (void *)ifn,
			    ifn->if_index,
			    ifn->if_type,
			    ifn->if_xname,
			    (void *)ifa,
			    ifa->ifa_addr,
			    ifa_flags,
			    0);
			if (sctp_ifa) {
				sctp_ifa->localifa_flags &= ~SCTP_ADDR_DEFER_USE;
			}
		}
		IF_ADDR_UNLOCK(ifn);
	}
	IFNET_RUNLOCK();
}

void
sctp_init_vrf_list(int vrfid)
{
	if (vrfid > SCTP_MAX_VRF_ID)
		/* can't do that */
		return;

	/* Don't care about return here */
	(void)sctp_allocate_vrf(vrfid);

	/*
	 * Now we need to build all the IFNs for this VRF and their
	 * addresses.
	 */
	sctp_init_ifns_for_vrf(vrfid);
}

void
sctp_addr_change(struct ifaddr *ifa, int cmd)
{
	struct sctp_ifa *ifap = NULL;
	uint32_t ifa_flags = 0;

	/*
	 * BSD only has one VRF; if this changes we will need to hook in the
	 * right things here to get the id to pass to the address management
	 * routine.
	 */
	if (SCTP_BASE_VAR(first_time) == 0) {
		/* Special test to see if my ::1 will show up with this */
		SCTP_BASE_VAR(first_time) = 1;
		sctp_init_ifns_for_vrf(SCTP_DEFAULT_VRFID);
	}
	if ((cmd != RTM_ADD) && (cmd != RTM_DELETE)) {
		/* don't know what to do with this */
		return;
	}
	if (ifa->ifa_addr == NULL) {
		return;
	}
	if ((ifa->ifa_addr->sa_family != AF_INET) && (ifa->ifa_addr->sa_family != AF_INET6)) {
		/* not inet/inet6, skip */
		return;
	}
	if (ifa->ifa_addr->sa_family == AF_INET6) {
		ifa_flags = ((struct in6_ifaddr *)ifa)->ia6_flags;
		if (IN6_IS_ADDR_UNSPECIFIED(&((struct sockaddr_in6 *)ifa->ifa_addr)->sin6_addr)) {
			/* skip unspecified addresses */
			return;
		}
	} else {
		if (((struct sockaddr_in *)ifa->ifa_addr)->sin_addr.s_addr == 0) {
			return;
		}
	}

	if (sctp_is_desired_interface_type(ifa) == 0) {
		/* not a desired interface type, skip */
		return;
	}
	if (cmd == RTM_ADD) {
		ifap = sctp_add_addr_to_vrf(SCTP_DEFAULT_VRFID, (void *)ifa->ifa_ifp,
		    ifa->ifa_ifp->if_index, ifa->ifa_ifp->if_type,
		    ifa->ifa_ifp->if_xname,
		    (void *)ifa, ifa->ifa_addr, ifa_flags, 1);
	} else {
		sctp_del_addr_from_vrf(SCTP_DEFAULT_VRFID, ifa->ifa_addr,
		    ifa->ifa_ifp->if_index,
		    ifa->ifa_ifp->if_xname);
		/*
		 * We don't bump the refcount here, so when it completes the
		 * final delete will happen.
		 */
	}
}

void
sctp_add_or_del_interfaces(int (*pred) (struct ifnet *), int add)
{
	struct ifnet *ifn;
	struct ifaddr *ifa;

	IFNET_RLOCK();
	TAILQ_FOREACH(ifn, &MODULE_GLOBAL(ifnet), if_list) {
		if (!(*pred) (ifn)) {
			continue;
		}
		TAILQ_FOREACH(ifa, &ifn->if_addrlist, ifa_list) {
			sctp_addr_change(ifa, add ? RTM_ADD : RTM_DELETE);
		}
	}
	IFNET_RUNLOCK();
}
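
/*
 * Illustrative sketch (not part of the original file, not compiled): a
 * predicate as expected by sctp_add_or_del_interfaces() above.  The loopback
 * test and the helper name are assumptions chosen only to show the calling
 * convention.
 */
#if 0
static int
example_is_loopback(struct ifnet *ifn)
{
	/* Non-zero means "operate on this interface's addresses". */
	return ((ifn->if_flags & IFF_LOOPBACK) != 0);
}

/* Re-register every address on loopback interfaces: */
/*	sctp_add_or_del_interfaces(example_is_loopback, 1); */
#endif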

struct mbuf *
sctp_get_mbuf_for_msg(unsigned int space_needed, int want_header,
    int how, int allonebuf, int type)
{
	struct mbuf *m = NULL;

	m = m_getm2(NULL, space_needed, how, type, want_header ? M_PKTHDR : 0);
	if (m == NULL) {
		/* bad, no memory */
		return (m);
	}
	if (allonebuf) {
		int siz;

		if (SCTP_BUF_IS_EXTENDED(m)) {
			siz = SCTP_BUF_EXTEND_SIZE(m);
		} else {
			if (want_header)
				siz = MHLEN;
			else
				siz = MLEN;
		}
		if (siz < space_needed) {
			m_freem(m);
			return (NULL);
		}
	}
	if (SCTP_BUF_NEXT(m)) {
		sctp_m_freem(SCTP_BUF_NEXT(m));
		SCTP_BUF_NEXT(m) = NULL;
	}
#ifdef SCTP_MBUF_LOGGING
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
		if (SCTP_BUF_IS_EXTENDED(m)) {
			sctp_log_mb(m, SCTP_MBUF_IALLOC);
		}
	}
#endif
	return (m);
}
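
/*
 * Illustrative sketch (not part of the original file, not compiled): a
 * typical call into sctp_get_mbuf_for_msg() above.  The size and the use of
 * the resulting mbuf are made up for the example; real callers pass their
 * own requirements.
 */
#if 0
static struct mbuf *
example_allocate_chunk_mbuf(void)
{
	struct mbuf *m;

	/* One contiguous buffer, with a pkthdr, able to hold 128 bytes. */
	m = sctp_get_mbuf_for_msg(128, 1, M_DONTWAIT, 1, MT_DATA);
	if (m == NULL) {
		/* No memory, or the request did not fit in a single buffer. */
		return (NULL);
	}
	SCTP_BUF_LEN(m) = 0;	/* caller fills in data and sets the length */
	return (m);
}
#endif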


#ifdef SCTP_PACKET_LOGGING
void
sctp_packet_log(struct mbuf *m, int length)
{
	int *lenat, thisone;
	void *copyto;
	uint32_t *tick_tock;
	int total_len;
	int grabbed_lock = 0;
	int value, newval, thisend, thisbegin;

	/*
	 * Buffer layout of one log record:
	 *   - size of this entry     (total_len)
	 *   - previous end           (value)
	 *   - ticks of log           (ticks)
	 *   o ip packet, as logged
	 *   - where this started     (thisbegin)
	 *   x <-- end points here
	 */
	total_len = SCTP_SIZE32((length + (4 * sizeof(int))));
	/* Log a packet to the buffer. */
	if (total_len > SCTP_PACKET_LOG_SIZE) {
		/* Can't log this packet; the buffer is not big enough. */
		return;
	}
	if (length < (int)(SCTP_MIN_V4_OVERHEAD + sizeof(struct sctp_cookie_ack_chunk))) {
		return;
	}
	atomic_add_int(&SCTP_BASE_VAR(packet_log_writers), 1);
try_again:
	if (SCTP_BASE_VAR(packet_log_writers) > SCTP_PKTLOG_WRITERS_NEED_LOCK) {
		SCTP_IP_PKTLOG_LOCK();
		grabbed_lock = 1;
again_locked:
		value = SCTP_BASE_VAR(packet_log_end);
		newval = SCTP_BASE_VAR(packet_log_end) + total_len;
		if (newval >= SCTP_PACKET_LOG_SIZE) {
			/* we wrapped */
			thisbegin = 0;
			thisend = total_len;
		} else {
			thisbegin = SCTP_BASE_VAR(packet_log_end);
			thisend = newval;
		}
		if (!(atomic_cmpset_int(&SCTP_BASE_VAR(packet_log_end), value, thisend))) {
			goto again_locked;
		}
	} else {
		value = SCTP_BASE_VAR(packet_log_end);
		newval = SCTP_BASE_VAR(packet_log_end) + total_len;
		if (newval >= SCTP_PACKET_LOG_SIZE) {
			/* we wrapped */
			thisbegin = 0;
			thisend = total_len;
		} else {
			thisbegin = SCTP_BASE_VAR(packet_log_end);
			thisend = newval;
		}
		if (!(atomic_cmpset_int(&SCTP_BASE_VAR(packet_log_end), value, thisend))) {
			goto try_again;
		}
	}
	/* Sanity check */
	if (thisend >= SCTP_PACKET_LOG_SIZE) {
		printf("Insanity stops a log thisbegin:%d thisend:%d writers:%d lock:%d end:%d\n",
		    thisbegin,
		    thisend,
		    SCTP_BASE_VAR(packet_log_writers),
		    grabbed_lock,
		    SCTP_BASE_VAR(packet_log_end));
		SCTP_BASE_VAR(packet_log_end) = 0;
		goto no_log;
	}
	lenat = (int *)&SCTP_BASE_VAR(packet_log_buffer)[thisbegin];
	*lenat = total_len;
	lenat++;
	*lenat = value;
	lenat++;
	tick_tock = (uint32_t *) lenat;
	lenat++;
	*tick_tock = sctp_get_tick_count();
	copyto = (void *)lenat;
	thisone = thisend - sizeof(int);
	lenat = (int *)&SCTP_BASE_VAR(packet_log_buffer)[thisone];
	*lenat = thisbegin;
	if (grabbed_lock) {
		SCTP_IP_PKTLOG_UNLOCK();
		grabbed_lock = 0;
	}
	m_copydata(m, 0, length, (caddr_t)copyto);
no_log:
	if (grabbed_lock) {
		SCTP_IP_PKTLOG_UNLOCK();
	}
	atomic_subtract_int(&SCTP_BASE_VAR(packet_log_writers), 1);
}
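
/*
 * Illustrative sketch (not part of the original file, not compiled): reading
 * back one record that sctp_packet_log() wrote at offset 'rec', following
 * the record layout documented above.  The helper name, parameters and the
 * way the raw buffer is obtained are assumptions for illustration only.
 */
#if 0
static uint8_t *
example_decode_one_record(uint8_t *log, int rec, int *payload_len)
{
	int total_len, prev_end, trailer;
	uint32_t ticks;

	memcpy(&total_len, log + rec, sizeof(int));	/* padded record size */
	memcpy(&prev_end, log + rec + sizeof(int), sizeof(int));	/* previous packet_log_end */
	memcpy(&ticks, log + rec + 2 * sizeof(int), sizeof(uint32_t));	/* timestamp */
	memcpy(&trailer, log + rec + total_len - sizeof(int), sizeof(int));
	/* trailer points back at the start of this record, i.e. trailer == rec */
	*payload_len = total_len - (int)(4 * sizeof(int));	/* upper bound; includes 32-bit padding */
	return (log + rec + 3 * sizeof(int));	/* start of the logged packet bytes */
}
#endif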


int
sctp_copy_out_packet_log(uint8_t *target, int length)
{
	/*
	 * We wind through the packet log starting at the beginning, copying
	 * up to length bytes out. We return the number of bytes copied.
	 */
	int tocopy, this_copy;
	int *lenat;
	int did_delay = 0;

	tocopy = length;
	if (length < (int)(2 * sizeof(int))) {
		/* not enough room */
		return (0);
	}
	if (SCTP_PKTLOG_WRITERS_NEED_LOCK) {
		atomic_add_int(&SCTP_BASE_VAR(packet_log_writers), SCTP_PKTLOG_WRITERS_NEED_LOCK);
again:
		if ((did_delay == 0) && (SCTP_BASE_VAR(packet_log_writers) != SCTP_PKTLOG_WRITERS_NEED_LOCK)) {
			/*
			 * We delay here for just a moment, hoping the
			 * writer(s) that were present when we entered will
			 * have left and we only have locking ones that will
			 * contend with us for the lock. This does not
			 * assure 100% access, but it's good enough for a
			 * logging facility like this.
			 */
			did_delay = 1;
			DELAY(10);
			goto again;
		}
	}
	SCTP_IP_PKTLOG_LOCK();
	lenat = (int *)target;
	*lenat = SCTP_BASE_VAR(packet_log_end);
	lenat++;
	this_copy = min((length - sizeof(int)), SCTP_PACKET_LOG_SIZE);
	memcpy((void *)lenat, (void *)SCTP_BASE_VAR(packet_log_buffer), this_copy);
	if (SCTP_PKTLOG_WRITERS_NEED_LOCK) {
		atomic_subtract_int(&SCTP_BASE_VAR(packet_log_writers),
		    SCTP_PKTLOG_WRITERS_NEED_LOCK);
	}
	SCTP_IP_PKTLOG_UNLOCK();
	return (this_copy + sizeof(int));
}

#endif
562