/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <netinet/sctp_os.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctp_header.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_bsd_addr.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_timer.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_indata.h>
#include <sys/unistd.h>

/* Declare all of our malloc named types */
MALLOC_DEFINE(SCTP_M_MAP, "sctp_map", "sctp asoc map descriptor");
MALLOC_DEFINE(SCTP_M_STRMI, "sctp_stri", "sctp stream in array");
MALLOC_DEFINE(SCTP_M_STRMO, "sctp_stro", "sctp stream out array");
MALLOC_DEFINE(SCTP_M_ASC_ADDR, "sctp_aadr", "sctp asconf address");
MALLOC_DEFINE(SCTP_M_ASC_IT, "sctp_a_it", "sctp asconf iterator");
MALLOC_DEFINE(SCTP_M_AUTH_CL, "sctp_atcl", "sctp auth chunklist");
MALLOC_DEFINE(SCTP_M_AUTH_KY, "sctp_atky", "sctp auth key");
MALLOC_DEFINE(SCTP_M_AUTH_HL, "sctp_athm", "sctp auth hmac list");
MALLOC_DEFINE(SCTP_M_AUTH_IF, "sctp_athi", "sctp auth info");
MALLOC_DEFINE(SCTP_M_STRESET, "sctp_stre", "sctp stream reset");
MALLOC_DEFINE(SCTP_M_CMSG, "sctp_cmsg", "sctp CMSG buffer");
MALLOC_DEFINE(SCTP_M_COPYAL, "sctp_cpal", "sctp copy all");
MALLOC_DEFINE(SCTP_M_VRF, "sctp_vrf", "sctp vrf struct");
MALLOC_DEFINE(SCTP_M_IFA, "sctp_ifa", "sctp ifa struct");
MALLOC_DEFINE(SCTP_M_IFN, "sctp_ifn", "sctp ifn struct");
MALLOC_DEFINE(SCTP_M_TIMW, "sctp_timw", "sctp time block");
MALLOC_DEFINE(SCTP_M_MVRF, "sctp_mvrf", "sctp mvrf pcb list");
MALLOC_DEFINE(SCTP_M_ITER, "sctp_iter", "sctp iterator control");
MALLOC_DEFINE(SCTP_M_SOCKOPT, "sctp_socko", "sctp socket option");
MALLOC_DEFINE(SCTP_M_MCORE, "sctp_mcore", "sctp mcore queue");

/* Global NON-VNET structure that controls the iterator */
struct iterator_control sctp_it_ctl;

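/*
 * Wake the iterator kthread so it processes any iterator work queued on
 * sctp_it_ctl.iteratorhead.
 */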
void
sctp_wakeup_iterator(void)
{
	wakeup(&sctp_it_ctl.iterator_running);
}

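/*
 * Body of the iterator kthread: sleep on iterator_running until
 * sctp_wakeup_iterator() is called, then drain the work queue via
 * sctp_iterator_worker().
 */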
static void
sctp_iterator_thread(void *v SCTP_UNUSED)
{
	SCTP_IPI_ITERATOR_WQ_LOCK();
	/* In FreeBSD this thread never terminates. */
	for (;;) {
		msleep(&sctp_it_ctl.iterator_running,
		    &sctp_it_ctl.ipi_iterator_wq_mtx,
		    0, "waiting_for_work", 0);
		sctp_iterator_worker();
	}
}

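/*
 * Create the single iterator kthread together with the locks and work
 * queue it uses.  Safe to call more than once; only the first call does
 * anything.
 */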
void
sctp_startup_iterator(void)
{
	if (sctp_it_ctl.thread_proc) {
		/* You only get one */
		return;
	}
	/* Initialize global locks here, thus only once. */
	SCTP_ITERATOR_LOCK_INIT();
	SCTP_IPI_ITERATOR_WQ_INIT();
	TAILQ_INIT(&sctp_it_ctl.iteratorhead);
	kproc_create(sctp_iterator_thread,
	    (void *)NULL,
	    &sctp_it_ctl.thread_proc,
	    0,
	    SCTP_KTHREAD_PAGES,
	    SCTP_KTRHEAD_NAME);
}

#ifdef INET6

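/*
 * Translate the kernel's IPv6 address flags into SCTP's view of the
 * address: deprecated (unless ip6_use_deprecated allows it), detached,
 * anycast, and not-ready addresses are marked unusable.
 */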
void
sctp_gather_internal_ifa_flags(struct sctp_ifa *ifa)
{
	struct in6_ifaddr *ifa6;

	ifa6 = (struct in6_ifaddr *)ifa->ifa;
	ifa->flags = ifa6->ia6_flags;
	if (!MODULE_GLOBAL(ip6_use_deprecated)) {
		if (ifa->flags &
		    IN6_IFF_DEPRECATED) {
			ifa->localifa_flags |= SCTP_ADDR_IFA_UNUSEABLE;
		} else {
			ifa->localifa_flags &= ~SCTP_ADDR_IFA_UNUSEABLE;
		}
	} else {
		ifa->localifa_flags &= ~SCTP_ADDR_IFA_UNUSEABLE;
	}
	if (ifa->flags &
	    (IN6_IFF_DETACHED |
	    IN6_IFF_ANYCAST |
	    IN6_IFF_NOTREADY)) {
		ifa->localifa_flags |= SCTP_ADDR_IFA_UNUSEABLE;
	} else {
		ifa->localifa_flags &= ~SCTP_ADDR_IFA_UNUSEABLE;
	}
}
#endif				/* INET6 */

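/*
 * Return 1 if this interface type is one SCTP is willing to bind
 * addresses from (Ethernet, loopback, PPP, tunnels, and the like),
 * 0 otherwise.
 */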
static uint32_t
sctp_is_desired_interface_type(struct ifnet *ifn)
{
	int result;

	/* check the interface type to see if it's one we care about */
	switch (ifn->if_type) {
	case IFT_ETHER:
	case IFT_ISO88023:
	case IFT_ISO88024:
	case IFT_ISO88025:
	case IFT_ISO88026:
	case IFT_STARLAN:
	case IFT_P10:
	case IFT_P80:
	case IFT_HY:
	case IFT_FDDI:
	case IFT_XETHER:
	case IFT_ISDNBASIC:
	case IFT_ISDNPRIMARY:
	case IFT_PTPSERIAL:
	case IFT_OTHER:
	case IFT_PPP:
	case IFT_LOOP:
	case IFT_SLIP:
	case IFT_GIF:
	case IFT_L2VLAN:
	case IFT_STF:
	case IFT_IP:
	case IFT_IPOVERCDLC:
	case IFT_IPOVERCLAW:
	case IFT_PROPVIRTUAL:	/* NetGraph Virtual too */
	case IFT_VIRTUALIPADDRESS:
		result = 1;
		break;
	default:
		result = 0;
	}

	return (result);
}

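/*
 * Walk every interface in the system and register each usable address
 * found on it with the given VRF via sctp_add_addr_to_vrf().
 */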
static void
sctp_init_ifns_for_vrf(int vrfid)
{
	/*
	 * Here we must apply ANY locks needed by the IFN we access and also
	 * make sure we lock any IFA that exists as we float through the
	 * list of IFAs.
	 */
	struct epoch_tracker et;
	struct ifnet *ifn;
	struct ifaddr *ifa;
	struct sctp_ifa *sctp_ifa;
	uint32_t ifa_flags;
#ifdef INET6
	struct in6_ifaddr *ifa6;
#endif

	IFNET_RLOCK();
	NET_EPOCH_ENTER(et);
	CK_STAILQ_FOREACH(ifn, &MODULE_GLOBAL(ifnet), if_link) {
		if (sctp_is_desired_interface_type(ifn) == 0) {
			/* not a desired interface type */
			continue;
		}
		CK_STAILQ_FOREACH(ifa, &ifn->if_addrhead, ifa_link) {
			if (ifa->ifa_addr == NULL) {
				continue;
			}
			switch (ifa->ifa_addr->sa_family) {
#ifdef INET
			case AF_INET:
				if (((struct sockaddr_in *)ifa->ifa_addr)->sin_addr.s_addr == 0) {
					continue;
				}
				break;
#endif
#ifdef INET6
			case AF_INET6:
				if (IN6_IS_ADDR_UNSPECIFIED(&((struct sockaddr_in6 *)ifa->ifa_addr)->sin6_addr)) {
					/* skip unspecified addresses */
					continue;
				}
				break;
#endif
			default:
				continue;
			}
			switch (ifa->ifa_addr->sa_family) {
#ifdef INET
			case AF_INET:
				ifa_flags = 0;
				break;
#endif
#ifdef INET6
			case AF_INET6:
				ifa6 = (struct in6_ifaddr *)ifa;
				ifa_flags = ifa6->ia6_flags;
				break;
#endif
			default:
				ifa_flags = 0;
				break;
			}
			sctp_ifa = sctp_add_addr_to_vrf(vrfid,
			    (void *)ifn,
			    ifn->if_index,
			    ifn->if_type,
			    ifn->if_xname,
			    (void *)ifa,
			    ifa->ifa_addr,
			    ifa_flags,
			    0);
			if (sctp_ifa) {
				sctp_ifa->localifa_flags &= ~SCTP_ADDR_DEFER_USE;
			}
		}
	}
	NET_EPOCH_EXIT(et);
	IFNET_RUNLOCK();
}

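/*
 * Create the VRF (if needed) and populate it with the interfaces and
 * addresses currently configured in the system.
 */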
void
sctp_init_vrf_list(int vrfid)
{
	if (vrfid > SCTP_MAX_VRF_ID)
		/* can't do that */
		return;

	/* Don't care about the return value here */
	(void)sctp_allocate_vrf(vrfid);

	/*
	 * Now we need to build all the ifns for this VRF and their
	 * addresses.
	 */
	sctp_init_ifns_for_vrf(vrfid);
}

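/*
 * Handle a single address being added to or deleted from an interface.
 * Called (via sctp_addr_change_event_handler) for RTM_ADD and RTM_DELETE
 * events; anything else is ignored.
 */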
void
sctp_addr_change(struct ifaddr *ifa, int cmd)
{
	uint32_t ifa_flags = 0;

	if (SCTP_BASE_VAR(sctp_pcb_initialized) == 0) {
		return;
	}
	/*
	 * BSD only has one VRF.  If this changes, we will need to hook in
	 * the right things here to get the id to pass to the address
	 * management routine.
	 */
	if (SCTP_BASE_VAR(first_time) == 0) {
		/* Special test to see if my ::1 will show up with this */
		SCTP_BASE_VAR(first_time) = 1;
		sctp_init_ifns_for_vrf(SCTP_DEFAULT_VRFID);
	}

	if ((cmd != RTM_ADD) && (cmd != RTM_DELETE)) {
		/* don't know what to do with this */
		return;
	}

	if (ifa->ifa_addr == NULL) {
		return;
	}
	if (sctp_is_desired_interface_type(ifa->ifa_ifp) == 0) {
		/* not a desired interface type */
		return;
	}
	switch (ifa->ifa_addr->sa_family) {
#ifdef INET
	case AF_INET:
		if (((struct sockaddr_in *)ifa->ifa_addr)->sin_addr.s_addr == 0) {
			return;
		}
		break;
#endif
#ifdef INET6
	case AF_INET6:
		ifa_flags = ((struct in6_ifaddr *)ifa)->ia6_flags;
		if (IN6_IS_ADDR_UNSPECIFIED(&((struct sockaddr_in6 *)ifa->ifa_addr)->sin6_addr)) {
			/* skip unspecified addresses */
			return;
		}
		break;
#endif
	default:
		/* neither INET nor INET6, skip it */
		return;
	}
	if (cmd == RTM_ADD) {
		(void)sctp_add_addr_to_vrf(SCTP_DEFAULT_VRFID, (void *)ifa->ifa_ifp,
		    ifa->ifa_ifp->if_index, ifa->ifa_ifp->if_type, ifa->ifa_ifp->if_xname,
		    (void *)ifa, ifa->ifa_addr, ifa_flags, 1);
	} else {
		sctp_del_addr_from_vrf(SCTP_DEFAULT_VRFID, ifa->ifa_addr,
		    ifa->ifa_ifp->if_index,
		    ifa->ifa_ifp->if_xname);

		/*
		 * We don't bump the refcount here, so when it completes
		 * the final delete will happen.
		 */
	}
}

void
sctp_addr_change_event_handler(void *arg __unused, struct ifaddr *ifa, int cmd)
{
	sctp_addr_change(ifa, cmd);
}

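/*
 * Allocate an mbuf (chain) able to hold space_needed bytes, optionally
 * with a packet header.  If allonebuf is set, the caller requires a
 * single buffer, so anything smaller than space_needed is rejected.
 * Typical use: sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA).
 */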
struct mbuf *
sctp_get_mbuf_for_msg(unsigned int space_needed, int want_header,
    int how, int allonebuf, int type)
{
	struct mbuf *m = NULL;

	m = m_getm2(NULL, space_needed, how, type, want_header ? M_PKTHDR : 0);
	if (m == NULL) {
		/* bad, no memory */
		return (m);
	}
	if (allonebuf) {
		if (SCTP_BUF_SIZE(m) < space_needed) {
			m_freem(m);
			return (NULL);
		}
		KASSERT(SCTP_BUF_NEXT(m) == NULL, ("%s: no chain allowed", __func__));
	}
#ifdef SCTP_MBUF_LOGGING
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
		sctp_log_mb(m, SCTP_MBUF_IALLOC);
	}
#endif
	return (m);
}

#ifdef SCTP_PACKET_LOGGING
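/*
 * Append a copy of the packet in m to the circular packet log buffer.
 * Writers normally reserve their slot with a lock-free atomic cmpset;
 * once the number of concurrent writers exceeds
 * SCTP_PKTLOG_WRITERS_NEED_LOCK, the packet log lock is taken as well.
 */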
void
sctp_packet_log(struct mbuf *m)
{
	int *lenat, thisone;
	void *copyto;
	uint32_t *tick_tock;
	int length;
	int total_len;
	int grabbed_lock = 0;
	int value, newval, thisend, thisbegin;

	/*
	 * Buffer layout of one entry:
	 * - sizeof this entry  (total_len)
	 * - previous end       (value)
	 * - ticks of log       (ticks)
	 * o ip packet, as logged
	 * - where this started (thisbegin)
	 * x <-- end points here
	 */
	length = SCTP_HEADER_LEN(m);
	total_len = SCTP_SIZE32((length + (4 * sizeof(int))));
	/* Log a packet to the buffer. */
	if (total_len > SCTP_PACKET_LOG_SIZE) {
		/* Can't log this packet, the buffer is not big enough. */
		return;
	}
	if (length < (int)(SCTP_MIN_V4_OVERHEAD + sizeof(struct sctp_cookie_ack_chunk))) {
		return;
	}
	atomic_add_int(&SCTP_BASE_VAR(packet_log_writers), 1);
try_again:
	if (SCTP_BASE_VAR(packet_log_writers) > SCTP_PKTLOG_WRITERS_NEED_LOCK) {
		SCTP_IP_PKTLOG_LOCK();
		grabbed_lock = 1;
again_locked:
		value = SCTP_BASE_VAR(packet_log_end);
		newval = SCTP_BASE_VAR(packet_log_end) + total_len;
		if (newval >= SCTP_PACKET_LOG_SIZE) {
			/* we wrapped */
			thisbegin = 0;
			thisend = total_len;
		} else {
			thisbegin = SCTP_BASE_VAR(packet_log_end);
			thisend = newval;
		}
		if (!(atomic_cmpset_int(&SCTP_BASE_VAR(packet_log_end), value, thisend))) {
			goto again_locked;
		}
	} else {
		value = SCTP_BASE_VAR(packet_log_end);
		newval = SCTP_BASE_VAR(packet_log_end) + total_len;
		if (newval >= SCTP_PACKET_LOG_SIZE) {
			/* we wrapped */
			thisbegin = 0;
			thisend = total_len;
		} else {
			thisbegin = SCTP_BASE_VAR(packet_log_end);
			thisend = newval;
		}
		if (!(atomic_cmpset_int(&SCTP_BASE_VAR(packet_log_end), value, thisend))) {
			goto try_again;
		}
	}
	/* Sanity check */
	if (thisend >= SCTP_PACKET_LOG_SIZE) {
		SCTP_PRINTF("Insanity stops a log thisbegin:%d thisend:%d writers:%d lock:%d end:%d\n",
		    thisbegin,
		    thisend,
		    SCTP_BASE_VAR(packet_log_writers),
		    grabbed_lock,
		    SCTP_BASE_VAR(packet_log_end));
		SCTP_BASE_VAR(packet_log_end) = 0;
		goto no_log;
	}
	lenat = (int *)&SCTP_BASE_VAR(packet_log_buffer)[thisbegin];
	*lenat = total_len;
	lenat++;
	*lenat = value;
	lenat++;
	tick_tock = (uint32_t *)lenat;
	lenat++;
	*tick_tock = sctp_get_tick_count();
	copyto = (void *)lenat;
	thisone = thisend - sizeof(int);
	lenat = (int *)&SCTP_BASE_VAR(packet_log_buffer)[thisone];
	*lenat = thisbegin;
	if (grabbed_lock) {
		SCTP_IP_PKTLOG_UNLOCK();
		grabbed_lock = 0;
	}
	m_copydata(m, 0, length, (caddr_t)copyto);
no_log:
	if (grabbed_lock) {
		SCTP_IP_PKTLOG_UNLOCK();
	}
	atomic_subtract_int(&SCTP_BASE_VAR(packet_log_writers), 1);
}

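/*
 * Copy the packet log out to target: the first int written is the
 * current end offset of the ring, followed by as much of the raw log
 * buffer as fits in length bytes.  Returns the number of bytes copied.
 */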
int
sctp_copy_out_packet_log(uint8_t *target, int length)
{
	/*
	 * We wind through the packet log, starting at the start, copying
	 * up to length bytes out.  We return the number of bytes copied.
	 */
	int this_copy;
	int *lenat;
	int did_delay = 0;

	if (length < (int)(2 * sizeof(int))) {
		/* not enough room */
		return (0);
	}
	if (SCTP_PKTLOG_WRITERS_NEED_LOCK) {
		atomic_add_int(&SCTP_BASE_VAR(packet_log_writers), SCTP_PKTLOG_WRITERS_NEED_LOCK);
again:
		if ((did_delay == 0) && (SCTP_BASE_VAR(packet_log_writers) != SCTP_PKTLOG_WRITERS_NEED_LOCK)) {
			/*
			 * We delay here for just a moment hoping the
			 * writer(s) that were present when we entered will
			 * have left and we only have locking ones that will
			 * contend with us for the lock.  This does not
			 * assure 100% access, but it's good enough for a
			 * logging facility like this.
			 */
			did_delay = 1;
			DELAY(10);
			goto again;
		}
	}
	SCTP_IP_PKTLOG_LOCK();
	lenat = (int *)target;
	*lenat = SCTP_BASE_VAR(packet_log_end);
	lenat++;
	this_copy = min((length - sizeof(int)), SCTP_PACKET_LOG_SIZE);
	memcpy((void *)lenat, (void *)SCTP_BASE_VAR(packet_log_buffer), this_copy);
	if (SCTP_PKTLOG_WRITERS_NEED_LOCK) {
		atomic_subtract_int(&SCTP_BASE_VAR(packet_log_writers),
		    SCTP_PKTLOG_WRITERS_NEED_LOCK);
	}
	SCTP_IP_PKTLOG_UNLOCK();
	return (this_copy + sizeof(int));
}

#endif