xref: /freebsd/sys/netinet/sctp_bsd_addr.c (revision 1d386b48a555f61cb7325543adbbb5c3f3407a66)
/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <netinet/sctp_os.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctp_header.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_bsd_addr.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_timer.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_indata.h>
#include <sys/unistd.h>

/* Declare all of our malloc named types */
MALLOC_DEFINE(SCTP_M_MAP, "sctp_map", "sctp asoc map descriptor");
MALLOC_DEFINE(SCTP_M_STRMI, "sctp_stri", "sctp stream in array");
MALLOC_DEFINE(SCTP_M_STRMO, "sctp_stro", "sctp stream out array");
MALLOC_DEFINE(SCTP_M_ASC_ADDR, "sctp_aadr", "sctp asconf address");
MALLOC_DEFINE(SCTP_M_ASC_IT, "sctp_a_it", "sctp asconf iterator");
MALLOC_DEFINE(SCTP_M_AUTH_CL, "sctp_atcl", "sctp auth chunklist");
MALLOC_DEFINE(SCTP_M_AUTH_KY, "sctp_atky", "sctp auth key");
MALLOC_DEFINE(SCTP_M_AUTH_HL, "sctp_athm", "sctp auth hmac list");
MALLOC_DEFINE(SCTP_M_AUTH_IF, "sctp_athi", "sctp auth info");
MALLOC_DEFINE(SCTP_M_STRESET, "sctp_stre", "sctp stream reset");
MALLOC_DEFINE(SCTP_M_CMSG, "sctp_cmsg", "sctp CMSG buffer");
MALLOC_DEFINE(SCTP_M_COPYAL, "sctp_cpal", "sctp copy all");
MALLOC_DEFINE(SCTP_M_VRF, "sctp_vrf", "sctp vrf struct");
MALLOC_DEFINE(SCTP_M_IFA, "sctp_ifa", "sctp ifa struct");
MALLOC_DEFINE(SCTP_M_IFN, "sctp_ifn", "sctp ifn struct");
MALLOC_DEFINE(SCTP_M_TIMW, "sctp_timw", "sctp time block");
MALLOC_DEFINE(SCTP_M_MVRF, "sctp_mvrf", "sctp mvrf pcb list");
MALLOC_DEFINE(SCTP_M_ITER, "sctp_iter", "sctp iterator control");
MALLOC_DEFINE(SCTP_M_SOCKOPT, "sctp_socko", "sctp socket option");
MALLOC_DEFINE(SCTP_M_MCORE, "sctp_mcore", "sctp mcore queue");

/* Global NON-VNET structure that controls the iterator */
struct iterator_control sctp_it_ctl;

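/*
 * Wake up the iterator thread, which sleeps on
 * sctp_it_ctl.iterator_running until work has been queued for it.
 */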
void
sctp_wakeup_iterator(void)
{
	wakeup(&sctp_it_ctl.iterator_running);
}

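/*
 * Body of the iterator kthread: acquire the iterator workqueue mutex
 * once, then loop forever, sleeping on sctp_it_ctl.iterator_running
 * until sctp_wakeup_iterator() signals that work has been queued, at
 * which point sctp_iterator_worker() is run.
 */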
static void
sctp_iterator_thread(void *v SCTP_UNUSED)
{
	SCTP_IPI_ITERATOR_WQ_LOCK();
	/* In FreeBSD this thread never terminates. */
	for (;;) {
		msleep(&sctp_it_ctl.iterator_running,
		    &sctp_it_ctl.ipi_iterator_wq_mtx,
		    0, "waiting_for_work", 0);
		sctp_iterator_worker();
	}
}

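/*
 * Create the iterator kthread on first use. The global iterator and
 * workqueue locks are initialized here as well, so this must run only
 * once; a non-NULL thread_proc indicates the thread already exists.
 */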
void
sctp_startup_iterator(void)
{
	if (sctp_it_ctl.thread_proc) {
		/* You only get one */
		return;
	}
	/* Initialize global locks here, thus only once. */
	SCTP_ITERATOR_LOCK_INIT();
	SCTP_IPI_ITERATOR_WQ_INIT();
	TAILQ_INIT(&sctp_it_ctl.iteratorhead);
	kproc_create(sctp_iterator_thread,
	    (void *)NULL,
	    &sctp_it_ctl.thread_proc,
	    0,
	    SCTP_KTHREAD_PAGES,
	    SCTP_KTRHEAD_NAME);
}

#ifdef INET6

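/*
 * Translate the kernel's in6_ifaddr flags into the SCTP view of the
 * address: deprecated (unless ip6_use_deprecated allows it), detached,
 * anycast, and not-ready addresses are marked SCTP_ADDR_IFA_UNUSEABLE;
 * otherwise that flag is cleared.
 */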
void
sctp_gather_internal_ifa_flags(struct sctp_ifa *ifa)
{
	struct in6_ifaddr *ifa6;

	ifa6 = (struct in6_ifaddr *)ifa->ifa;
	ifa->flags = ifa6->ia6_flags;
	if (!MODULE_GLOBAL(ip6_use_deprecated)) {
		if (ifa->flags & IN6_IFF_DEPRECATED) {
			ifa->localifa_flags |= SCTP_ADDR_IFA_UNUSEABLE;
		} else {
			ifa->localifa_flags &= ~SCTP_ADDR_IFA_UNUSEABLE;
		}
	} else {
		ifa->localifa_flags &= ~SCTP_ADDR_IFA_UNUSEABLE;
	}
	if (ifa->flags &
	    (IN6_IFF_DETACHED |
	    IN6_IFF_ANYCAST |
	    IN6_IFF_NOTREADY)) {
		ifa->localifa_flags |= SCTP_ADDR_IFA_UNUSEABLE;
	} else {
		ifa->localifa_flags &= ~SCTP_ADDR_IFA_UNUSEABLE;
	}
}
#endif				/* INET6 */

static uint32_t
sctp_is_desired_interface_type(struct ifnet *ifn)
{
	int result;

	/* Check the interface type to see if it's one we care about. */
	switch (ifn->if_type) {
	case IFT_ETHER:
	case IFT_ISO88023:
	case IFT_ISO88024:
	case IFT_ISO88025:
	case IFT_ISO88026:
	case IFT_STARLAN:
	case IFT_P10:
	case IFT_P80:
	case IFT_HY:
	case IFT_FDDI:
	case IFT_XETHER:
	case IFT_ISDNBASIC:
	case IFT_ISDNPRIMARY:
	case IFT_PTPSERIAL:
	case IFT_OTHER:
	case IFT_PPP:
	case IFT_LOOP:
	case IFT_SLIP:
	case IFT_GIF:
	case IFT_L2VLAN:
	case IFT_STF:
	case IFT_IP:
	case IFT_IPOVERCDLC:
	case IFT_IPOVERCLAW:
	case IFT_PROPVIRTUAL:	/* NetGraph virtual too */
	case IFT_VIRTUALIPADDRESS:
		result = 1;
		break;
	default:
		result = 0;
	}

	return (result);
}

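/*
 * Walk every interface in the system, and every address on it, and
 * register the usable IPv4/IPv6 addresses with the given VRF via
 * sctp_add_addr_to_vrf(). Wildcard/unspecified addresses and interface
 * types we do not handle are skipped.
 */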
static void
sctp_init_ifns_for_vrf(int vrfid)
{
	/*
	 * Here we must apply ANY locks needed by the IFN we access and also
	 * make sure we lock any IFA that exists as we float through the
	 * list of IFAs.
	 */
	struct epoch_tracker et;
	struct ifnet *ifn;
	struct ifaddr *ifa;
	struct sctp_ifa *sctp_ifa;
	uint32_t ifa_flags;
#ifdef INET6
	struct in6_ifaddr *ifa6;
#endif

	IFNET_RLOCK();
	NET_EPOCH_ENTER(et);
	CK_STAILQ_FOREACH(ifn, &MODULE_GLOBAL(ifnet), if_link) {
		if (sctp_is_desired_interface_type(ifn) == 0) {
			/* not a desired interface type */
			continue;
		}
		CK_STAILQ_FOREACH(ifa, &ifn->if_addrhead, ifa_link) {
			if (ifa->ifa_addr == NULL) {
				continue;
			}
			switch (ifa->ifa_addr->sa_family) {
#ifdef INET
			case AF_INET:
				if (((struct sockaddr_in *)ifa->ifa_addr)->sin_addr.s_addr == 0) {
					continue;
				}
				break;
#endif
#ifdef INET6
			case AF_INET6:
				if (IN6_IS_ADDR_UNSPECIFIED(&((struct sockaddr_in6 *)ifa->ifa_addr)->sin6_addr)) {
					/* skip unspecified addresses */
					continue;
				}
				break;
#endif
			default:
				continue;
			}
			switch (ifa->ifa_addr->sa_family) {
#ifdef INET
			case AF_INET:
				ifa_flags = 0;
				break;
#endif
#ifdef INET6
			case AF_INET6:
				ifa6 = (struct in6_ifaddr *)ifa;
				ifa_flags = ifa6->ia6_flags;
				break;
#endif
			default:
				ifa_flags = 0;
				break;
			}
			sctp_ifa = sctp_add_addr_to_vrf(vrfid,
			    (void *)ifn,
			    ifn->if_index,
			    ifn->if_type,
			    ifn->if_xname,
			    (void *)ifa,
			    ifa->ifa_addr,
			    ifa_flags,
			    0);
			if (sctp_ifa) {
				sctp_ifa->localifa_flags &= ~SCTP_ADDR_DEFER_USE;
			}
		}
	}
	NET_EPOCH_EXIT(et);
	IFNET_RUNLOCK();
}

void
sctp_init_vrf_list(int vrfid)
{
	if (vrfid > SCTP_MAX_VRF_ID)
		/* can't do that */
		return;

	/* Don't care about the return value here. */
	(void)sctp_allocate_vrf(vrfid);

	/*
	 * Now we need to build all the ifns for this VRF and their
	 * addresses.
	 */
	sctp_init_ifns_for_vrf(vrfid);
}

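/*
 * Handle a single interface-address change. RTM_ADD registers the new
 * address with the default VRF and RTM_DELETE removes it; any other
 * command is ignored, as are wildcard addresses and interface types we
 * do not use. BSD only has one VRF, so SCTP_DEFAULT_VRFID is always
 * passed down.
 */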
void
sctp_addr_change(struct ifaddr *ifa, int cmd)
{
	uint32_t ifa_flags = 0;

	if (SCTP_BASE_VAR(sctp_pcb_initialized) == 0) {
		return;
	}
	/*
	 * BSD only has one VRF. If this changes, we will need to hook in
	 * the right things here to get the id to pass to the address
	 * management routine.
	 */
	if (SCTP_BASE_VAR(first_time) == 0) {
		/* Special test to see if my ::1 will show up with this. */
		SCTP_BASE_VAR(first_time) = 1;
		sctp_init_ifns_for_vrf(SCTP_DEFAULT_VRFID);
	}

	if ((cmd != RTM_ADD) && (cmd != RTM_DELETE)) {
		/* don't know what to do with this */
		return;
	}

	if (ifa->ifa_addr == NULL) {
		return;
	}
	if (sctp_is_desired_interface_type(ifa->ifa_ifp) == 0) {
		/* not a desired interface type */
		return;
	}
	switch (ifa->ifa_addr->sa_family) {
#ifdef INET
	case AF_INET:
		if (((struct sockaddr_in *)ifa->ifa_addr)->sin_addr.s_addr == 0) {
			return;
		}
		break;
#endif
#ifdef INET6
	case AF_INET6:
		ifa_flags = ((struct in6_ifaddr *)ifa)->ia6_flags;
		if (IN6_IS_ADDR_UNSPECIFIED(&((struct sockaddr_in6 *)ifa->ifa_addr)->sin6_addr)) {
			/* skip unspecified addresses */
			return;
		}
		break;
#endif
	default:
		/* not INET or INET6, skip it */
		return;
	}
	if (cmd == RTM_ADD) {
		(void)sctp_add_addr_to_vrf(SCTP_DEFAULT_VRFID, (void *)ifa->ifa_ifp,
		    ifa->ifa_ifp->if_index, ifa->ifa_ifp->if_type, ifa->ifa_ifp->if_xname,
		    (void *)ifa, ifa->ifa_addr, ifa_flags, 1);
	} else {
		sctp_del_addr_from_vrf(SCTP_DEFAULT_VRFID, ifa->ifa_addr,
		    ifa->ifa_ifp->if_index,
		    ifa->ifa_ifp->if_xname);

		/*
		 * We don't bump the refcount here, so when it completes
		 * the final delete will happen.
		 */
	}
}

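/*
 * Thin wrapper so sctp_addr_change() can be hooked up as an ifaddr
 * event handler; the opaque event argument is unused.
 */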
void
sctp_addr_change_event_handler(void *arg __unused, struct ifaddr *ifa, int cmd)
{
	sctp_addr_change(ifa, cmd);
}

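/*
 * Allocate an mbuf (chain) with room for at least space_needed bytes,
 * optionally with a packet header. If allonebuf is set, the caller
 * requires a single buffer: when the first mbuf/cluster cannot hold the
 * whole request, the allocation is freed and NULL is returned. NULL is
 * also returned on allocation failure. For example, a caller wanting a
 * contiguous 64-byte chunk with a packet header might use:
 *
 *	m = sctp_get_mbuf_for_msg(64, 1, M_NOWAIT, 1, MT_DATA);
 */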
struct mbuf *
sctp_get_mbuf_for_msg(unsigned int space_needed, int want_header,
    int how, int allonebuf, int type)
{
	struct mbuf *m = NULL;

	m = m_getm2(NULL, space_needed, how, type, want_header ? M_PKTHDR : 0);
	if (m == NULL) {
		/* bad, no memory */
		return (m);
	}
	if (allonebuf) {
		if (SCTP_BUF_SIZE(m) < space_needed) {
			m_freem(m);
			return (NULL);
		}
		KASSERT(SCTP_BUF_NEXT(m) == NULL, ("%s: no chain allowed", __func__));
	}
#ifdef SCTP_MBUF_LOGGING
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
		sctp_log_mb(m, SCTP_MBUF_IALLOC);
	}
#endif
	return (m);
}

#ifdef SCTP_PACKET_LOGGING
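/*
 * Append a copy of the packet in 'm' to the global circular packet log
 * buffer. Writers normally reserve space lock-free with an atomic
 * compare-and-swap on packet_log_end; once the number of concurrent
 * writers exceeds SCTP_PKTLOG_WRITERS_NEED_LOCK, the pktlog lock is
 * taken to serialize them. Packets too large for the buffer, or smaller
 * than a minimal SCTP packet, are silently dropped.
 */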
void
sctp_packet_log(struct mbuf *m)
{
	int *lenat, thisone;
	void *copyto;
	uint32_t *tick_tock;
	int length;
	int total_len;
	int grabbed_lock = 0;
	int value, newval, thisend, thisbegin;

	/*
	 * Layout of one log entry:
	 *   - sizeof this entry   (total_len)
	 *   - previous end        (value)
	 *   - ticks of log        (ticks)
	 *   - the IP packet, as logged
	 *   - where this started  (thisbegin)  <-- end points here
	 */
	length = SCTP_HEADER_LEN(m);
	total_len = SCTP_SIZE32((length + (4 * sizeof(int))));
	/* Log a packet to the buffer. */
	if (total_len > SCTP_PACKET_LOG_SIZE) {
		/* Can't log this packet; the buffer is not big enough. */
		return;
	}
	if (length < (int)(SCTP_MIN_V4_OVERHEAD + sizeof(struct sctp_cookie_ack_chunk))) {
		return;
	}
	atomic_add_int(&SCTP_BASE_VAR(packet_log_writers), 1);
try_again:
	if (SCTP_BASE_VAR(packet_log_writers) > SCTP_PKTLOG_WRITERS_NEED_LOCK) {
		SCTP_IP_PKTLOG_LOCK();
		grabbed_lock = 1;
again_locked:
		value = SCTP_BASE_VAR(packet_log_end);
		newval = SCTP_BASE_VAR(packet_log_end) + total_len;
		if (newval >= SCTP_PACKET_LOG_SIZE) {
			/* we wrapped */
			thisbegin = 0;
			thisend = total_len;
		} else {
			thisbegin = SCTP_BASE_VAR(packet_log_end);
			thisend = newval;
		}
		if (!(atomic_cmpset_int(&SCTP_BASE_VAR(packet_log_end), value, thisend))) {
			goto again_locked;
		}
	} else {
		value = SCTP_BASE_VAR(packet_log_end);
		newval = SCTP_BASE_VAR(packet_log_end) + total_len;
		if (newval >= SCTP_PACKET_LOG_SIZE) {
			/* we wrapped */
			thisbegin = 0;
			thisend = total_len;
		} else {
			thisbegin = SCTP_BASE_VAR(packet_log_end);
			thisend = newval;
		}
		if (!(atomic_cmpset_int(&SCTP_BASE_VAR(packet_log_end), value, thisend))) {
			goto try_again;
		}
	}
	/* Sanity check */
	if (thisend >= SCTP_PACKET_LOG_SIZE) {
		SCTP_PRINTF("Insanity stops a log thisbegin:%d thisend:%d writers:%d lock:%d end:%d\n",
		    thisbegin,
		    thisend,
		    SCTP_BASE_VAR(packet_log_writers),
		    grabbed_lock,
		    SCTP_BASE_VAR(packet_log_end));
		SCTP_BASE_VAR(packet_log_end) = 0;
		goto no_log;
	}
	lenat = (int *)&SCTP_BASE_VAR(packet_log_buffer)[thisbegin];
	*lenat = total_len;
	lenat++;
	*lenat = value;
	lenat++;
	tick_tock = (uint32_t *)lenat;
	lenat++;
	*tick_tock = sctp_get_tick_count();
	copyto = (void *)lenat;
	thisone = thisend - sizeof(int);
	lenat = (int *)&SCTP_BASE_VAR(packet_log_buffer)[thisone];
	*lenat = thisbegin;
	if (grabbed_lock) {
		SCTP_IP_PKTLOG_UNLOCK();
		grabbed_lock = 0;
	}
	m_copydata(m, 0, length, (caddr_t)copyto);
no_log:
	if (grabbed_lock) {
		SCTP_IP_PKTLOG_UNLOCK();
	}
	atomic_subtract_int(&SCTP_BASE_VAR(packet_log_writers), 1);
}

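/*
 * Copy up to 'length' bytes of the packet log into 'target'. The first
 * int written is the current end offset of the circular buffer,
 * followed by the raw log contents; the total number of bytes written
 * is returned. The reader briefly inflates the writer count so that
 * concurrent writers fall back to taking the pktlog lock.
 */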
int
sctp_copy_out_packet_log(uint8_t *target, int length)
{
	/*
	 * Copy up to length bytes of the packet log out to the caller and
	 * return the number of bytes copied.
	 */
	int this_copy;
	int *lenat;
	int did_delay = 0;

	if (length < (int)(2 * sizeof(int))) {
		/* not enough room */
		return (0);
	}
	if (SCTP_PKTLOG_WRITERS_NEED_LOCK) {
		atomic_add_int(&SCTP_BASE_VAR(packet_log_writers), SCTP_PKTLOG_WRITERS_NEED_LOCK);
again:
		if ((did_delay == 0) && (SCTP_BASE_VAR(packet_log_writers) != SCTP_PKTLOG_WRITERS_NEED_LOCK)) {
			/*
			 * We delay here for just a moment, hoping the
			 * writer(s) that were present when we entered will
			 * have left and we only have locking ones that will
			 * contend with us for the lock. This does not
			 * assure 100% access, but it's good enough for a
			 * logging facility like this.
			 */
			did_delay = 1;
			DELAY(10);
			goto again;
		}
	}
	SCTP_IP_PKTLOG_LOCK();
	lenat = (int *)target;
	*lenat = SCTP_BASE_VAR(packet_log_end);
	lenat++;
	this_copy = min((length - sizeof(int)), SCTP_PACKET_LOG_SIZE);
	memcpy((void *)lenat, (void *)SCTP_BASE_VAR(packet_log_buffer), this_copy);
	if (SCTP_PKTLOG_WRITERS_NEED_LOCK) {
		atomic_subtract_int(&SCTP_BASE_VAR(packet_log_writers),
		    SCTP_PKTLOG_WRITERS_NEED_LOCK);
	}
	SCTP_IP_PKTLOG_UNLOCK();
	return (this_copy + sizeof(int));
}

#endif