1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 1982, 1989, 1993
5  *	The Regents of the University of California.  All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. Neither the name of the University nor the names of its contributors
16  *    may be used to endorse or promote products derived from this software
17  *    without specific prior written permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29  * SUCH DAMAGE.
30  */
31 
32 #include "opt_inet.h"
33 #include "opt_inet6.h"
34 #include "opt_netgraph.h"
35 #include "opt_mbuf_profiling.h"
36 #include "opt_rss.h"
37 
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/devctl.h>
41 #include <sys/eventhandler.h>
42 #include <sys/jail.h>
43 #include <sys/kernel.h>
44 #include <sys/lock.h>
45 #include <sys/malloc.h>
46 #include <sys/mbuf.h>
47 #include <sys/module.h>
48 #include <sys/msan.h>
49 #include <sys/proc.h>
50 #include <sys/priv.h>
51 #include <sys/random.h>
52 #include <sys/socket.h>
53 #include <sys/sockio.h>
54 #include <sys/sysctl.h>
55 #include <sys/uuid.h>
56 #ifdef KDB
57 #include <sys/kdb.h>
58 #endif
59 
60 #include <net/ieee_oui.h>
61 #include <net/if.h>
62 #include <net/if_var.h>
63 #include <net/if_private.h>
64 #include <net/if_arp.h>
65 #include <net/netisr.h>
66 #include <net/route.h>
67 #include <net/if_llc.h>
68 #include <net/if_dl.h>
69 #include <net/if_types.h>
70 #include <net/bpf.h>
71 #include <net/ethernet.h>
72 #include <net/if_bridgevar.h>
73 #include <net/if_vlan_var.h>
74 #include <net/if_llatbl.h>
75 #include <net/pfil.h>
76 #include <net/rss_config.h>
77 #include <net/vnet.h>
78 
79 #include <netpfil/pf/pf_mtag.h>
80 
81 #if defined(INET) || defined(INET6)
82 #include <netinet/in.h>
83 #include <netinet/in_var.h>
84 #include <netinet/if_ether.h>
85 #include <netinet/ip_carp.h>
86 #include <netinet/ip_var.h>
87 #endif
88 #ifdef INET6
89 #include <netinet6/nd6.h>
90 #endif
91 #include <security/mac/mac_framework.h>
92 
93 #include <crypto/sha1.h>
94 
95 VNET_DEFINE(pfil_head_t, link_pfil_head);	/* Packet filter hooks */
96 
97 /* netgraph node hooks for ng_ether(4) */
98 void	(*ng_ether_input_p)(struct ifnet *ifp, struct mbuf **mp);
99 void	(*ng_ether_input_orphan_p)(struct ifnet *ifp, struct mbuf *m);
100 int	(*ng_ether_output_p)(struct ifnet *ifp, struct mbuf **mp);
101 
102 /* if_bridge(4) support */
103 void	(*bridge_dn_p)(struct mbuf *, struct ifnet *);
104 bool	(*bridge_same_p)(const void *, const void *);
105 void	*(*bridge_get_softc_p)(struct ifnet *);
106 bool	(*bridge_member_ifaddrs_p)(void);
107 
108 /* if_lagg(4) support */
109 struct mbuf *(*lagg_input_ethernet_p)(struct ifnet *, struct mbuf *);
110 
111 static const u_char etherbroadcastaddr[ETHER_ADDR_LEN] =
112 			{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
113 
114 static	int ether_resolvemulti(struct ifnet *, struct sockaddr **,
115 		struct sockaddr *);
116 static	int ether_requestencap(struct ifnet *, struct if_encap_req *);
117 
118 static inline bool ether_do_pcp(struct ifnet *, struct mbuf *);
119 
120 #define senderr(e) do { error = (e); goto bad;} while (0)
121 
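/*
 * Copy checksum state from src to dst, translating the outbound "needs
 * checksum" flags into their inbound "already verified" counterparts.
 * Used when a locally originated packet is looped back, so the copy is
 * not re-verified in software.
 */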
122 static void
123 update_mbuf_csumflags(struct mbuf *src, struct mbuf *dst)
124 {
125 	int csum_flags = 0;
126 
127 	if (src->m_pkthdr.csum_flags & CSUM_IP)
128 		csum_flags |= (CSUM_IP_CHECKED|CSUM_IP_VALID);
129 	if (src->m_pkthdr.csum_flags & CSUM_DELAY_DATA)
130 		csum_flags |= (CSUM_DATA_VALID|CSUM_PSEUDO_HDR);
131 	if (src->m_pkthdr.csum_flags & CSUM_SCTP)
132 		csum_flags |= CSUM_SCTP_VALID;
133 	dst->m_pkthdr.csum_flags |= csum_flags;
134 	if (csum_flags & CSUM_DATA_VALID)
135 		dst->m_pkthdr.csum_data = 0xffff;
136 }
137 
138 /*
139  * Handle link-layer encapsulation requests.
140  */
141 static int
142 ether_requestencap(struct ifnet *ifp, struct if_encap_req *req)
143 {
144 	struct ether_header *eh;
145 	struct arphdr *ah;
146 	uint16_t etype;
147 	const u_char *lladdr;
148 
149 	if (req->rtype != IFENCAP_LL)
150 		return (EOPNOTSUPP);
151 
152 	if (req->bufsize < ETHER_HDR_LEN)
153 		return (ENOMEM);
154 
155 	eh = (struct ether_header *)req->buf;
156 	lladdr = req->lladdr;
157 	req->lladdr_off = 0;
158 
159 	switch (req->family) {
160 	case AF_INET:
161 		etype = htons(ETHERTYPE_IP);
162 		break;
163 	case AF_INET6:
164 		etype = htons(ETHERTYPE_IPV6);
165 		break;
166 	case AF_ARP:
167 		ah = (struct arphdr *)req->hdata;
168 		ah->ar_hrd = htons(ARPHRD_ETHER);
169 
170 		switch(ntohs(ah->ar_op)) {
171 		case ARPOP_REVREQUEST:
172 		case ARPOP_REVREPLY:
173 			etype = htons(ETHERTYPE_REVARP);
174 			break;
175 		case ARPOP_REQUEST:
176 		case ARPOP_REPLY:
177 		default:
178 			etype = htons(ETHERTYPE_ARP);
179 			break;
180 		}
181 
182 		if (req->flags & IFENCAP_FLAG_BROADCAST)
183 			lladdr = ifp->if_broadcastaddr;
184 		break;
185 	default:
186 		return (EAFNOSUPPORT);
187 	}
188 
189 	memcpy(&eh->ether_type, &etype, sizeof(eh->ether_type));
190 	memcpy(eh->ether_dhost, lladdr, ETHER_ADDR_LEN);
191 	memcpy(eh->ether_shost, IF_LLADDR(ifp), ETHER_ADDR_LEN);
192 	req->bufsize = sizeof(struct ether_header);
193 
194 	return (0);
195 }
196 
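/*
 * Resolve the link-layer address for dst and build the Ethernet header
 * in the buffer pointed to by phdr.  Unicast IPv4/IPv6 goes through
 * arpresolve()/nd6_resolve(), which may queue the mbuf and return
 * EWOULDBLOCK while resolution is in progress; broadcast and multicast
 * destinations are mapped directly.  On success, *pflags is set to
 * RT_MAY_LOOP and possibly RT_L2_ME.
 */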
197 static int
198 ether_resolve_addr(struct ifnet *ifp, struct mbuf *m,
199 	const struct sockaddr *dst, struct route *ro, u_char *phdr,
200 	uint32_t *pflags, struct llentry **plle)
201 {
202 	uint32_t lleflags = 0;
203 	int error = 0;
204 #if defined(INET) || defined(INET6)
205 	struct ether_header *eh = (struct ether_header *)phdr;
206 	uint16_t etype;
207 #endif
208 
209 	if (plle)
210 		*plle = NULL;
211 
212 	switch (dst->sa_family) {
213 #ifdef INET
214 	case AF_INET:
215 		if ((m->m_flags & (M_BCAST | M_MCAST)) == 0)
216 			error = arpresolve(ifp, 0, m, dst, phdr, &lleflags,
217 			    plle);
218 		else {
219 			if (m->m_flags & M_BCAST)
220 				memcpy(eh->ether_dhost, ifp->if_broadcastaddr,
221 				    ETHER_ADDR_LEN);
222 			else {
223 				const struct in_addr *a;
224 				a = &(((const struct sockaddr_in *)dst)->sin_addr);
225 				ETHER_MAP_IP_MULTICAST(a, eh->ether_dhost);
226 			}
227 			etype = htons(ETHERTYPE_IP);
228 			memcpy(&eh->ether_type, &etype, sizeof(etype));
229 			memcpy(eh->ether_shost, IF_LLADDR(ifp), ETHER_ADDR_LEN);
230 		}
231 		break;
232 #endif
233 #ifdef INET6
234 	case AF_INET6:
235 		if ((m->m_flags & M_MCAST) == 0) {
236 			int af = RO_GET_FAMILY(ro, dst);
237 			error = nd6_resolve(ifp, LLE_SF(af, 0), m, dst, phdr,
238 			    &lleflags, plle);
239 		} else {
240 			const struct in6_addr *a6;
241 			a6 = &(((const struct sockaddr_in6 *)dst)->sin6_addr);
242 			ETHER_MAP_IPV6_MULTICAST(a6, eh->ether_dhost);
243 			etype = htons(ETHERTYPE_IPV6);
244 			memcpy(&eh->ether_type, &etype, sizeof(etype));
245 			memcpy(eh->ether_shost, IF_LLADDR(ifp), ETHER_ADDR_LEN);
246 		}
247 		break;
248 #endif
249 	default:
250 		if_printf(ifp, "can't handle af%d\n", dst->sa_family);
251 		if (m != NULL)
252 			m_freem(m);
253 		return (EAFNOSUPPORT);
254 	}
255 
256 	if (error == EHOSTDOWN) {
257 		if (ro != NULL && (ro->ro_flags & RT_HAS_GW) != 0)
258 			error = EHOSTUNREACH;
259 	}
260 
261 	if (error != 0)
262 		return (error);
263 
264 	*pflags = RT_MAY_LOOP;
265 	if (lleflags & LLE_IFADDR)
266 		*pflags |= RT_L2_ME;
267 
268 	return (0);
269 }
270 
271 /*
272  * Ethernet output routine.
273  * Encapsulate a packet of type family for the local net.
274  * Use trailer local net encapsulation if enough data in first
275  * packet leaves a multiple of 512 bytes of data in remainder.
276  */
277 int
278 ether_output(struct ifnet *ifp, struct mbuf *m,
279 	const struct sockaddr *dst, struct route *ro)
280 {
281 	int error = 0;
282 	char linkhdr[ETHER_HDR_LEN], *phdr;
283 	struct ether_header *eh;
284 	struct pf_mtag *t;
285 	bool loop_copy;
286 	int hlen;	/* link layer header length */
287 	uint32_t pflags;
288 	struct llentry *lle = NULL;
289 	int addref = 0;
290 
291 	phdr = NULL;
292 	pflags = 0;
293 	if (ro != NULL) {
294 		/* XXX BPF uses ro_prepend */
295 		if (ro->ro_prepend != NULL) {
296 			phdr = ro->ro_prepend;
297 			hlen = ro->ro_plen;
298 		} else if (!(m->m_flags & (M_BCAST | M_MCAST))) {
299 			if ((ro->ro_flags & RT_LLE_CACHE) != 0) {
300 				lle = ro->ro_lle;
301 				if (lle != NULL &&
302 				    (lle->la_flags & LLE_VALID) == 0) {
303 					LLE_FREE(lle);
304 					lle = NULL;	/* redundant */
305 					ro->ro_lle = NULL;
306 				}
307 				if (lle == NULL) {
308 					/* If we look it up below, cache the result. */
309 					addref = 1;
310 				} else
311 					/*
312 					 * Notify LLE code that
313 					 * the entry was used
314 					 * by datapath.
315 					 */
316 					llentry_provide_feedback(lle);
317 			}
318 			if (lle != NULL) {
319 				phdr = lle->r_linkdata;
320 				hlen = lle->r_hdrlen;
321 				pflags = lle->r_flags;
322 			}
323 		}
324 	}
325 
326 #ifdef MAC
327 	error = mac_ifnet_check_transmit(ifp, m);
328 	if (error)
329 		senderr(error);
330 #endif
331 
332 	M_PROFILE(m);
333 	if (ifp->if_flags & IFF_MONITOR)
334 		senderr(ENETDOWN);
335 	if (!((ifp->if_flags & IFF_UP) &&
336 	    (ifp->if_drv_flags & IFF_DRV_RUNNING)))
337 		senderr(ENETDOWN);
338 
339 	if (phdr == NULL) {
340 		/* No prepend data supplied. Try to calculate ourselves. */
341 		phdr = linkhdr;
342 		hlen = ETHER_HDR_LEN;
343 		error = ether_resolve_addr(ifp, m, dst, ro, phdr, &pflags,
344 		    addref ? &lle : NULL);
345 		if (addref && lle != NULL)
346 			ro->ro_lle = lle;
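		/*
		 * EWOULDBLOCK from the resolver means the mbuf was queued
		 * pending ARP/ND completion; report success to the caller.
		 */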
347 		if (error != 0)
348 			return (error == EWOULDBLOCK ? 0 : error);
349 	}
350 
351 	if ((pflags & RT_L2_ME) != 0) {
352 		update_mbuf_csumflags(m, m);
353 		return (if_simloop(ifp, m, RO_GET_FAMILY(ro, dst), 0));
354 	}
355 	loop_copy = (pflags & RT_MAY_LOOP) != 0;
356 
357 	/*
358 	 * Add local net header.  If no space in first mbuf,
359 	 * allocate another.
360 	 *
361 	 * Note that we do prepend regardless of RT_HAS_HEADER flag.
362 	 * This is done because BPF code shifts m_data pointer
363 	 * to the end of ethernet header prior to calling if_output().
364 	 */
365 	M_PREPEND(m, hlen, M_NOWAIT);
366 	if (m == NULL)
367 		senderr(ENOBUFS);
368 	if ((pflags & RT_HAS_HEADER) == 0) {
369 		eh = mtod(m, struct ether_header *);
370 		memcpy(eh, phdr, hlen);
371 	}
372 
373 	/*
374 	 * If a simplex interface, and the packet is being sent to our
375 	 * Ethernet address or a broadcast address, loopback a copy.
376 	 * XXX To make a simplex device behave exactly like a duplex
377 	 * device, we should copy in the case of sending to our own
378 	 * ethernet address (thus letting the original actually appear
379 	 * on the wire). However, we don't do that here for security
380 	 * reasons and compatibility with the original behavior.
381 	 */
382 	if ((m->m_flags & M_BCAST) && loop_copy && (ifp->if_flags & IFF_SIMPLEX) &&
383 	    ((t = pf_find_mtag(m)) == NULL || !t->routed)) {
384 		struct mbuf *n;
385 
386 		/*
387 		 * Because if_simloop() modifies the packet, we need a
388 		 * writable copy through m_dup() instead of a readonly
389 		 * one as m_copy[m] would give us. The alternative would
390 		 * be to modify if_simloop() to handle the readonly mbuf,
391 		 * but performance-wise it is mostly equivalent (trading
392 		 * extra data copying vs. extra locking).
393 		 *
394 		 * XXX This is a local workaround.  A number of less
395 		 * often used kernel parts suffer from the same bug.
396 		 * See PR kern/105943 for a proposed general solution.
397 		 */
398 		if ((n = m_dup(m, M_NOWAIT)) != NULL) {
399 			update_mbuf_csumflags(m, n);
400 			(void)if_simloop(ifp, n, RO_GET_FAMILY(ro, dst), hlen);
401 		} else
402 			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
403 	}
404 
405 	/*
406 	 * Bridges require special output handling.
407 	 */
408 	if (ifp->if_bridge) {
409 		BRIDGE_OUTPUT(ifp, m, error);
410 		return (error);
411 	}
412 
413 #if defined(INET) || defined(INET6)
414 	if (ifp->if_carp &&
415 	    (error = (*carp_output_p)(ifp, m, dst)))
416 		goto bad;
417 #endif
418 
419 	/* Handle ng_ether(4) processing, if any */
420 	if (ifp->if_l2com != NULL) {
421 		KASSERT(ng_ether_output_p != NULL,
422 		    ("ng_ether_output_p is NULL"));
423 		if ((error = (*ng_ether_output_p)(ifp, &m)) != 0) {
424 bad:			if (m != NULL)
425 				m_freem(m);
426 			return (error);
427 		}
428 		if (m == NULL)
429 			return (0);
430 	}
431 
432 	/* Continue with link-layer output */
433 	return ether_output_frame(ifp, m);
434 }
435 
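/*
 * Apply the interface's 802.1p priority (PCP) to an outbound frame by
 * inserting an 802.1Q header with VID 0.  Frames that already carry a
 * VLAN ethertype are passed through unchanged.  Returns false (and
 * counts an output error) if the tag cannot be inserted.
 */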
436 static bool
437 ether_set_pcp(struct mbuf **mp, struct ifnet *ifp, uint8_t pcp)
438 {
439 	struct ether_8021q_tag qtag;
440 	struct ether_header *eh;
441 
442 	eh = mtod(*mp, struct ether_header *);
443 	if (eh->ether_type == htons(ETHERTYPE_VLAN) ||
444 	    eh->ether_type == htons(ETHERTYPE_QINQ)) {
445 		(*mp)->m_flags &= ~M_VLANTAG;
446 		return (true);
447 	}
448 
449 	qtag.vid = 0;
450 	qtag.pcp = pcp;
451 	qtag.proto = ETHERTYPE_VLAN;
452 	if (ether_8021q_frame(mp, ifp, ifp, &qtag))
453 		return (true);
454 	if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
455 	return (false);
456 }
457 
458 /*
459  * Ethernet link layer output routine to send a raw frame to the device.
460  *
461  * This assumes that the 14 byte Ethernet header is present and contiguous
462  * in the first mbuf (if BRIDGE'ing).
463  */
464 int
465 ether_output_frame(struct ifnet *ifp, struct mbuf *m)
466 {
467 	if (ether_do_pcp(ifp, m) && !ether_set_pcp(&m, ifp, ifp->if_pcp))
468 		return (0);
469 
470 	if (PFIL_HOOKED_OUT(V_link_pfil_head))
471 		switch (pfil_mbuf_out(V_link_pfil_head, &m, ifp, NULL)) {
472 		case PFIL_DROPPED:
473 			return (EACCES);
474 		case PFIL_CONSUMED:
475 			return (0);
476 		}
477 
478 #ifdef EXPERIMENTAL
479 #if defined(INET6) && defined(INET)
480 	/* draft-ietf-6man-ipv6only-flag */
481 	/* Catch ETHERTYPE_IP, and ETHERTYPE_[REV]ARP if we are v6-only. */
482 	if ((ND_IFINFO(ifp)->flags & ND6_IFF_IPV6_ONLY_MASK) != 0) {
483 		struct ether_header *eh;
484 
485 		eh = mtod(m, struct ether_header *);
486 		switch (ntohs(eh->ether_type)) {
487 		case ETHERTYPE_IP:
488 		case ETHERTYPE_ARP:
489 		case ETHERTYPE_REVARP:
490 			m_freem(m);
491 			return (EAFNOSUPPORT);
492 			/* NOTREACHED */
493 			break;
494 		};
495 	}
496 #endif
497 #endif
498 
499 	/*
500 	 * Queue message on interface, update output statistics if successful,
501 	 * and start output if interface not yet active.
502 	 *
503 	 * If KMSAN is enabled, use it to verify that the data does not contain
504 	 * any uninitialized bytes.
505 	 */
506 	kmsan_check_mbuf(m, "ether_output");
507 	return ((ifp->if_transmit)(ifp, m));
508 }
509 
510 /*
511  * Process a received Ethernet packet; the packet is in the
512  * mbuf chain m with the ethernet header at the front.
513  */
514 static void
515 ether_input_internal(struct ifnet *ifp, struct mbuf *m)
516 {
517 	struct ether_header *eh;
518 	u_short etype;
519 
520 	if ((ifp->if_flags & IFF_UP) == 0) {
521 		m_freem(m);
522 		return;
523 	}
524 #ifdef DIAGNOSTIC
525 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
526 		if_printf(ifp, "discard frame at !IFF_DRV_RUNNING\n");
527 		m_freem(m);
528 		return;
529 	}
530 #endif
531 	if (__predict_false(m->m_len < ETHER_HDR_LEN)) {
532 		/* Drivers should pullup and ensure the mbuf is valid */
533 		if_printf(ifp, "discard frame w/o leading ethernet "
534 				"header (len %d pkt len %d)\n",
535 				m->m_len, m->m_pkthdr.len);
536 		if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
537 		m_freem(m);
538 		return;
539 	}
540 	eh = mtod(m, struct ether_header *);
541 	etype = ntohs(eh->ether_type);
542 	random_harvest_queue_ether(m, sizeof(*m));
543 
544 #ifdef EXPERIMENTAL
545 #if defined(INET6) && defined(INET)
546 	/* draft-ietf-6man-ipv6only-flag */
547 	/* Catch ETHERTYPE_IP, and ETHERTYPE_[REV]ARP if we are v6-only. */
548 	if ((ND_IFINFO(ifp)->flags & ND6_IFF_IPV6_ONLY_MASK) != 0) {
549 		switch (etype) {
550 		case ETHERTYPE_IP:
551 		case ETHERTYPE_ARP:
552 		case ETHERTYPE_REVARP:
553 			m_freem(m);
554 			return;
555 			/* NOTREACHED */
556 			break;
557 		};
558 	}
559 #endif
560 #endif
561 
562 	CURVNET_SET_QUIET(ifp->if_vnet);
563 
564 	if (ETHER_IS_MULTICAST(eh->ether_dhost)) {
565 		if (ETHER_IS_BROADCAST(eh->ether_dhost))
566 			m->m_flags |= M_BCAST;
567 		else
568 			m->m_flags |= M_MCAST;
569 		if_inc_counter(ifp, IFCOUNTER_IMCASTS, 1);
570 	}
571 
572 #ifdef MAC
573 	/*
574 	 * Tag the mbuf with an appropriate MAC label before any other
575 	 * consumers can get to it.
576 	 */
577 	mac_ifnet_create_mbuf(ifp, m);
578 #endif
579 
580 	/*
581 	 * Give bpf a chance at the packet.
582 	 */
583 	ETHER_BPF_MTAP(ifp, m);
584 
585 	if (!(ifp->if_capenable & IFCAP_HWSTATS))
586 		if_inc_counter(ifp, IFCOUNTER_IBYTES, m->m_pkthdr.len);
587 
588 	/* Allow monitor mode to claim this frame, after stats are updated. */
589 	if (ifp->if_flags & IFF_MONITOR) {
590 		m_freem(m);
591 		CURVNET_RESTORE();
592 		return;
593 	}
594 
595 	/* Handle input from a lagg(4) port */
596 	if (ifp->if_type == IFT_IEEE8023ADLAG) {
597 		KASSERT(lagg_input_ethernet_p != NULL,
598 		    ("%s: if_lagg not loaded!", __func__));
599 		m = (*lagg_input_ethernet_p)(ifp, m);
600 		if (m != NULL)
601 			ifp = m->m_pkthdr.rcvif;
602 		else {
603 			CURVNET_RESTORE();
604 			return;
605 		}
606 	}
607 
608 	/*
609 	 * If the hardware did not process an 802.1Q tag, do this now,
610 	 * to allow 802.1P priority frames to be passed to the main input
611 	 * path correctly.
612 	 */
613 	if ((m->m_flags & M_VLANTAG) == 0 &&
614 	    ((etype == ETHERTYPE_VLAN) || (etype == ETHERTYPE_QINQ))) {
615 		struct ether_vlan_header *evl;
616 
617 		if (m->m_len < sizeof(*evl) &&
618 		    (m = m_pullup(m, sizeof(*evl))) == NULL) {
619 #ifdef DIAGNOSTIC
620 			if_printf(ifp, "cannot pullup VLAN header\n");
621 #endif
622 			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
623 			CURVNET_RESTORE();
624 			return;
625 		}
626 
627 		evl = mtod(m, struct ether_vlan_header *);
628 		m->m_pkthdr.ether_vtag = ntohs(evl->evl_tag);
629 		m->m_flags |= M_VLANTAG;
630 
631 		bcopy((char *)evl, (char *)evl + ETHER_VLAN_ENCAP_LEN,
632 		    ETHER_HDR_LEN - ETHER_TYPE_LEN);
633 		m_adj(m, ETHER_VLAN_ENCAP_LEN);
634 		eh = mtod(m, struct ether_header *);
635 	}
636 
637 	M_SETFIB(m, ifp->if_fib);
638 
639 	/* Allow ng_ether(4) to claim this frame. */
640 	if (ifp->if_l2com != NULL) {
641 		KASSERT(ng_ether_input_p != NULL,
642 		    ("%s: ng_ether_input_p is NULL", __func__));
643 		m->m_flags &= ~M_PROMISC;
644 		(*ng_ether_input_p)(ifp, &m);
645 		if (m == NULL) {
646 			CURVNET_RESTORE();
647 			return;
648 		}
649 		eh = mtod(m, struct ether_header *);
650 	}
651 
652 	/*
653 	 * Allow if_bridge(4) to claim this frame.
654 	 *
655 	 * The BRIDGE_INPUT() macro will update ifp if the bridge changed it
656 	 * and the frame should be delivered locally.
657 	 *
658 	 * If M_BRIDGE_INJECT is set, the packet was received directly by the
659 	 * bridge via netmap, so "ifp" is the bridge itself and the packet
660 	 * should be re-examined.
661 	 */
662 	if (ifp->if_bridge != NULL || (m->m_flags & M_BRIDGE_INJECT) != 0) {
663 		m->m_flags &= ~M_PROMISC;
664 		BRIDGE_INPUT(ifp, m);
665 		if (m == NULL) {
666 			CURVNET_RESTORE();
667 			return;
668 		}
669 		eh = mtod(m, struct ether_header *);
670 	}
671 
672 #if defined(INET) || defined(INET6)
673 	/*
674 	 * Clear M_PROMISC on frame so that carp(4) will see it when the
675 	 * mbuf flows up to Layer 3.
676 	 * FreeBSD's implementation of carp(4) uses the inprotosw
677 	 * to dispatch IPPROTO_CARP. carp(4) also allocates its own
678 	 * Ethernet addresses of the form 00:00:5e:00:01:xx, which
679 	 * is outside the scope of the M_PROMISC test below.
680 	 * TODO: Maintain a hash table of ethernet addresses other than
681 	 * ether_dhost which may be active on this ifp.
682 	 */
683 	if (ifp->if_carp && (*carp_forus_p)(ifp, eh->ether_dhost)) {
684 		m->m_flags &= ~M_PROMISC;
685 	} else
686 #endif
687 	{
688 		/*
689 		 * If the frame received was not for our MAC address, set the
690 		 * M_PROMISC flag on the mbuf chain. The frame may need to
691 		 * be seen by the rest of the Ethernet input path in case of
692 		 * re-entry (e.g. bridge, vlan, netgraph) but should not be
693 		 * seen by upper protocol layers.
694 		 */
695 		if (!ETHER_IS_MULTICAST(eh->ether_dhost) &&
696 		    memcmp(IF_LLADDR(ifp), eh->ether_dhost, ETHER_ADDR_LEN) != 0)
697 			m->m_flags |= M_PROMISC;
698 	}
699 
700 	ether_demux(ifp, m);
701 	CURVNET_RESTORE();
702 }
703 
704 /*
705  * Ethernet input dispatch; by default, direct dispatch here regardless of
706  * global configuration.  However, if RSS is enabled, hook up RSS affinity
707  * so that when deferred or hybrid dispatch is enabled, we can redistribute
708  * load based on RSS.
709  *
710  * XXXRW: Would be nice if the ifnet passed up a flag indicating whether or
711  * not it had already done work distribution via multi-queue.  Then we could
712  * direct dispatch in the event load balancing was already complete and
713  * handle the case of interfaces with different capabilities better.
714  *
715  * XXXRW: Sort of want an M_DISTRIBUTED flag to avoid multiple distributions
716  * at multiple layers?
717  *
718  * XXXRW: For now, enable all this only if RSS is compiled in, although it
719  * works fine without RSS.  Need to characterise the performance overhead
720  * of the detour through the netisr code in the event the result is always
721  * direct dispatch.
722  */
723 static void
724 ether_nh_input(struct mbuf *m)
725 {
726 
727 	M_ASSERTPKTHDR(m);
728 	KASSERT(m->m_pkthdr.rcvif != NULL,
729 	    ("%s: NULL interface pointer", __func__));
730 	ether_input_internal(m->m_pkthdr.rcvif, m);
731 }
732 
733 static struct netisr_handler	ether_nh = {
734 	.nh_name = "ether",
735 	.nh_handler = ether_nh_input,
736 	.nh_proto = NETISR_ETHER,
737 #ifdef RSS
738 	.nh_policy = NETISR_POLICY_CPU,
739 	.nh_dispatch = NETISR_DISPATCH_DIRECT,
740 	.nh_m2cpuid = rss_m2cpuid,
741 #else
742 	.nh_policy = NETISR_POLICY_SOURCE,
743 	.nh_dispatch = NETISR_DISPATCH_DIRECT,
744 #endif
745 };
746 
747 static void
748 ether_init(__unused void *arg)
749 {
750 
751 	netisr_register(&ether_nh);
752 }
753 SYSINIT(ether, SI_SUB_INIT_IF, SI_ORDER_ANY, ether_init, NULL);
754 
755 static void
756 vnet_ether_init(const __unused void *arg)
757 {
758 	struct pfil_head_args args;
759 
760 	args.pa_version = PFIL_VERSION;
761 	args.pa_flags = PFIL_IN | PFIL_OUT;
762 	args.pa_type = PFIL_TYPE_ETHERNET;
763 	args.pa_headname = PFIL_ETHER_NAME;
764 	V_link_pfil_head = pfil_head_register(&args);
765 
766 #ifdef VIMAGE
767 	netisr_register_vnet(&ether_nh);
768 #endif
769 }
770 VNET_SYSINIT(vnet_ether_init, SI_SUB_PROTO_IF, SI_ORDER_ANY,
771     vnet_ether_init, NULL);
772 
773 #ifdef VIMAGE
774 static void
775 vnet_ether_pfil_destroy(const __unused void *arg)
776 {
777 
778 	pfil_head_unregister(V_link_pfil_head);
779 }
780 VNET_SYSUNINIT(vnet_ether_pfil_uninit, SI_SUB_PROTO_PFIL, SI_ORDER_ANY,
781     vnet_ether_pfil_destroy, NULL);
782 
783 static void
784 vnet_ether_destroy(__unused void *arg)
785 {
786 
787 	netisr_unregister_vnet(&ether_nh);
788 }
789 VNET_SYSUNINIT(vnet_ether_uninit, SI_SUB_PROTO_IF, SI_ORDER_ANY,
790     vnet_ether_destroy, NULL);
791 #endif
792 
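/*
 * The if_input entry point for Ethernet drivers.  Splits a chain of
 * packets linked via m_nextpkt and dispatches each frame through the
 * NETISR_ETHER handler.
 */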
793 static void
794 ether_input(struct ifnet *ifp, struct mbuf *m)
795 {
796 	struct epoch_tracker et;
797 	struct mbuf *mn;
798 	bool needs_epoch;
799 
800 	needs_epoch = (ifp->if_flags & IFF_NEEDSEPOCH);
801 #ifdef INVARIANTS
802 	/*
803 	 * This temporary code is here to prevent epoch unaware and unmarked
804 	 * drivers to panic the system.  Once all drivers are taken care of,
805 	 * the whole INVARIANTS block should go away.
806 	 */
807 	if (!needs_epoch && !in_epoch(net_epoch_preempt)) {
808 		static bool printedonce;
809 
810 		needs_epoch = true;
811 		if (!printedonce) {
812 			printedonce = true;
813 			if_printf(ifp, "called %s w/o net epoch! "
814 			    "PLEASE file a bug report.", __func__);
815 #ifdef KDB
816 			kdb_backtrace();
817 #endif
818 		}
819 	}
820 #endif
821 
822 	/*
823 	 * The drivers are allowed to pass in a chain of packets linked with
824 	 * m_nextpkt. We split them up into separate packets here and pass
825 	 * them up. This allows the drivers to amortize the receive lock.
826 	 */
827 	CURVNET_SET_QUIET(ifp->if_vnet);
828 	if (__predict_false(needs_epoch))
829 		NET_EPOCH_ENTER(et);
830 	while (m) {
831 		mn = m->m_nextpkt;
832 		m->m_nextpkt = NULL;
833 
834 		/*
835 		 * We will rely on rcvif being set properly in the deferred
836 		 * context, so assert it is correct here.
837 		 */
838 		MPASS((m->m_pkthdr.csum_flags & CSUM_SND_TAG) == 0);
839 		KASSERT(m->m_pkthdr.rcvif == ifp, ("%s: ifnet mismatch m %p "
840 		    "rcvif %p ifp %p", __func__, m, m->m_pkthdr.rcvif, ifp));
841 		netisr_dispatch(NETISR_ETHER, m);
842 		m = mn;
843 	}
844 	if (__predict_false(needs_epoch))
845 		NET_EPOCH_EXIT(et);
846 	CURVNET_RESTORE();
847 }
848 
849 /*
850  * Upper layer processing for a received Ethernet packet.
851  */
852 void
853 ether_demux(struct ifnet *ifp, struct mbuf *m)
854 {
855 	struct ether_header *eh;
856 	int i, isr;
857 	u_short ether_type;
858 
859 	NET_EPOCH_ASSERT();
860 	KASSERT(ifp != NULL, ("%s: NULL interface pointer", __func__));
861 
862 	/* Do not grab PROMISC frames in case we are re-entered. */
863 	if (PFIL_HOOKED_IN(V_link_pfil_head) && !(m->m_flags & M_PROMISC)) {
864 		i = pfil_mbuf_in(V_link_pfil_head, &m, ifp, NULL);
865 		if (i != PFIL_PASS)
866 			return;
867 	}
868 
869 	eh = mtod(m, struct ether_header *);
870 	ether_type = ntohs(eh->ether_type);
871 
872 	/*
873 	 * If this frame has a VLAN tag other than 0, call vlan_input()
874 	 * if its module is loaded. Otherwise, drop.
875 	 */
876 	if ((m->m_flags & M_VLANTAG) &&
877 	    EVL_VLANOFTAG(m->m_pkthdr.ether_vtag) != 0) {
878 		if (ifp->if_vlantrunk == NULL) {
879 			if_inc_counter(ifp, IFCOUNTER_NOPROTO, 1);
880 			m_freem(m);
881 			return;
882 		}
883 		KASSERT(vlan_input_p != NULL,("%s: VLAN not loaded!",
884 		    __func__));
885 		/* Clear before possibly re-entering ether_input(). */
886 		m->m_flags &= ~M_PROMISC;
887 		(*vlan_input_p)(ifp, m);
888 		return;
889 	}
890 
891 	/*
892 	 * Pass promiscuously received frames to the upper layer if the user
893 	 * requested this by setting IFF_PPROMISC. Otherwise, drop them.
894 	 */
895 	if ((ifp->if_flags & IFF_PPROMISC) == 0 && (m->m_flags & M_PROMISC)) {
896 		m_freem(m);
897 		return;
898 	}
899 
900 	/*
901 	 * Reset layer specific mbuf flags to avoid confusing upper layers.
902 	 */
903 	m->m_flags &= ~M_VLANTAG;
904 	m_clrprotoflags(m);
905 
906 	/*
907 	 * Dispatch frame to upper layer.
908 	 */
909 	switch (ether_type) {
910 #ifdef INET
911 	case ETHERTYPE_IP:
912 		isr = NETISR_IP;
913 		break;
914 
915 	case ETHERTYPE_ARP:
916 		if (ifp->if_flags & IFF_NOARP) {
917 			/* Discard packet if ARP is disabled on interface */
918 			m_freem(m);
919 			return;
920 		}
921 		isr = NETISR_ARP;
922 		break;
923 #endif
924 #ifdef INET6
925 	case ETHERTYPE_IPV6:
926 		isr = NETISR_IPV6;
927 		break;
928 #endif
929 	default:
930 		goto discard;
931 	}
932 
933 	/* Strip off Ethernet header. */
934 	m_adj(m, ETHER_HDR_LEN);
935 
936 	netisr_dispatch(isr, m);
937 	return;
938 
939 discard:
940 	/*
941 	 * Packet is to be discarded.  If netgraph is present,
942 	 * hand the packet to it for last chance processing;
943 	 * otherwise dispose of it.
944 	 */
945 	if (ifp->if_l2com != NULL) {
946 		KASSERT(ng_ether_input_orphan_p != NULL,
947 		    ("ng_ether_input_orphan_p is NULL"));
948 		(*ng_ether_input_orphan_p)(ifp, m);
949 		return;
950 	}
951 	m_freem(m);
952 }
953 
954 /*
955  * Convert Ethernet address to printable (loggable) representation.
956  * This routine is for compatibility; it's better to just use
957  *
958  *	printf("%6D", <pointer to address>, ":");
959  *
960  * since there's no static buffer involved.
961  */
962 char *
963 ether_sprintf(const u_char *ap)
964 {
965 	static char etherbuf[18];
966 	snprintf(etherbuf, sizeof (etherbuf), "%6D", ap, ":");
967 	return (etherbuf);
968 }
969 
970 /*
971  * Perform common duties while attaching to interface list
972  */
973 void
974 ether_ifattach(struct ifnet *ifp, const u_int8_t *lla)
975 {
976 	int i;
977 	struct ifaddr *ifa;
978 	struct sockaddr_dl *sdl;
979 
980 	ifp->if_addrlen = ETHER_ADDR_LEN;
981 	ifp->if_hdrlen = (ifp->if_capabilities & IFCAP_VLAN_MTU) != 0 ?
982 	    ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN : ETHER_HDR_LEN;
983 	ifp->if_mtu = ETHERMTU;
984 	if_attach(ifp);
985 	ifp->if_output = ether_output;
986 	ifp->if_input = ether_input;
987 	ifp->if_resolvemulti = ether_resolvemulti;
988 	ifp->if_requestencap = ether_requestencap;
989 	if (ifp->if_baudrate == 0)
990 		ifp->if_baudrate = IF_Mbps(10);		/* just a default */
991 	ifp->if_broadcastaddr = etherbroadcastaddr;
992 
993 	ifa = ifp->if_addr;
994 	KASSERT(ifa != NULL, ("%s: no lladdr!\n", __func__));
995 	sdl = (struct sockaddr_dl *)ifa->ifa_addr;
996 	sdl->sdl_type = IFT_ETHER;
997 	sdl->sdl_alen = ifp->if_addrlen;
998 	bcopy(lla, LLADDR(sdl), ifp->if_addrlen);
999 
1000 	if (ifp->if_hw_addr != NULL)
1001 		bcopy(lla, ifp->if_hw_addr, ifp->if_addrlen);
1002 
1003 	bpfattach(ifp, DLT_EN10MB, ETHER_HDR_LEN);
1004 
1005 	/* Announce Ethernet MAC address if non-zero. */
1006 	for (i = 0; i < ifp->if_addrlen; i++)
1007 		if (lla[i] != 0)
1008 			break;
1009 	if (i != ifp->if_addrlen)
1010 		if_printf(ifp, "Ethernet address: %6D\n", lla, ":");
1011 
1012 	uuid_ether_add(LLADDR(sdl));
1013 
1014 	/* All necessary bits are set up; announce it now. */
1015 	EVENTHANDLER_INVOKE(ether_ifattach_event, ifp);
1016 	if (IS_DEFAULT_VNET(curvnet))
1017 		devctl_notify("ETHERNET", ifp->if_xname, "IFATTACH", NULL);
1018 }
1019 
1020 /*
1021  * Perform common duties while detaching an Ethernet interface
1022  */
1023 void
1024 ether_ifdetach(struct ifnet *ifp)
1025 {
1026 	struct sockaddr_dl *sdl;
1027 
1028 	sdl = (struct sockaddr_dl *)(ifp->if_addr->ifa_addr);
1029 	uuid_ether_del(LLADDR(sdl));
1030 
1031 	bpfdetach(ifp);
1032 	if_detach(ifp);
1033 }
1034 
1035 SYSCTL_DECL(_net_link);
1036 SYSCTL_NODE(_net_link, IFT_ETHER, ether, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
1037     "Ethernet");
1038 
1039 #if 0
1040 /*
1041  * This is for reference.  We have a table-driven version
1042  * of the little-endian crc32 generator, which is faster
1043  * than the double-loop.
1044  */
1045 uint32_t
1046 ether_crc32_le(const uint8_t *buf, size_t len)
1047 {
1048 	size_t i;
1049 	uint32_t crc, carry;
1050 	int bit;
1051 	uint8_t data;
1052 
1053 	crc = 0xffffffff;	/* initial value */
1054 
1055 	for (i = 0; i < len; i++) {
1056 		for (data = *buf++, bit = 0; bit < 8; bit++, data >>= 1) {
1057 			carry = (crc ^ data) & 1;
1058 			crc >>= 1;
1059 			if (carry)
1060 				crc = (crc ^ ETHER_CRC_POLY_LE);
1061 		}
1062 	}
1063 
1064 	return (crc);
1065 }
1066 #else
1067 uint32_t
1068 ether_crc32_le(const uint8_t *buf, size_t len)
1069 {
1070 	static const uint32_t crctab[] = {
1071 		0x00000000, 0x1db71064, 0x3b6e20c8, 0x26d930ac,
1072 		0x76dc4190, 0x6b6b51f4, 0x4db26158, 0x5005713c,
1073 		0xedb88320, 0xf00f9344, 0xd6d6a3e8, 0xcb61b38c,
1074 		0x9b64c2b0, 0x86d3d2d4, 0xa00ae278, 0xbdbdf21c
1075 	};
1076 	size_t i;
1077 	uint32_t crc;
1078 
1079 	crc = 0xffffffff;	/* initial value */
1080 
1081 	for (i = 0; i < len; i++) {
1082 		crc ^= buf[i];
1083 		crc = (crc >> 4) ^ crctab[crc & 0xf];
1084 		crc = (crc >> 4) ^ crctab[crc & 0xf];
1085 	}
1086 
1087 	return (crc);
1088 }
1089 #endif
1090 
1091 uint32_t
1092 ether_crc32_be(const uint8_t *buf, size_t len)
1093 {
1094 	size_t i;
1095 	uint32_t crc, carry;
1096 	int bit;
1097 	uint8_t data;
1098 
1099 	crc = 0xffffffff;	/* initial value */
1100 
1101 	for (i = 0; i < len; i++) {
1102 		for (data = *buf++, bit = 0; bit < 8; bit++, data >>= 1) {
1103 			carry = ((crc & 0x80000000) ? 1 : 0) ^ (data & 0x01);
1104 			crc <<= 1;
1105 			if (carry)
1106 				crc = (crc ^ ETHER_CRC_POLY_BE) | carry;
1107 		}
1108 	}
1109 
1110 	return (crc);
1111 }
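
/*
 * Illustrative use only: NIC drivers commonly derive a multicast filter
 * bit index from the CRC of the destination address, e.g. by taking the
 * upper 6 bits of the big-endian CRC (details vary by device; "sdl" and
 * "hash" below are hypothetical driver variables):
 *
 *	h = ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN) >> 26;
 *	hash[h / 32] |= 1 << (h % 32);
 */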
1112 
1113 int
1114 ether_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
1115 {
1116 	struct ifaddr *ifa = (struct ifaddr *) data;
1117 	struct ifreq *ifr = (struct ifreq *) data;
1118 	int error = 0;
1119 
1120 	switch (command) {
1121 	case SIOCSIFADDR:
1122 		ifp->if_flags |= IFF_UP;
1123 
1124 		switch (ifa->ifa_addr->sa_family) {
1125 #ifdef INET
1126 		case AF_INET:
1127 			ifp->if_init(ifp->if_softc);	/* before arpwhohas */
1128 			arp_ifinit(ifp, ifa);
1129 			break;
1130 #endif
1131 		default:
1132 			ifp->if_init(ifp->if_softc);
1133 			break;
1134 		}
1135 		break;
1136 
1137 	case SIOCGIFADDR:
1138 		bcopy(IF_LLADDR(ifp), &ifr->ifr_addr.sa_data[0],
1139 		    ETHER_ADDR_LEN);
1140 		break;
1141 
1142 	case SIOCSIFMTU:
1143 		/*
1144 		 * Set the interface MTU.
1145 		 */
1146 		if (ifr->ifr_mtu > ETHERMTU) {
1147 			error = EINVAL;
1148 		} else {
1149 			ifp->if_mtu = ifr->ifr_mtu;
1150 		}
1151 		break;
1152 
1153 	case SIOCSLANPCP:
1154 		error = priv_check(curthread, PRIV_NET_SETLANPCP);
1155 		if (error != 0)
1156 			break;
1157 		if (ifr->ifr_lan_pcp > 7 &&
1158 		    ifr->ifr_lan_pcp != IFNET_PCP_NONE) {
1159 			error = EINVAL;
1160 		} else {
1161 			ifp->if_pcp = ifr->ifr_lan_pcp;
1162 			/* broadcast event about PCP change */
1163 			EVENTHANDLER_INVOKE(ifnet_event, ifp, IFNET_EVENT_PCP);
1164 		}
1165 		break;
1166 
1167 	case SIOCGLANPCP:
1168 		ifr->ifr_lan_pcp = ifp->if_pcp;
1169 		break;
1170 
1171 	default:
1172 		error = EINVAL;			/* XXX netbsd has ENOTTY??? */
1173 		break;
1174 	}
1175 	return (error);
1176 }
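
/*
 * Sketch (not from this file): a typical Ethernet driver ioctl routine
 * handles its device-specific commands and hands everything else to
 * ether_ioctl() for the generic processing above:
 *
 *	default:
 *		error = ether_ioctl(ifp, command, data);
 *		break;
 */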
1177 
1178 static int
1179 ether_resolvemulti(struct ifnet *ifp, struct sockaddr **llsa,
1180 	struct sockaddr *sa)
1181 {
1182 	struct sockaddr_dl *sdl;
1183 #ifdef INET
1184 	struct sockaddr_in *sin;
1185 #endif
1186 #ifdef INET6
1187 	struct sockaddr_in6 *sin6;
1188 #endif
1189 	u_char *e_addr;
1190 
1191 	switch(sa->sa_family) {
1192 	case AF_LINK:
1193 		/*
1194 		 * No mapping needed. Just check that it's a valid MC address.
1195 		 */
1196 		sdl = (struct sockaddr_dl *)sa;
1197 		e_addr = LLADDR(sdl);
1198 		if (!ETHER_IS_MULTICAST(e_addr))
1199 			return EADDRNOTAVAIL;
1200 		*llsa = NULL;
1201 		return 0;
1202 
1203 #ifdef INET
1204 	case AF_INET:
1205 		sin = (struct sockaddr_in *)sa;
1206 		if (!IN_MULTICAST(ntohl(sin->sin_addr.s_addr)))
1207 			return EADDRNOTAVAIL;
1208 		sdl = link_init_sdl(ifp, *llsa, IFT_ETHER);
1209 		sdl->sdl_alen = ETHER_ADDR_LEN;
1210 		e_addr = LLADDR(sdl);
1211 		ETHER_MAP_IP_MULTICAST(&sin->sin_addr, e_addr);
1212 		*llsa = (struct sockaddr *)sdl;
1213 		return 0;
1214 #endif
1215 #ifdef INET6
1216 	case AF_INET6:
1217 		sin6 = (struct sockaddr_in6 *)sa;
1218 		if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
1219 			/*
1220 			 * An IP6 address of 0 means listen to all
1221 			 * of the Ethernet multicast address used for IP6.
1222 			 * (This is used for multicast routers.)
1223 			 */
1224 			ifp->if_flags |= IFF_ALLMULTI;
1225 			*llsa = NULL;
1226 			return 0;
1227 		}
1228 		if (!IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr))
1229 			return EADDRNOTAVAIL;
1230 		sdl = link_init_sdl(ifp, *llsa, IFT_ETHER);
1231 		sdl->sdl_alen = ETHER_ADDR_LEN;
1232 		e_addr = LLADDR(sdl);
1233 		ETHER_MAP_IPV6_MULTICAST(&sin6->sin6_addr, e_addr);
1234 		*llsa = (struct sockaddr *)sdl;
1235 		return 0;
1236 #endif
1237 
1238 	default:
1239 		/*
1240 		 * Well, the text isn't quite right, but it's the name
1241 		 * that counts...
1242 		 */
1243 		return EAFNOSUPPORT;
1244 	}
1245 }
1246 
1247 static moduledata_t ether_mod = {
1248 	.name = "ether",
1249 };
1250 
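/*
 * Tap a frame to BPF when its 802.1Q tag is carried out-of-band in
 * m_pkthdr.ether_vtag: rebuild the VLAN header on the stack and hand
 * BPF the reconstructed frame, optionally prepending caller-supplied
 * link-layer data.
 */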
1251 void
1252 ether_vlan_mtap(struct bpf_if *bp, struct mbuf *m, void *data, u_int dlen)
1253 {
1254 	struct ether_vlan_header vlan;
1255 	struct mbuf mv, mb;
1256 
1257 	KASSERT((m->m_flags & M_VLANTAG) != 0,
1258 	    ("%s: vlan information not present", __func__));
1259 	KASSERT(m->m_len >= sizeof(struct ether_header),
1260 	    ("%s: mbuf not large enough for header", __func__));
1261 	bcopy(mtod(m, char *), &vlan, sizeof(struct ether_header));
1262 	vlan.evl_proto = vlan.evl_encap_proto;
1263 	vlan.evl_encap_proto = htons(ETHERTYPE_VLAN);
1264 	vlan.evl_tag = htons(m->m_pkthdr.ether_vtag);
1265 	m->m_len -= sizeof(struct ether_header);
1266 	m->m_data += sizeof(struct ether_header);
1267 	/*
1268 	 * If a data link has been supplied by the caller, then we will need to
1269 	 * re-create a stack allocated mbuf chain with the following structure:
1270 	 *
1271 	 * (1) mbuf #1 will contain the supplied data link
1272 	 * (2) mbuf #2 will contain the vlan header
1273 	 * (3) mbuf #3 will contain the original mbuf's packet data
1274 	 *
1275 	 * Otherwise, submit the packet and vlan header via bpf_mtap2().
1276 	 */
1277 	if (data != NULL) {
1278 		mv.m_next = m;
1279 		mv.m_data = (caddr_t)&vlan;
1280 		mv.m_len = sizeof(vlan);
1281 		mb.m_next = &mv;
1282 		mb.m_data = data;
1283 		mb.m_len = dlen;
1284 		bpf_mtap(bp, &mb);
1285 	} else
1286 		bpf_mtap2(bp, &vlan, sizeof(vlan), m);
1287 	m->m_len += sizeof(struct ether_header);
1288 	m->m_data -= sizeof(struct ether_header);
1289 }
1290 
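/*
 * Prepend an 802.1Q encapsulation with the given tag and encapsulation
 * protocol to an Ethernet frame.  Returns the new mbuf chain, or NULL
 * if allocation fails, in which case the chain has been freed.
 */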
1291 struct mbuf *
1292 ether_vlanencap_proto(struct mbuf *m, uint16_t tag, uint16_t proto)
1293 {
1294 	struct ether_vlan_header *evl;
1295 
1296 	M_PREPEND(m, ETHER_VLAN_ENCAP_LEN, M_NOWAIT);
1297 	if (m == NULL)
1298 		return (NULL);
1299 	/* M_PREPEND takes care of m_len, m_pkthdr.len for us */
1300 
1301 	if (m->m_len < sizeof(*evl)) {
1302 		m = m_pullup(m, sizeof(*evl));
1303 		if (m == NULL)
1304 			return (NULL);
1305 	}
1306 
1307 	/*
1308 	 * Transform the Ethernet header into an Ethernet header
1309 	 * with 802.1Q encapsulation.
1310 	 */
1311 	evl = mtod(m, struct ether_vlan_header *);
1312 	bcopy((char *)evl + ETHER_VLAN_ENCAP_LEN,
1313 	    (char *)evl, ETHER_HDR_LEN - ETHER_TYPE_LEN);
1314 	evl->evl_encap_proto = htons(proto);
1315 	evl->evl_tag = htons(tag);
1316 	return (m);
1317 }
1318 
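/*
 * Tap an Ethernet frame to any BPF listeners on ifp, reconstructing the
 * VLAN header first if the tag is stored out-of-band in the mbuf.
 */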
1319 void
1320 ether_bpf_mtap_if(struct ifnet *ifp, struct mbuf *m)
1321 {
1322 	if (bpf_peers_present(ifp->if_bpf)) {
1323 		M_ASSERTVALID(m);
1324 		if ((m->m_flags & M_VLANTAG) != 0)
1325 			ether_vlan_mtap(ifp->if_bpf, m, NULL, 0);
1326 		else
1327 			bpf_mtap(ifp->if_bpf, m);
1328 	}
1329 }
1330 
1331 static SYSCTL_NODE(_net_link, IFT_L2VLAN, vlan, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
1332     "IEEE 802.1Q VLAN");
1333 static SYSCTL_NODE(_net_link_vlan, PF_LINK, link,
1334     CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
1335     "for consistency");
1336 
1337 VNET_DEFINE_STATIC(int, soft_pad);
1338 #define	V_soft_pad	VNET(soft_pad)
1339 SYSCTL_INT(_net_link_vlan, OID_AUTO, soft_pad, CTLFLAG_RW | CTLFLAG_VNET,
1340     &VNET_NAME(soft_pad), 0,
1341     "pad short frames before tagging");
1342 
1343 /*
1344  * For now, make preserving PCP via an mbuf tag optional, as it increases
1345  * per-packet memory allocations and frees.  In the future, it would be
1346  * preferable to reuse ether_vtag for this, or similar.
1347  */
1348 VNET_DEFINE(int, vlan_mtag_pcp) = 0;
1349 #define	V_vlan_mtag_pcp	VNET(vlan_mtag_pcp)
1350 SYSCTL_INT(_net_link_vlan, OID_AUTO, mtag_pcp, CTLFLAG_RW | CTLFLAG_VNET,
1351     &VNET_NAME(vlan_mtag_pcp), 0,
1352     "Retain VLAN PCP information as packets are passed up the stack");
1353 
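/*
 * Decide whether outbound 802.1p (PCP) processing applies to this frame:
 * true when the interface has a PCP configured, the mbuf already carries
 * a VLAN tag, or a PCP mbuf tag is present and honoured.  Never applies
 * to vlan(4) interfaces themselves.
 */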
1354 static inline bool
1355 ether_do_pcp(struct ifnet *ifp, struct mbuf *m)
1356 {
1357 	if (ifp->if_type == IFT_L2VLAN)
1358 		return (false);
1359 	if (ifp->if_pcp != IFNET_PCP_NONE || (m->m_flags & M_VLANTAG) != 0)
1360 		return (true);
1361 	if (V_vlan_mtag_pcp &&
1362 	    m_tag_locate(m, MTAG_8021Q, MTAG_8021Q_PCP_OUT, NULL) != NULL)
1363 		return (true);
1364 	return (false);
1365 }
1366 
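/*
 * Prepare an outbound frame for an 802.1Q VLAN: optionally pad short
 * frames, compute the tag from qtag and any PCP mbuf tag, then either
 * request hardware tag insertion (M_VLANTAG) or encapsulate in software.
 * Returns false and disposes of the mbuf on failure.
 */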
1367 bool
1368 ether_8021q_frame(struct mbuf **mp, struct ifnet *ife, struct ifnet *p,
1369     const struct ether_8021q_tag *qtag)
1370 {
1371 	struct m_tag *mtag;
1372 	int n;
1373 	uint16_t tag;
1374 	uint8_t pcp = qtag->pcp;
1375 	static const char pad[8];	/* just zeros */
1376 
1377 	/*
1378 	 * Pad the frame to the minimum size allowed if told to.
1379 	 * This option is in accord with IEEE Std 802.1Q, 2003 Ed.,
1380 	 * paragraph C.4.4.3.b.  It can help to work around buggy
1381 	 * bridges that violate paragraph C.4.4.3.a from the same
1382 	 * document, i.e., fail to pad short frames after untagging.
1383 	 * E.g., a tagged frame 66 bytes long (incl. FCS) is OK, but
1384 	 * untagging it will produce a 62-byte frame, which is a runt
1385 	 * and requires padding.  There are VLAN-enabled network
1386 	 * devices that just discard such runts instead or mishandle
1387 	 * them somehow.
1388 	 */
1389 	if (V_soft_pad && p->if_type == IFT_ETHER) {
1390 		for (n = ETHERMIN + ETHER_HDR_LEN - (*mp)->m_pkthdr.len;
1391 		     n > 0; n -= sizeof(pad)) {
1392 			if (!m_append(*mp, min(n, sizeof(pad)), pad))
1393 				break;
1394 		}
1395 		if (n > 0) {
1396 			m_freem(*mp);
1397 			*mp = NULL;
1398 			if_printf(ife, "cannot pad short frame");
1399 			return (false);
1400 		}
1401 	}
1402 
1403 	/*
1404 	 * If PCP is set in mbuf, use it
1405 	 */
1406 	if ((*mp)->m_flags & M_VLANTAG) {
1407 		pcp = EVL_PRIOFTAG((*mp)->m_pkthdr.ether_vtag);
1408 	}
1409 
1410 	/*
1411 	 * If underlying interface can do VLAN tag insertion itself,
1412 	 * just pass the packet along. However, we need some way to
1413 	 * tell the interface where the packet came from so that it
1414 	 * knows how to find the VLAN tag to use, so we attach a
1415 	 * packet tag that holds it.
1416 	 */
1417 	if (V_vlan_mtag_pcp && (mtag = m_tag_locate(*mp, MTAG_8021Q,
1418 	    MTAG_8021Q_PCP_OUT, NULL)) != NULL)
1419 		tag = EVL_MAKETAG(qtag->vid, *(uint8_t *)(mtag + 1), 0);
1420 	else
1421 		tag = EVL_MAKETAG(qtag->vid, pcp, 0);
1422 	if ((p->if_capenable & IFCAP_VLAN_HWTAGGING) &&
1423 	    (qtag->proto == ETHERTYPE_VLAN)) {
1424 		(*mp)->m_pkthdr.ether_vtag = tag;
1425 		(*mp)->m_flags |= M_VLANTAG;
1426 	} else {
1427 		*mp = ether_vlanencap_proto(*mp, tag, qtag->proto);
1428 		if (*mp == NULL) {
1429 			if_printf(ife, "unable to prepend 802.1Q header");
1430 			return (false);
1431 		}
1432 		(*mp)->m_flags &= ~M_VLANTAG;
1433 	}
1434 	return (true);
1435 }
1436 
1437 /*
1438  * Allocate an address from the FreeBSD Foundation OUI.  This uses a
1439  * cryptographic hash function on the containing jail's name, UUID and the
1440  * interface name to attempt to provide a unique but stable address.
1441  * Pseudo-interfaces which require a MAC address should use this function to
1442  * allocate non-locally-administered addresses.
1443  */
1444 void
1445 ether_gen_addr_byname(const char *nameunit, struct ether_addr *hwaddr)
1446 {
1447 	SHA1_CTX ctx;
1448 	char *buf;
1449 	char uuid[HOSTUUIDLEN + 1];
1450 	uint64_t addr;
1451 	int i, sz;
1452 	unsigned char digest[SHA1_RESULTLEN];
1453 	char jailname[MAXHOSTNAMELEN];
1454 
1455 	getcredhostuuid(curthread->td_ucred, uuid, sizeof(uuid));
1456 	if (strncmp(uuid, DEFAULT_HOSTUUID, sizeof(uuid)) == 0) {
1457 		/* Fall back to a random mac address. */
1458 		goto rando;
1459 	}
1460 
1461 	/* If each (vnet) jail also had a unique hostuuid, this would not
1462 	 * be necessary. */
1463 	getjailname(curthread->td_ucred, jailname, sizeof(jailname));
1464 	sz = asprintf(&buf, M_TEMP, "%s-%s-%s", uuid, nameunit,
1465 	    jailname);
1466 	if (sz < 0) {
1467 		/* Fall back to a random mac address. */
1468 		goto rando;
1469 	}
1470 
1471 	SHA1Init(&ctx);
1472 	SHA1Update(&ctx, buf, sz);
1473 	SHA1Final(digest, &ctx);
1474 	free(buf, M_TEMP);
1475 
1476 	addr = (digest[0] << 8) | digest[1] | OUI_FREEBSD_GENERATED_LOW;
1477 	for (i = 0; i < ETHER_ADDR_LEN; ++i) {
1478 		hwaddr->octet[i] = addr >> ((ETHER_ADDR_LEN - i - 1) * 8) &
1479 		    0xFF;
1480 	}
1481 
1482 	return;
1483 rando:
1484 	arc4rand(hwaddr, sizeof(*hwaddr), 0);
1485 	/* Unicast */
1486 	hwaddr->octet[0] &= 0xFE;
1487 	/* Locally administered. */
1488 	hwaddr->octet[0] |= 0x02;
1489 }
1490 
1491 void
1492 ether_gen_addr(struct ifnet *ifp, struct ether_addr *hwaddr)
1493 {
1494 	ether_gen_addr_byname(if_name(ifp), hwaddr);
1495 }
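
/*
 * Illustrative sketch (hypothetical pseudo-interface attach path): a
 * cloned interface that needs a stable, non-locally-administered MAC
 * address could use the generator above before attaching:
 *
 *	struct ether_addr ea;
 *
 *	ether_gen_addr(ifp, &ea);
 *	ether_ifattach(ifp, ea.octet);
 */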
1496 
1497 DECLARE_MODULE(ether, ether_mod, SI_SUB_INIT_IF, SI_ORDER_ANY);
1498 MODULE_VERSION(ether, 1);
1499