1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 1982, 1989, 1993
5  *	The Regents of the University of California.  All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. Neither the name of the University nor the names of its contributors
16  *    may be used to endorse or promote products derived from this software
17  *    without specific prior written permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29  * SUCH DAMAGE.
30  *
31  *	@(#)if_ethersubr.c	8.1 (Berkeley) 6/10/93
32  * $FreeBSD$
33  */
34 
35 #include "opt_inet.h"
36 #include "opt_inet6.h"
37 #include "opt_netgraph.h"
38 #include "opt_mbuf_profiling.h"
39 #include "opt_rss.h"
40 
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/devctl.h>
44 #include <sys/eventhandler.h>
45 #include <sys/jail.h>
46 #include <sys/kernel.h>
47 #include <sys/lock.h>
48 #include <sys/malloc.h>
49 #include <sys/mbuf.h>
50 #include <sys/module.h>
51 #include <sys/msan.h>
52 #include <sys/proc.h>
53 #include <sys/priv.h>
54 #include <sys/random.h>
55 #include <sys/socket.h>
56 #include <sys/sockio.h>
57 #include <sys/sysctl.h>
58 #include <sys/uuid.h>
59 #ifdef KDB
60 #include <sys/kdb.h>
61 #endif
62 
63 #include <net/ieee_oui.h>
64 #include <net/if.h>
65 #include <net/if_var.h>
66 #include <net/if_private.h>
67 #include <net/if_arp.h>
68 #include <net/netisr.h>
69 #include <net/route.h>
70 #include <net/if_llc.h>
71 #include <net/if_dl.h>
72 #include <net/if_types.h>
73 #include <net/bpf.h>
74 #include <net/ethernet.h>
75 #include <net/if_bridgevar.h>
76 #include <net/if_vlan_var.h>
77 #include <net/if_llatbl.h>
78 #include <net/pfil.h>
79 #include <net/rss_config.h>
80 #include <net/vnet.h>
81 
82 #include <netpfil/pf/pf_mtag.h>
83 
84 #if defined(INET) || defined(INET6)
85 #include <netinet/in.h>
86 #include <netinet/in_var.h>
87 #include <netinet/if_ether.h>
88 #include <netinet/ip_carp.h>
89 #include <netinet/ip_var.h>
90 #endif
91 #ifdef INET6
92 #include <netinet6/nd6.h>
93 #endif
94 #include <security/mac/mac_framework.h>
95 
96 #include <crypto/sha1.h>
97 
98 #ifdef CTASSERT
99 CTASSERT(sizeof (struct ether_header) == ETHER_ADDR_LEN * 2 + 2);
100 CTASSERT(sizeof (struct ether_addr) == ETHER_ADDR_LEN);
101 #endif
102 
103 VNET_DEFINE(pfil_head_t, link_pfil_head);	/* Packet filter hooks */
104 
105 /* netgraph node hooks for ng_ether(4) */
106 void	(*ng_ether_input_p)(struct ifnet *ifp, struct mbuf **mp);
107 void	(*ng_ether_input_orphan_p)(struct ifnet *ifp, struct mbuf *m);
108 int	(*ng_ether_output_p)(struct ifnet *ifp, struct mbuf **mp);
109 void	(*ng_ether_attach_p)(struct ifnet *ifp);
110 void	(*ng_ether_detach_p)(struct ifnet *ifp);
111 
112 void	(*vlan_input_p)(struct ifnet *, struct mbuf *);
113 
114 /* if_bridge(4) support */
115 void	(*bridge_dn_p)(struct mbuf *, struct ifnet *);
116 
117 /* if_lagg(4) support */
118 struct mbuf *(*lagg_input_ethernet_p)(struct ifnet *, struct mbuf *);
119 
120 static const u_char etherbroadcastaddr[ETHER_ADDR_LEN] =
121 			{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
122 
123 static	int ether_resolvemulti(struct ifnet *, struct sockaddr **,
124 		struct sockaddr *);
125 static	int ether_requestencap(struct ifnet *, struct if_encap_req *);
126 
127 #define senderr(e) do { error = (e); goto bad;} while (0)
128 
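/*
 * Translate transmit checksum-offload flags on 'src' into the equivalent
 * "already verified" receive flags on 'dst'.  Used when a packet is looped
 * back locally, so upper layers do not try to re-verify checksums that were
 * never computed on a wire.
 */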
129 static void
130 update_mbuf_csumflags(struct mbuf *src, struct mbuf *dst)
131 {
132 	int csum_flags = 0;
133 
134 	if (src->m_pkthdr.csum_flags & CSUM_IP)
135 		csum_flags |= (CSUM_IP_CHECKED|CSUM_IP_VALID);
136 	if (src->m_pkthdr.csum_flags & CSUM_DELAY_DATA)
137 		csum_flags |= (CSUM_DATA_VALID|CSUM_PSEUDO_HDR);
138 	if (src->m_pkthdr.csum_flags & CSUM_SCTP)
139 		csum_flags |= CSUM_SCTP_VALID;
140 	dst->m_pkthdr.csum_flags |= csum_flags;
141 	if (csum_flags & CSUM_DATA_VALID)
142 		dst->m_pkthdr.csum_data = 0xffff;
143 }
144 
145 /*
146  * Handle link-layer encapsulation requests.
147  */
148 static int
149 ether_requestencap(struct ifnet *ifp, struct if_encap_req *req)
150 {
151 	struct ether_header *eh;
152 	struct arphdr *ah;
153 	uint16_t etype;
154 	const u_char *lladdr;
155 
156 	if (req->rtype != IFENCAP_LL)
157 		return (EOPNOTSUPP);
158 
159 	if (req->bufsize < ETHER_HDR_LEN)
160 		return (ENOMEM);
161 
162 	eh = (struct ether_header *)req->buf;
163 	lladdr = req->lladdr;
164 	req->lladdr_off = 0;
165 
166 	switch (req->family) {
167 	case AF_INET:
168 		etype = htons(ETHERTYPE_IP);
169 		break;
170 	case AF_INET6:
171 		etype = htons(ETHERTYPE_IPV6);
172 		break;
173 	case AF_ARP:
174 		ah = (struct arphdr *)req->hdata;
175 		ah->ar_hrd = htons(ARPHRD_ETHER);
176 
		switch (ntohs(ah->ar_op)) {
178 		case ARPOP_REVREQUEST:
179 		case ARPOP_REVREPLY:
180 			etype = htons(ETHERTYPE_REVARP);
181 			break;
182 		case ARPOP_REQUEST:
183 		case ARPOP_REPLY:
184 		default:
185 			etype = htons(ETHERTYPE_ARP);
186 			break;
187 		}
188 
189 		if (req->flags & IFENCAP_FLAG_BROADCAST)
190 			lladdr = ifp->if_broadcastaddr;
191 		break;
192 	default:
193 		return (EAFNOSUPPORT);
194 	}
195 
196 	memcpy(&eh->ether_type, &etype, sizeof(eh->ether_type));
197 	memcpy(eh->ether_dhost, lladdr, ETHER_ADDR_LEN);
198 	memcpy(eh->ether_shost, IF_LLADDR(ifp), ETHER_ADDR_LEN);
199 	req->bufsize = sizeof(struct ether_header);
200 
201 	return (0);
202 }
203 
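/*
 * Resolve the link-layer destination for 'dst' and construct the Ethernet
 * header in 'phdr'.  Broadcast and multicast destinations are mapped
 * directly; unicast destinations are resolved via ARP (IPv4) or ND6 (IPv6).
 * On success, *pflags describes the result (RT_MAY_LOOP, RT_L2_ME) and, if
 * requested, *plle references the cached link-layer entry.
 */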
204 static int
205 ether_resolve_addr(struct ifnet *ifp, struct mbuf *m,
206 	const struct sockaddr *dst, struct route *ro, u_char *phdr,
207 	uint32_t *pflags, struct llentry **plle)
208 {
209 	uint32_t lleflags = 0;
210 	int error = 0;
211 #if defined(INET) || defined(INET6)
212 	struct ether_header *eh = (struct ether_header *)phdr;
213 	uint16_t etype;
214 #endif
215 
216 	if (plle)
217 		*plle = NULL;
218 
219 	switch (dst->sa_family) {
220 #ifdef INET
221 	case AF_INET:
222 		if ((m->m_flags & (M_BCAST | M_MCAST)) == 0)
223 			error = arpresolve(ifp, 0, m, dst, phdr, &lleflags,
224 			    plle);
225 		else {
226 			if (m->m_flags & M_BCAST)
227 				memcpy(eh->ether_dhost, ifp->if_broadcastaddr,
228 				    ETHER_ADDR_LEN);
229 			else {
230 				const struct in_addr *a;
231 				a = &(((const struct sockaddr_in *)dst)->sin_addr);
232 				ETHER_MAP_IP_MULTICAST(a, eh->ether_dhost);
233 			}
234 			etype = htons(ETHERTYPE_IP);
235 			memcpy(&eh->ether_type, &etype, sizeof(etype));
236 			memcpy(eh->ether_shost, IF_LLADDR(ifp), ETHER_ADDR_LEN);
237 		}
238 		break;
239 #endif
240 #ifdef INET6
241 	case AF_INET6:
242 		if ((m->m_flags & M_MCAST) == 0) {
243 			int af = RO_GET_FAMILY(ro, dst);
244 			error = nd6_resolve(ifp, LLE_SF(af, 0), m, dst, phdr,
245 			    &lleflags, plle);
246 		} else {
247 			const struct in6_addr *a6;
248 			a6 = &(((const struct sockaddr_in6 *)dst)->sin6_addr);
249 			ETHER_MAP_IPV6_MULTICAST(a6, eh->ether_dhost);
250 			etype = htons(ETHERTYPE_IPV6);
251 			memcpy(&eh->ether_type, &etype, sizeof(etype));
252 			memcpy(eh->ether_shost, IF_LLADDR(ifp), ETHER_ADDR_LEN);
253 		}
254 		break;
255 #endif
256 	default:
257 		if_printf(ifp, "can't handle af%d\n", dst->sa_family);
258 		if (m != NULL)
259 			m_freem(m);
260 		return (EAFNOSUPPORT);
261 	}
262 
263 	if (error == EHOSTDOWN) {
264 		if (ro != NULL && (ro->ro_flags & RT_HAS_GW) != 0)
265 			error = EHOSTUNREACH;
266 	}
267 
268 	if (error != 0)
269 		return (error);
270 
271 	*pflags = RT_MAY_LOOP;
272 	if (lleflags & LLE_IFADDR)
273 		*pflags |= RT_L2_ME;
274 
275 	return (0);
276 }
277 
278 /*
279  * Ethernet output routine.
280  * Encapsulate a packet of type family for the local net.
283  */
284 int
285 ether_output(struct ifnet *ifp, struct mbuf *m,
286 	const struct sockaddr *dst, struct route *ro)
287 {
288 	int error = 0;
289 	char linkhdr[ETHER_HDR_LEN], *phdr;
290 	struct ether_header *eh;
291 	struct pf_mtag *t;
292 	bool loop_copy;
293 	int hlen;	/* link layer header length */
294 	uint32_t pflags;
295 	struct llentry *lle = NULL;
296 	int addref = 0;
297 
298 	phdr = NULL;
299 	pflags = 0;
300 	if (ro != NULL) {
301 		/* XXX BPF uses ro_prepend */
302 		if (ro->ro_prepend != NULL) {
303 			phdr = ro->ro_prepend;
304 			hlen = ro->ro_plen;
305 		} else if (!(m->m_flags & (M_BCAST | M_MCAST))) {
306 			if ((ro->ro_flags & RT_LLE_CACHE) != 0) {
307 				lle = ro->ro_lle;
308 				if (lle != NULL &&
309 				    (lle->la_flags & LLE_VALID) == 0) {
310 					LLE_FREE(lle);
311 					lle = NULL;	/* redundant */
312 					ro->ro_lle = NULL;
313 				}
314 				if (lle == NULL) {
315 					/* if we lookup, keep cache */
316 					addref = 1;
317 				} else
318 					/*
319 					 * Notify LLE code that
320 					 * the entry was used
321 					 * by datapath.
322 					 */
323 					llentry_provide_feedback(lle);
324 			}
325 			if (lle != NULL) {
326 				phdr = lle->r_linkdata;
327 				hlen = lle->r_hdrlen;
328 				pflags = lle->r_flags;
329 			}
330 		}
331 	}
332 
333 #ifdef MAC
334 	error = mac_ifnet_check_transmit(ifp, m);
335 	if (error)
336 		senderr(error);
337 #endif
338 
339 	M_PROFILE(m);
340 	if (ifp->if_flags & IFF_MONITOR)
341 		senderr(ENETDOWN);
342 	if (!((ifp->if_flags & IFF_UP) &&
343 	    (ifp->if_drv_flags & IFF_DRV_RUNNING)))
344 		senderr(ENETDOWN);
345 
346 	if (phdr == NULL) {
		/* No prepend data supplied; try to construct the header ourselves. */
348 		phdr = linkhdr;
349 		hlen = ETHER_HDR_LEN;
350 		error = ether_resolve_addr(ifp, m, dst, ro, phdr, &pflags,
351 		    addref ? &lle : NULL);
352 		if (addref && lle != NULL)
353 			ro->ro_lle = lle;
354 		if (error != 0)
355 			return (error == EWOULDBLOCK ? 0 : error);
356 	}
357 
358 	if ((pflags & RT_L2_ME) != 0) {
359 		update_mbuf_csumflags(m, m);
360 		return (if_simloop(ifp, m, RO_GET_FAMILY(ro, dst), 0));
361 	}
362 	loop_copy = (pflags & RT_MAY_LOOP) != 0;
363 
364 	/*
365 	 * Add local net header.  If no space in first mbuf,
366 	 * allocate another.
367 	 *
368 	 * Note that we do prepend regardless of RT_HAS_HEADER flag.
369 	 * This is done because BPF code shifts m_data pointer
370 	 * to the end of ethernet header prior to calling if_output().
371 	 */
372 	M_PREPEND(m, hlen, M_NOWAIT);
373 	if (m == NULL)
374 		senderr(ENOBUFS);
375 	if ((pflags & RT_HAS_HEADER) == 0) {
376 		eh = mtod(m, struct ether_header *);
377 		memcpy(eh, phdr, hlen);
378 	}
379 
380 	/*
381 	 * If a simplex interface, and the packet is being sent to our
382 	 * Ethernet address or a broadcast address, loopback a copy.
383 	 * XXX To make a simplex device behave exactly like a duplex
384 	 * device, we should copy in the case of sending to our own
385 	 * ethernet address (thus letting the original actually appear
386 	 * on the wire). However, we don't do that here for security
387 	 * reasons and compatibility with the original behavior.
388 	 */
389 	if ((m->m_flags & M_BCAST) && loop_copy && (ifp->if_flags & IFF_SIMPLEX) &&
390 	    ((t = pf_find_mtag(m)) == NULL || !t->routed)) {
391 		struct mbuf *n;
392 
393 		/*
394 		 * Because if_simloop() modifies the packet, we need a
395 		 * writable copy through m_dup() instead of a readonly
396 		 * one as m_copy[m] would give us. The alternative would
397 		 * be to modify if_simloop() to handle the readonly mbuf,
398 		 * but performancewise it is mostly equivalent (trading
399 		 * extra data copying vs. extra locking).
400 		 *
401 		 * XXX This is a local workaround.  A number of less
402 		 * often used kernel parts suffer from the same bug.
403 		 * See PR kern/105943 for a proposed general solution.
404 		 */
405 		if ((n = m_dup(m, M_NOWAIT)) != NULL) {
406 			update_mbuf_csumflags(m, n);
407 			(void)if_simloop(ifp, n, RO_GET_FAMILY(ro, dst), hlen);
408 		} else
409 			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
410 	}
411 
	/*
	 * Bridges require special output handling.
	 */
415 	if (ifp->if_bridge) {
416 		BRIDGE_OUTPUT(ifp, m, error);
417 		return (error);
418 	}
419 
420 #if defined(INET) || defined(INET6)
421 	if (ifp->if_carp &&
422 	    (error = (*carp_output_p)(ifp, m, dst)))
423 		goto bad;
424 #endif
425 
426 	/* Handle ng_ether(4) processing, if any */
427 	if (ifp->if_l2com != NULL) {
428 		KASSERT(ng_ether_output_p != NULL,
429 		    ("ng_ether_output_p is NULL"));
430 		if ((error = (*ng_ether_output_p)(ifp, &m)) != 0) {
431 bad:			if (m != NULL)
432 				m_freem(m);
433 			return (error);
434 		}
435 		if (m == NULL)
436 			return (0);
437 	}
438 
439 	/* Continue with link-layer output */
440 	return ether_output_frame(ifp, m);
441 }
442 
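/*
 * Tag an outbound frame with the interface's default 802.1p priority (PCP).
 * Frames already carrying an 802.1Q/802.1ad Ethertype are left untouched;
 * otherwise a priority-only tag (VID 0) is inserted via ether_8021q_frame().
 * Returns false, counting an output error, if tagging fails.
 */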
443 static bool
444 ether_set_pcp(struct mbuf **mp, struct ifnet *ifp, uint8_t pcp)
445 {
446 	struct ether_8021q_tag qtag;
447 	struct ether_header *eh;
448 
449 	eh = mtod(*mp, struct ether_header *);
450 	if (ntohs(eh->ether_type) == ETHERTYPE_VLAN ||
451 	    ntohs(eh->ether_type) == ETHERTYPE_QINQ)
452 		return (true);
453 
454 	qtag.vid = 0;
455 	qtag.pcp = pcp;
456 	qtag.proto = ETHERTYPE_VLAN;
457 	if (ether_8021q_frame(mp, ifp, ifp, &qtag))
458 		return (true);
459 	if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
460 	return (false);
461 }
462 
463 /*
464  * Ethernet link layer output routine to send a raw frame to the device.
465  *
466  * This assumes that the 14 byte Ethernet header is present and contiguous
467  * in the first mbuf (if BRIDGE'ing).
468  */
469 int
470 ether_output_frame(struct ifnet *ifp, struct mbuf *m)
471 {
472 	uint8_t pcp;
473 
474 	pcp = ifp->if_pcp;
475 	if (pcp != IFNET_PCP_NONE && ifp->if_type != IFT_L2VLAN &&
476 	    !ether_set_pcp(&m, ifp, pcp))
477 		return (0);
478 
479 	if (PFIL_HOOKED_OUT(V_link_pfil_head))
480 		switch (pfil_mbuf_out(V_link_pfil_head, &m, ifp, NULL)) {
481 		case PFIL_DROPPED:
482 			return (EACCES);
483 		case PFIL_CONSUMED:
484 			return (0);
485 		}
486 
487 #ifdef EXPERIMENTAL
488 #if defined(INET6) && defined(INET)
489 	/* draft-ietf-6man-ipv6only-flag */
490 	/* Catch ETHERTYPE_IP, and ETHERTYPE_[REV]ARP if we are v6-only. */
491 	if ((ND_IFINFO(ifp)->flags & ND6_IFF_IPV6_ONLY_MASK) != 0) {
492 		struct ether_header *eh;
493 
494 		eh = mtod(m, struct ether_header *);
495 		switch (ntohs(eh->ether_type)) {
496 		case ETHERTYPE_IP:
497 		case ETHERTYPE_ARP:
498 		case ETHERTYPE_REVARP:
499 			m_freem(m);
500 			return (EAFNOSUPPORT);
501 			/* NOTREACHED */
502 			break;
503 		};
504 	}
505 #endif
506 #endif
507 
508 	/*
509 	 * Queue message on interface, update output statistics if successful,
510 	 * and start output if interface not yet active.
511 	 *
512 	 * If KMSAN is enabled, use it to verify that the data does not contain
513 	 * any uninitialized bytes.
514 	 */
515 	kmsan_check_mbuf(m, "ether_output");
516 	return ((ifp->if_transmit)(ifp, m));
517 }
518 
519 /*
520  * Process a received Ethernet packet; the packet is in the
521  * mbuf chain m with the ethernet header at the front.
522  */
523 static void
524 ether_input_internal(struct ifnet *ifp, struct mbuf *m)
525 {
526 	struct ether_header *eh;
527 	u_short etype;
528 
529 	if ((ifp->if_flags & IFF_UP) == 0) {
530 		m_freem(m);
531 		return;
532 	}
533 #ifdef DIAGNOSTIC
534 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
535 		if_printf(ifp, "discard frame at !IFF_DRV_RUNNING\n");
536 		m_freem(m);
537 		return;
538 	}
539 #endif
540 	if (m->m_len < ETHER_HDR_LEN) {
541 		/* XXX maybe should pullup? */
542 		if_printf(ifp, "discard frame w/o leading ethernet "
543 				"header (len %u pkt len %u)\n",
544 				m->m_len, m->m_pkthdr.len);
545 		if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
546 		m_freem(m);
547 		return;
548 	}
549 	eh = mtod(m, struct ether_header *);
550 	etype = ntohs(eh->ether_type);
551 	random_harvest_queue_ether(m, sizeof(*m));
552 
553 #ifdef EXPERIMENTAL
554 #if defined(INET6) && defined(INET)
555 	/* draft-ietf-6man-ipv6only-flag */
556 	/* Catch ETHERTYPE_IP, and ETHERTYPE_[REV]ARP if we are v6-only. */
557 	if ((ND_IFINFO(ifp)->flags & ND6_IFF_IPV6_ONLY_MASK) != 0) {
558 		switch (etype) {
559 		case ETHERTYPE_IP:
560 		case ETHERTYPE_ARP:
561 		case ETHERTYPE_REVARP:
562 			m_freem(m);
563 			return;
564 			/* NOTREACHED */
565 			break;
566 		};
567 	}
568 #endif
569 #endif
570 
571 	CURVNET_SET_QUIET(ifp->if_vnet);
572 
573 	if (ETHER_IS_MULTICAST(eh->ether_dhost)) {
574 		if (ETHER_IS_BROADCAST(eh->ether_dhost))
575 			m->m_flags |= M_BCAST;
576 		else
577 			m->m_flags |= M_MCAST;
578 		if_inc_counter(ifp, IFCOUNTER_IMCASTS, 1);
579 	}
580 
581 #ifdef MAC
582 	/*
583 	 * Tag the mbuf with an appropriate MAC label before any other
584 	 * consumers can get to it.
585 	 */
586 	mac_ifnet_create_mbuf(ifp, m);
587 #endif
588 
589 	/*
590 	 * Give bpf a chance at the packet.
591 	 */
592 	ETHER_BPF_MTAP(ifp, m);
593 
594 	/*
595 	 * If the CRC is still on the packet, trim it off. We do this once
596 	 * and once only in case we are re-entered. Nothing else on the
597 	 * Ethernet receive path expects to see the FCS.
598 	 */
599 	if (m->m_flags & M_HASFCS) {
600 		m_adj(m, -ETHER_CRC_LEN);
601 		m->m_flags &= ~M_HASFCS;
602 	}
603 
604 	if (!(ifp->if_capenable & IFCAP_HWSTATS))
605 		if_inc_counter(ifp, IFCOUNTER_IBYTES, m->m_pkthdr.len);
606 
607 	/* Allow monitor mode to claim this frame, after stats are updated. */
608 	if (ifp->if_flags & IFF_MONITOR) {
609 		m_freem(m);
610 		CURVNET_RESTORE();
611 		return;
612 	}
613 
614 	/* Handle input from a lagg(4) port */
615 	if (ifp->if_type == IFT_IEEE8023ADLAG) {
616 		KASSERT(lagg_input_ethernet_p != NULL,
617 		    ("%s: if_lagg not loaded!", __func__));
618 		m = (*lagg_input_ethernet_p)(ifp, m);
619 		if (m != NULL)
620 			ifp = m->m_pkthdr.rcvif;
621 		else {
622 			CURVNET_RESTORE();
623 			return;
624 		}
625 	}
626 
627 	/*
628 	 * If the hardware did not process an 802.1Q tag, do this now,
629 	 * to allow 802.1P priority frames to be passed to the main input
630 	 * path correctly.
631 	 */
632 	if ((m->m_flags & M_VLANTAG) == 0 &&
633 	    ((etype == ETHERTYPE_VLAN) || (etype == ETHERTYPE_QINQ))) {
634 		struct ether_vlan_header *evl;
635 
636 		if (m->m_len < sizeof(*evl) &&
637 		    (m = m_pullup(m, sizeof(*evl))) == NULL) {
638 #ifdef DIAGNOSTIC
639 			if_printf(ifp, "cannot pullup VLAN header\n");
640 #endif
641 			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
642 			CURVNET_RESTORE();
643 			return;
644 		}
645 
646 		evl = mtod(m, struct ether_vlan_header *);
647 		m->m_pkthdr.ether_vtag = ntohs(evl->evl_tag);
648 		m->m_flags |= M_VLANTAG;
649 
650 		bcopy((char *)evl, (char *)evl + ETHER_VLAN_ENCAP_LEN,
651 		    ETHER_HDR_LEN - ETHER_TYPE_LEN);
652 		m_adj(m, ETHER_VLAN_ENCAP_LEN);
653 		eh = mtod(m, struct ether_header *);
654 	}
655 
656 	M_SETFIB(m, ifp->if_fib);
657 
658 	/* Allow ng_ether(4) to claim this frame. */
659 	if (ifp->if_l2com != NULL) {
660 		KASSERT(ng_ether_input_p != NULL,
661 		    ("%s: ng_ether_input_p is NULL", __func__));
662 		m->m_flags &= ~M_PROMISC;
663 		(*ng_ether_input_p)(ifp, &m);
664 		if (m == NULL) {
665 			CURVNET_RESTORE();
666 			return;
667 		}
668 		eh = mtod(m, struct ether_header *);
669 	}
670 
671 	/*
672 	 * Allow if_bridge(4) to claim this frame.
673 	 *
674 	 * The BRIDGE_INPUT() macro will update ifp if the bridge changed it
675 	 * and the frame should be delivered locally.
676 	 *
677 	 * If M_BRIDGE_INJECT is set, the packet was received directly by the
678 	 * bridge via netmap, so "ifp" is the bridge itself and the packet
679 	 * should be re-examined.
680 	 */
681 	if (ifp->if_bridge != NULL || (m->m_flags & M_BRIDGE_INJECT) != 0) {
682 		m->m_flags &= ~M_PROMISC;
683 		BRIDGE_INPUT(ifp, m);
684 		if (m == NULL) {
685 			CURVNET_RESTORE();
686 			return;
687 		}
688 		eh = mtod(m, struct ether_header *);
689 	}
690 
691 #if defined(INET) || defined(INET6)
692 	/*
693 	 * Clear M_PROMISC on frame so that carp(4) will see it when the
694 	 * mbuf flows up to Layer 3.
695 	 * FreeBSD's implementation of carp(4) uses the inprotosw
696 	 * to dispatch IPPROTO_CARP. carp(4) also allocates its own
697 	 * Ethernet addresses of the form 00:00:5e:00:01:xx, which
698 	 * is outside the scope of the M_PROMISC test below.
699 	 * TODO: Maintain a hash table of ethernet addresses other than
700 	 * ether_dhost which may be active on this ifp.
701 	 */
702 	if (ifp->if_carp && (*carp_forus_p)(ifp, eh->ether_dhost)) {
703 		m->m_flags &= ~M_PROMISC;
704 	} else
705 #endif
706 	{
707 		/*
708 		 * If the frame received was not for our MAC address, set the
709 		 * M_PROMISC flag on the mbuf chain. The frame may need to
710 		 * be seen by the rest of the Ethernet input path in case of
711 		 * re-entry (e.g. bridge, vlan, netgraph) but should not be
712 		 * seen by upper protocol layers.
713 		 */
714 		if (!ETHER_IS_MULTICAST(eh->ether_dhost) &&
715 		    bcmp(IF_LLADDR(ifp), eh->ether_dhost, ETHER_ADDR_LEN) != 0)
716 			m->m_flags |= M_PROMISC;
717 	}
718 
719 	ether_demux(ifp, m);
720 	CURVNET_RESTORE();
721 }
722 
723 /*
724  * Ethernet input dispatch; by default, direct dispatch here regardless of
725  * global configuration.  However, if RSS is enabled, hook up RSS affinity
726  * so that when deferred or hybrid dispatch is enabled, we can redistribute
727  * load based on RSS.
728  *
729  * XXXRW: Would be nice if the ifnet passed up a flag indicating whether or
730  * not it had already done work distribution via multi-queue.  Then we could
731  * direct dispatch in the event load balancing was already complete and
732  * handle the case of interfaces with different capabilities better.
733  *
734  * XXXRW: Sort of want an M_DISTRIBUTED flag to avoid multiple distributions
735  * at multiple layers?
736  *
737  * XXXRW: For now, enable all this only if RSS is compiled in, although it
738  * works fine without RSS.  Need to characterise the performance overhead
739  * of the detour through the netisr code in the event the result is always
740  * direct dispatch.
741  */
742 static void
743 ether_nh_input(struct mbuf *m)
744 {
745 
746 	M_ASSERTPKTHDR(m);
747 	KASSERT(m->m_pkthdr.rcvif != NULL,
748 	    ("%s: NULL interface pointer", __func__));
749 	ether_input_internal(m->m_pkthdr.rcvif, m);
750 }
751 
752 static struct netisr_handler	ether_nh = {
753 	.nh_name = "ether",
754 	.nh_handler = ether_nh_input,
755 	.nh_proto = NETISR_ETHER,
756 #ifdef RSS
757 	.nh_policy = NETISR_POLICY_CPU,
758 	.nh_dispatch = NETISR_DISPATCH_DIRECT,
759 	.nh_m2cpuid = rss_m2cpuid,
760 #else
761 	.nh_policy = NETISR_POLICY_SOURCE,
762 	.nh_dispatch = NETISR_DISPATCH_DIRECT,
763 #endif
764 };
765 
766 static void
767 ether_init(__unused void *arg)
768 {
769 
770 	netisr_register(&ether_nh);
771 }
772 SYSINIT(ether, SI_SUB_INIT_IF, SI_ORDER_ANY, ether_init, NULL);
773 
774 static void
775 vnet_ether_init(__unused void *arg)
776 {
777 	struct pfil_head_args args;
778 
779 	args.pa_version = PFIL_VERSION;
780 	args.pa_flags = PFIL_IN | PFIL_OUT;
781 	args.pa_type = PFIL_TYPE_ETHERNET;
782 	args.pa_headname = PFIL_ETHER_NAME;
783 	V_link_pfil_head = pfil_head_register(&args);
784 
785 #ifdef VIMAGE
786 	netisr_register_vnet(&ether_nh);
787 #endif
788 }
789 VNET_SYSINIT(vnet_ether_init, SI_SUB_PROTO_IF, SI_ORDER_ANY,
790     vnet_ether_init, NULL);
791 
792 #ifdef VIMAGE
793 static void
794 vnet_ether_pfil_destroy(__unused void *arg)
795 {
796 
797 	pfil_head_unregister(V_link_pfil_head);
798 }
799 VNET_SYSUNINIT(vnet_ether_pfil_uninit, SI_SUB_PROTO_PFIL, SI_ORDER_ANY,
800     vnet_ether_pfil_destroy, NULL);
801 
802 static void
803 vnet_ether_destroy(__unused void *arg)
804 {
805 
806 	netisr_unregister_vnet(&ether_nh);
807 }
808 VNET_SYSUNINIT(vnet_ether_uninit, SI_SUB_PROTO_IF, SI_ORDER_ANY,
809     vnet_ether_destroy, NULL);
810 #endif
811 
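/*
 * Default if_input handler for Ethernet interfaces.  Splits a driver-supplied
 * m_nextpkt chain into individual packets and dispatches each one through
 * netisr (NETISR_ETHER), entering the network epoch on behalf of legacy
 * drivers that set IFF_NEEDSEPOCH.
 */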
812 static void
813 ether_input(struct ifnet *ifp, struct mbuf *m)
814 {
815 	struct epoch_tracker et;
816 	struct mbuf *mn;
817 	bool needs_epoch;
818 
819 	needs_epoch = (ifp->if_flags & IFF_NEEDSEPOCH);
820 #ifdef INVARIANTS
821 	/*
822 	 * This temporary code is here to prevent epoch unaware and unmarked
823 	 * drivers to panic the system.  Once all drivers are taken care of,
824 	 * the whole INVARIANTS block should go away.
825 	 */
826 	if (!needs_epoch && !in_epoch(net_epoch_preempt)) {
827 		static bool printedonce;
828 
829 		needs_epoch = true;
830 		if (!printedonce) {
831 			printedonce = true;
832 			if_printf(ifp, "called %s w/o net epoch! "
			    "PLEASE file a bug report.\n", __func__);
834 #ifdef KDB
835 			kdb_backtrace();
836 #endif
837 		}
838 	}
839 #endif
840 
841 	/*
842 	 * The drivers are allowed to pass in a chain of packets linked with
843 	 * m_nextpkt. We split them up into separate packets here and pass
844 	 * them up. This allows the drivers to amortize the receive lock.
845 	 */
846 	CURVNET_SET_QUIET(ifp->if_vnet);
847 	if (__predict_false(needs_epoch))
848 		NET_EPOCH_ENTER(et);
849 	while (m) {
850 		mn = m->m_nextpkt;
851 		m->m_nextpkt = NULL;
852 
853 		/*
854 		 * We will rely on rcvif being set properly in the deferred
855 		 * context, so assert it is correct here.
856 		 */
857 		MPASS((m->m_pkthdr.csum_flags & CSUM_SND_TAG) == 0);
858 		KASSERT(m->m_pkthdr.rcvif == ifp, ("%s: ifnet mismatch m %p "
859 		    "rcvif %p ifp %p", __func__, m, m->m_pkthdr.rcvif, ifp));
860 		netisr_dispatch(NETISR_ETHER, m);
861 		m = mn;
862 	}
863 	if (__predict_false(needs_epoch))
864 		NET_EPOCH_EXIT(et);
865 	CURVNET_RESTORE();
866 }
867 
868 /*
869  * Upper layer processing for a received Ethernet packet.
870  */
871 void
872 ether_demux(struct ifnet *ifp, struct mbuf *m)
873 {
874 	struct ether_header *eh;
875 	int i, isr;
876 	u_short ether_type;
877 
878 	NET_EPOCH_ASSERT();
879 	KASSERT(ifp != NULL, ("%s: NULL interface pointer", __func__));
880 
881 	/* Do not grab PROMISC frames in case we are re-entered. */
882 	if (PFIL_HOOKED_IN(V_link_pfil_head) && !(m->m_flags & M_PROMISC)) {
883 		i = pfil_mbuf_in(V_link_pfil_head, &m, ifp, NULL);
884 		if (i != 0 || m == NULL)
885 			return;
886 	}
887 
888 	eh = mtod(m, struct ether_header *);
889 	ether_type = ntohs(eh->ether_type);
890 
891 	/*
892 	 * If this frame has a VLAN tag other than 0, call vlan_input()
893 	 * if its module is loaded. Otherwise, drop.
894 	 */
895 	if ((m->m_flags & M_VLANTAG) &&
896 	    EVL_VLANOFTAG(m->m_pkthdr.ether_vtag) != 0) {
897 		if (ifp->if_vlantrunk == NULL) {
898 			if_inc_counter(ifp, IFCOUNTER_NOPROTO, 1);
899 			m_freem(m);
900 			return;
901 		}
902 		KASSERT(vlan_input_p != NULL,("%s: VLAN not loaded!",
903 		    __func__));
904 		/* Clear before possibly re-entering ether_input(). */
905 		m->m_flags &= ~M_PROMISC;
906 		(*vlan_input_p)(ifp, m);
907 		return;
908 	}
909 
910 	/*
911 	 * Pass promiscuously received frames to the upper layer if the user
912 	 * requested this by setting IFF_PPROMISC. Otherwise, drop them.
913 	 */
914 	if ((ifp->if_flags & IFF_PPROMISC) == 0 && (m->m_flags & M_PROMISC)) {
915 		m_freem(m);
916 		return;
917 	}
918 
919 	/*
920 	 * Reset layer specific mbuf flags to avoid confusing upper layers.
921 	 */
922 	m->m_flags &= ~M_VLANTAG;
923 	m_clrprotoflags(m);
924 
925 	/*
926 	 * Dispatch frame to upper layer.
927 	 */
928 	switch (ether_type) {
929 #ifdef INET
930 	case ETHERTYPE_IP:
931 		isr = NETISR_IP;
932 		break;
933 
934 	case ETHERTYPE_ARP:
935 		if (ifp->if_flags & IFF_NOARP) {
936 			/* Discard packet if ARP is disabled on interface */
937 			m_freem(m);
938 			return;
939 		}
940 		isr = NETISR_ARP;
941 		break;
942 #endif
943 #ifdef INET6
944 	case ETHERTYPE_IPV6:
945 		isr = NETISR_IPV6;
946 		break;
947 #endif
948 	default:
949 		goto discard;
950 	}
951 
952 	/* Strip off Ethernet header. */
953 	m_adj(m, ETHER_HDR_LEN);
954 
955 	netisr_dispatch(isr, m);
956 	return;
957 
958 discard:
959 	/*
960 	 * Packet is to be discarded.  If netgraph is present,
961 	 * hand the packet to it for last chance processing;
962 	 * otherwise dispose of it.
963 	 */
964 	if (ifp->if_l2com != NULL) {
965 		KASSERT(ng_ether_input_orphan_p != NULL,
966 		    ("ng_ether_input_orphan_p is NULL"));
967 		(*ng_ether_input_orphan_p)(ifp, m);
968 		return;
969 	}
970 	m_freem(m);
971 }
972 
973 /*
974  * Convert Ethernet address to printable (loggable) representation.
975  * This routine is for compatibility; it's better to just use
976  *
977  *	printf("%6D", <pointer to address>, ":");
978  *
979  * since there's no static buffer involved.
980  */
981 char *
982 ether_sprintf(const u_char *ap)
983 {
984 	static char etherbuf[18];
985 	snprintf(etherbuf, sizeof (etherbuf), "%6D", ap, ":");
986 	return (etherbuf);
987 }
988 
989 /*
990  * Perform common duties while attaching to interface list
991  */
992 void
993 ether_ifattach(struct ifnet *ifp, const u_int8_t *lla)
994 {
995 	int i;
996 	struct ifaddr *ifa;
997 	struct sockaddr_dl *sdl;
998 
999 	ifp->if_addrlen = ETHER_ADDR_LEN;
1000 	ifp->if_hdrlen = ETHER_HDR_LEN;
1001 	ifp->if_mtu = ETHERMTU;
1002 	if_attach(ifp);
1003 	ifp->if_output = ether_output;
1004 	ifp->if_input = ether_input;
1005 	ifp->if_resolvemulti = ether_resolvemulti;
1006 	ifp->if_requestencap = ether_requestencap;
1007 #ifdef VIMAGE
1008 	ifp->if_reassign = ether_reassign;
1009 #endif
1010 	if (ifp->if_baudrate == 0)
1011 		ifp->if_baudrate = IF_Mbps(10);		/* just a default */
1012 	ifp->if_broadcastaddr = etherbroadcastaddr;
1013 
1014 	ifa = ifp->if_addr;
1015 	KASSERT(ifa != NULL, ("%s: no lladdr!\n", __func__));
1016 	sdl = (struct sockaddr_dl *)ifa->ifa_addr;
1017 	sdl->sdl_type = IFT_ETHER;
1018 	sdl->sdl_alen = ifp->if_addrlen;
1019 	bcopy(lla, LLADDR(sdl), ifp->if_addrlen);
1020 
1021 	if (ifp->if_hw_addr != NULL)
1022 		bcopy(lla, ifp->if_hw_addr, ifp->if_addrlen);
1023 
1024 	bpfattach(ifp, DLT_EN10MB, ETHER_HDR_LEN);
1025 	if (ng_ether_attach_p != NULL)
1026 		(*ng_ether_attach_p)(ifp);
1027 
1028 	/* Announce Ethernet MAC address if non-zero. */
1029 	for (i = 0; i < ifp->if_addrlen; i++)
1030 		if (lla[i] != 0)
1031 			break;
1032 	if (i != ifp->if_addrlen)
1033 		if_printf(ifp, "Ethernet address: %6D\n", lla, ":");
1034 
1035 	uuid_ether_add(LLADDR(sdl));
1036 
	/* All necessary bits are set up; announce it now. */
1038 	EVENTHANDLER_INVOKE(ether_ifattach_event, ifp);
1039 	if (IS_DEFAULT_VNET(curvnet))
1040 		devctl_notify("ETHERNET", ifp->if_xname, "IFATTACH", NULL);
1041 }
1042 
1043 /*
1044  * Perform common duties while detaching an Ethernet interface
1045  */
1046 void
1047 ether_ifdetach(struct ifnet *ifp)
1048 {
1049 	struct sockaddr_dl *sdl;
1050 
1051 	sdl = (struct sockaddr_dl *)(ifp->if_addr->ifa_addr);
1052 	uuid_ether_del(LLADDR(sdl));
1053 
1054 	if (ifp->if_l2com != NULL) {
1055 		KASSERT(ng_ether_detach_p != NULL,
1056 		    ("ng_ether_detach_p is NULL"));
1057 		(*ng_ether_detach_p)(ifp);
1058 	}
1059 
1060 	bpfdetach(ifp);
1061 	if_detach(ifp);
1062 }
1063 
1064 #ifdef VIMAGE
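/*
 * Called when an interface moves to a different vnet: recreate the
 * ng_ether(4) netgraph node in the context of the new vnet.
 */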
1065 void
1066 ether_reassign(struct ifnet *ifp, struct vnet *new_vnet, char *unused __unused)
1067 {
1068 
1069 	if (ifp->if_l2com != NULL) {
1070 		KASSERT(ng_ether_detach_p != NULL,
1071 		    ("ng_ether_detach_p is NULL"));
1072 		(*ng_ether_detach_p)(ifp);
1073 	}
1074 
1075 	if (ng_ether_attach_p != NULL) {
1076 		CURVNET_SET_QUIET(new_vnet);
1077 		(*ng_ether_attach_p)(ifp);
1078 		CURVNET_RESTORE();
1079 	}
1080 }
1081 #endif
1082 
1083 SYSCTL_DECL(_net_link);
1084 SYSCTL_NODE(_net_link, IFT_ETHER, ether, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
1085     "Ethernet");
1086 
1087 #if 0
1088 /*
1089  * This is for reference.  We have a table-driven version
1090  * of the little-endian crc32 generator, which is faster
1091  * than the double-loop.
1092  */
1093 uint32_t
1094 ether_crc32_le(const uint8_t *buf, size_t len)
1095 {
1096 	size_t i;
	uint32_t crc, carry;
1098 	int bit;
1099 	uint8_t data;
1100 
1101 	crc = 0xffffffff;	/* initial value */
1102 
1103 	for (i = 0; i < len; i++) {
1104 		for (data = *buf++, bit = 0; bit < 8; bit++, data >>= 1) {
1105 			carry = (crc ^ data) & 1;
1106 			crc >>= 1;
1107 			if (carry)
1108 				crc = (crc ^ ETHER_CRC_POLY_LE);
1109 		}
1110 	}
1111 
1112 	return (crc);
1113 }
1114 #else
1115 uint32_t
1116 ether_crc32_le(const uint8_t *buf, size_t len)
1117 {
1118 	static const uint32_t crctab[] = {
1119 		0x00000000, 0x1db71064, 0x3b6e20c8, 0x26d930ac,
1120 		0x76dc4190, 0x6b6b51f4, 0x4db26158, 0x5005713c,
1121 		0xedb88320, 0xf00f9344, 0xd6d6a3e8, 0xcb61b38c,
1122 		0x9b64c2b0, 0x86d3d2d4, 0xa00ae278, 0xbdbdf21c
1123 	};
1124 	size_t i;
1125 	uint32_t crc;
1126 
1127 	crc = 0xffffffff;	/* initial value */
1128 
1129 	for (i = 0; i < len; i++) {
1130 		crc ^= buf[i];
1131 		crc = (crc >> 4) ^ crctab[crc & 0xf];
1132 		crc = (crc >> 4) ^ crctab[crc & 0xf];
1133 	}
1134 
1135 	return (crc);
1136 }
1137 #endif
1138 
1139 uint32_t
1140 ether_crc32_be(const uint8_t *buf, size_t len)
1141 {
1142 	size_t i;
1143 	uint32_t crc, carry;
1144 	int bit;
1145 	uint8_t data;
1146 
1147 	crc = 0xffffffff;	/* initial value */
1148 
1149 	for (i = 0; i < len; i++) {
1150 		for (data = *buf++, bit = 0; bit < 8; bit++, data >>= 1) {
1151 			carry = ((crc & 0x80000000) ? 1 : 0) ^ (data & 0x01);
1152 			crc <<= 1;
1153 			if (carry)
1154 				crc = (crc ^ ETHER_CRC_POLY_BE) | carry;
1155 		}
1156 	}
1157 
1158 	return (crc);
1159 }
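
/*
 * Note: many NIC drivers derive a hardware multicast hash-filter index from
 * these routines, typically from a few high-order bits of the result, e.g.
 * (illustrative only; filter width and bit order are device specific):
 *
 *	index = ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN) >> 26;
 */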
1160 
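/*
 * Common ioctl handler shared by Ethernet drivers: handles protocol address
 * assignment (SIOCSIFADDR), reporting the link-level address (SIOCGIFADDR),
 * MTU changes up to ETHERMTU, and getting/setting the default 802.1p PCP.
 * Unrecognized requests return EINVAL.
 */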
1161 int
1162 ether_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
1163 {
1164 	struct ifaddr *ifa = (struct ifaddr *) data;
1165 	struct ifreq *ifr = (struct ifreq *) data;
1166 	int error = 0;
1167 
1168 	switch (command) {
1169 	case SIOCSIFADDR:
1170 		ifp->if_flags |= IFF_UP;
1171 
1172 		switch (ifa->ifa_addr->sa_family) {
1173 #ifdef INET
1174 		case AF_INET:
1175 			ifp->if_init(ifp->if_softc);	/* before arpwhohas */
1176 			arp_ifinit(ifp, ifa);
1177 			break;
1178 #endif
1179 		default:
1180 			ifp->if_init(ifp->if_softc);
1181 			break;
1182 		}
1183 		break;
1184 
1185 	case SIOCGIFADDR:
1186 		bcopy(IF_LLADDR(ifp), &ifr->ifr_addr.sa_data[0],
1187 		    ETHER_ADDR_LEN);
1188 		break;
1189 
1190 	case SIOCSIFMTU:
1191 		/*
1192 		 * Set the interface MTU.
1193 		 */
1194 		if (ifr->ifr_mtu > ETHERMTU) {
1195 			error = EINVAL;
1196 		} else {
1197 			ifp->if_mtu = ifr->ifr_mtu;
1198 		}
1199 		break;
1200 
1201 	case SIOCSLANPCP:
1202 		error = priv_check(curthread, PRIV_NET_SETLANPCP);
1203 		if (error != 0)
1204 			break;
1205 		if (ifr->ifr_lan_pcp > 7 &&
1206 		    ifr->ifr_lan_pcp != IFNET_PCP_NONE) {
1207 			error = EINVAL;
1208 		} else {
1209 			ifp->if_pcp = ifr->ifr_lan_pcp;
1210 			/* broadcast event about PCP change */
1211 			EVENTHANDLER_INVOKE(ifnet_event, ifp, IFNET_EVENT_PCP);
1212 		}
1213 		break;
1214 
1215 	case SIOCGLANPCP:
1216 		ifr->ifr_lan_pcp = ifp->if_pcp;
1217 		break;
1218 
1219 	default:
1220 		error = EINVAL;			/* XXX netbsd has ENOTTY??? */
1221 		break;
1222 	}
1223 	return (error);
1224 }
1225 
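/*
 * Map a protocol-level multicast address (AF_LINK, AF_INET or AF_INET6) to
 * the corresponding Ethernet multicast address, returning a newly set up
 * sockaddr_dl in *llsa.  AF_LINK addresses are only validated (*llsa is set
 * to NULL), and the IPv6 unspecified address enables IFF_ALLMULTI instead.
 */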
1226 static int
1227 ether_resolvemulti(struct ifnet *ifp, struct sockaddr **llsa,
1228 	struct sockaddr *sa)
1229 {
1230 	struct sockaddr_dl *sdl;
1231 #ifdef INET
1232 	struct sockaddr_in *sin;
1233 #endif
1234 #ifdef INET6
1235 	struct sockaddr_in6 *sin6;
1236 #endif
1237 	u_char *e_addr;
1238 
	switch (sa->sa_family) {
1240 	case AF_LINK:
1241 		/*
1242 		 * No mapping needed. Just check that it's a valid MC address.
1243 		 */
1244 		sdl = (struct sockaddr_dl *)sa;
1245 		e_addr = LLADDR(sdl);
1246 		if (!ETHER_IS_MULTICAST(e_addr))
1247 			return EADDRNOTAVAIL;
1248 		*llsa = NULL;
1249 		return 0;
1250 
1251 #ifdef INET
1252 	case AF_INET:
1253 		sin = (struct sockaddr_in *)sa;
1254 		if (!IN_MULTICAST(ntohl(sin->sin_addr.s_addr)))
1255 			return EADDRNOTAVAIL;
1256 		sdl = link_init_sdl(ifp, *llsa, IFT_ETHER);
1257 		sdl->sdl_alen = ETHER_ADDR_LEN;
1258 		e_addr = LLADDR(sdl);
1259 		ETHER_MAP_IP_MULTICAST(&sin->sin_addr, e_addr);
1260 		*llsa = (struct sockaddr *)sdl;
1261 		return 0;
1262 #endif
1263 #ifdef INET6
1264 	case AF_INET6:
1265 		sin6 = (struct sockaddr_in6 *)sa;
1266 		if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
1267 			/*
1268 			 * An IP6 address of 0 means listen to all
1269 			 * of the Ethernet multicast address used for IP6.
1270 			 * (This is used for multicast routers.)
1271 			 */
1272 			ifp->if_flags |= IFF_ALLMULTI;
1273 			*llsa = NULL;
1274 			return 0;
1275 		}
1276 		if (!IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr))
1277 			return EADDRNOTAVAIL;
1278 		sdl = link_init_sdl(ifp, *llsa, IFT_ETHER);
1279 		sdl->sdl_alen = ETHER_ADDR_LEN;
1280 		e_addr = LLADDR(sdl);
1281 		ETHER_MAP_IPV6_MULTICAST(&sin6->sin6_addr, e_addr);
1282 		*llsa = (struct sockaddr *)sdl;
1283 		return 0;
1284 #endif
1285 
1286 	default:
1287 		/*
1288 		 * Well, the text isn't quite right, but it's the name
1289 		 * that counts...
1290 		 */
1291 		return EAFNOSUPPORT;
1292 	}
1293 }
1294 
1295 static moduledata_t ether_mod = {
1296 	.name = "ether",
1297 };
1298 
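/*
 * Tap a frame whose 802.1Q tag is carried out of band (M_VLANTAG) to BPF:
 * a VLAN header re-inserting the tag is built on the stack and handed to
 * BPF together with the packet data (and optional caller-supplied link-layer
 * data) without permanently modifying the original mbuf chain.
 */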
1299 void
1300 ether_vlan_mtap(struct bpf_if *bp, struct mbuf *m, void *data, u_int dlen)
1301 {
1302 	struct ether_vlan_header vlan;
1303 	struct mbuf mv, mb;
1304 
1305 	KASSERT((m->m_flags & M_VLANTAG) != 0,
1306 	    ("%s: vlan information not present", __func__));
1307 	KASSERT(m->m_len >= sizeof(struct ether_header),
1308 	    ("%s: mbuf not large enough for header", __func__));
1309 	bcopy(mtod(m, char *), &vlan, sizeof(struct ether_header));
1310 	vlan.evl_proto = vlan.evl_encap_proto;
1311 	vlan.evl_encap_proto = htons(ETHERTYPE_VLAN);
1312 	vlan.evl_tag = htons(m->m_pkthdr.ether_vtag);
1313 	m->m_len -= sizeof(struct ether_header);
1314 	m->m_data += sizeof(struct ether_header);
1315 	/*
1316 	 * If a data link has been supplied by the caller, then we will need to
1317 	 * re-create a stack allocated mbuf chain with the following structure:
1318 	 *
1319 	 * (1) mbuf #1 will contain the supplied data link
1320 	 * (2) mbuf #2 will contain the vlan header
1321 	 * (3) mbuf #3 will contain the original mbuf's packet data
1322 	 *
1323 	 * Otherwise, submit the packet and vlan header via bpf_mtap2().
1324 	 */
1325 	if (data != NULL) {
1326 		mv.m_next = m;
1327 		mv.m_data = (caddr_t)&vlan;
1328 		mv.m_len = sizeof(vlan);
1329 		mb.m_next = &mv;
1330 		mb.m_data = data;
1331 		mb.m_len = dlen;
1332 		bpf_mtap(bp, &mb);
1333 	} else
1334 		bpf_mtap2(bp, &vlan, sizeof(vlan), m);
1335 	m->m_len += sizeof(struct ether_header);
1336 	m->m_data -= sizeof(struct ether_header);
1337 }
1338 
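/*
 * Prepend an 802.1Q encapsulation with the given Ethertype and tag to an
 * Ethernet frame.  Returns the (possibly reallocated) mbuf, or NULL if
 * allocation fails, in which case the original mbuf has been freed.
 */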
1339 struct mbuf *
1340 ether_vlanencap_proto(struct mbuf *m, uint16_t tag, uint16_t proto)
1341 {
1342 	struct ether_vlan_header *evl;
1343 
1344 	M_PREPEND(m, ETHER_VLAN_ENCAP_LEN, M_NOWAIT);
1345 	if (m == NULL)
1346 		return (NULL);
1347 	/* M_PREPEND takes care of m_len, m_pkthdr.len for us */
1348 
1349 	if (m->m_len < sizeof(*evl)) {
1350 		m = m_pullup(m, sizeof(*evl));
1351 		if (m == NULL)
1352 			return (NULL);
1353 	}
1354 
1355 	/*
1356 	 * Transform the Ethernet header into an Ethernet header
1357 	 * with 802.1Q encapsulation.
1358 	 */
1359 	evl = mtod(m, struct ether_vlan_header *);
1360 	bcopy((char *)evl + ETHER_VLAN_ENCAP_LEN,
1361 	    (char *)evl, ETHER_HDR_LEN - ETHER_TYPE_LEN);
1362 	evl->evl_encap_proto = htons(proto);
1363 	evl->evl_tag = htons(tag);
1364 	return (m);
1365 }
1366 
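/*
 * Tap a frame to any BPF listeners on 'ifp', using ether_vlan_mtap() when
 * the VLAN tag is carried out of band in the mbuf packet header so that
 * listeners see a properly encapsulated frame.
 */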
1367 void
1368 ether_bpf_mtap_if(struct ifnet *ifp, struct mbuf *m)
1369 {
1370 	if (bpf_peers_present(ifp->if_bpf)) {
1371 		M_ASSERTVALID(m);
1372 		if ((m->m_flags & M_VLANTAG) != 0)
1373 			ether_vlan_mtap(ifp->if_bpf, m, NULL, 0);
1374 		else
1375 			bpf_mtap(ifp->if_bpf, m);
1376 	}
1377 }
1378 
1379 static SYSCTL_NODE(_net_link, IFT_L2VLAN, vlan, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
1380     "IEEE 802.1Q VLAN");
1381 static SYSCTL_NODE(_net_link_vlan, PF_LINK, link,
1382     CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
1383     "for consistency");
1384 
1385 VNET_DEFINE_STATIC(int, soft_pad);
1386 #define	V_soft_pad	VNET(soft_pad)
1387 SYSCTL_INT(_net_link_vlan, OID_AUTO, soft_pad, CTLFLAG_RW | CTLFLAG_VNET,
1388     &VNET_NAME(soft_pad), 0,
1389     "pad short frames before tagging");
1390 
1391 /*
1392  * For now, make preserving PCP via an mbuf tag optional, as it increases
1393  * per-packet memory allocations and frees.  In the future, it would be
1394  * preferable to reuse ether_vtag for this, or similar.
1395  */
1396 VNET_DEFINE(int, vlan_mtag_pcp) = 0;
1397 #define	V_vlan_mtag_pcp	VNET(vlan_mtag_pcp)
1398 SYSCTL_INT(_net_link_vlan, OID_AUTO, mtag_pcp, CTLFLAG_RW | CTLFLAG_VNET,
1399     &VNET_NAME(vlan_mtag_pcp), 0,
1400     "Retain VLAN PCP information as packets are passed up the stack");
1401 
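/*
 * Apply 802.1Q tagging to an outbound frame on behalf of interface 'ife'
 * transmitting over parent 'p': optionally pad short frames (soft_pad),
 * pick up the PCP from the mbuf or an MTAG_8021Q_PCP_OUT tag, and either
 * request hardware tag insertion (M_VLANTAG) or encapsulate in software.
 * Returns false and disposes of the mbuf on failure.
 */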
1402 bool
1403 ether_8021q_frame(struct mbuf **mp, struct ifnet *ife, struct ifnet *p,
1404     struct ether_8021q_tag *qtag)
1405 {
1406 	struct m_tag *mtag;
1407 	int n;
1408 	uint16_t tag;
1409 	static const char pad[8];	/* just zeros */
1410 
1411 	/*
1412 	 * Pad the frame to the minimum size allowed if told to.
1413 	 * This option is in accord with IEEE Std 802.1Q, 2003 Ed.,
1414 	 * paragraph C.4.4.3.b.  It can help to work around buggy
1415 	 * bridges that violate paragraph C.4.4.3.a from the same
1416 	 * document, i.e., fail to pad short frames after untagging.
1417 	 * E.g., a tagged frame 66 bytes long (incl. FCS) is OK, but
1418 	 * untagging it will produce a 62-byte frame, which is a runt
1419 	 * and requires padding.  There are VLAN-enabled network
1420 	 * devices that just discard such runts instead or mishandle
1421 	 * them somehow.
1422 	 */
1423 	if (V_soft_pad && p->if_type == IFT_ETHER) {
1424 		for (n = ETHERMIN + ETHER_HDR_LEN - (*mp)->m_pkthdr.len;
1425 		     n > 0; n -= sizeof(pad)) {
1426 			if (!m_append(*mp, min(n, sizeof(pad)), pad))
1427 				break;
1428 		}
1429 		if (n > 0) {
1430 			m_freem(*mp);
1431 			*mp = NULL;
			if_printf(ife, "cannot pad short frame\n");
1433 			return (false);
1434 		}
1435 	}
1436 
1437 	/*
1438 	 * If PCP is set in mbuf, use it
1439 	 */
1440 	if ((*mp)->m_flags & M_VLANTAG) {
1441 		qtag->pcp = EVL_PRIOFTAG((*mp)->m_pkthdr.ether_vtag);
1442 	}
1443 
1444 	/*
1445 	 * If underlying interface can do VLAN tag insertion itself,
1446 	 * just pass the packet along. However, we need some way to
1447 	 * tell the interface where the packet came from so that it
1448 	 * knows how to find the VLAN tag to use, so we attach a
1449 	 * packet tag that holds it.
1450 	 */
1451 	if (V_vlan_mtag_pcp && (mtag = m_tag_locate(*mp, MTAG_8021Q,
1452 	    MTAG_8021Q_PCP_OUT, NULL)) != NULL)
1453 		tag = EVL_MAKETAG(qtag->vid, *(uint8_t *)(mtag + 1), 0);
1454 	else
1455 		tag = EVL_MAKETAG(qtag->vid, qtag->pcp, 0);
1456 	if ((p->if_capenable & IFCAP_VLAN_HWTAGGING) &&
1457 	    (qtag->proto == ETHERTYPE_VLAN)) {
1458 		(*mp)->m_pkthdr.ether_vtag = tag;
1459 		(*mp)->m_flags |= M_VLANTAG;
1460 	} else {
1461 		*mp = ether_vlanencap_proto(*mp, tag, qtag->proto);
1462 		if (*mp == NULL) {
			if_printf(ife, "unable to prepend 802.1Q header\n");
1464 			return (false);
1465 		}
1466 	}
1467 	return (true);
1468 }
1469 
1470 /*
1471  * Allocate an address from the FreeBSD Foundation OUI.  This uses a
1472  * cryptographic hash function on the containing jail's name, UUID and the
1473  * interface name to attempt to provide a unique but stable address.
1474  * Pseudo-interfaces which require a MAC address should use this function to
1475  * allocate non-locally-administered addresses.
1476  */
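/*
 * Typical use by a pseudo-interface at attach time (illustrative only):
 *
 *	struct ether_addr ea;
 *
 *	ether_gen_addr(ifp, &ea);
 *	ether_ifattach(ifp, ea.octet);
 */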
1477 void
1478 ether_gen_addr(struct ifnet *ifp, struct ether_addr *hwaddr)
1479 {
1480 	SHA1_CTX ctx;
1481 	char *buf;
1482 	char uuid[HOSTUUIDLEN + 1];
1483 	uint64_t addr;
1484 	int i, sz;
1485 	char digest[SHA1_RESULTLEN];
1486 	char jailname[MAXHOSTNAMELEN];
1487 
1488 	getcredhostuuid(curthread->td_ucred, uuid, sizeof(uuid));
1489 	if (strncmp(uuid, DEFAULT_HOSTUUID, sizeof(uuid)) == 0) {
1490 		/* Fall back to a random mac address. */
1491 		goto rando;
1492 	}
1493 
	/*
	 * If each (vnet) jail also had a unique hostuuid, this would not
	 * be necessary.
	 */
1496 	getjailname(curthread->td_ucred, jailname, sizeof(jailname));
1497 	sz = asprintf(&buf, M_TEMP, "%s-%s-%s", uuid, if_name(ifp),
1498 	    jailname);
1499 	if (sz < 0) {
1500 		/* Fall back to a random mac address. */
1501 		goto rando;
1502 	}
1503 
1504 	SHA1Init(&ctx);
1505 	SHA1Update(&ctx, buf, sz);
1506 	SHA1Final(digest, &ctx);
1507 	free(buf, M_TEMP);
1508 
1509 	addr = ((digest[0] << 16) | (digest[1] << 8) | digest[2]) &
1510 	    OUI_FREEBSD_GENERATED_MASK;
1511 	addr = OUI_FREEBSD(addr);
1512 	for (i = 0; i < ETHER_ADDR_LEN; ++i) {
1513 		hwaddr->octet[i] = addr >> ((ETHER_ADDR_LEN - i - 1) * 8) &
1514 		    0xFF;
1515 	}
1516 
1517 	return;
1518 rando:
1519 	arc4rand(hwaddr, sizeof(*hwaddr), 0);
1520 	/* Unicast */
1521 	hwaddr->octet[0] &= 0xFE;
1522 	/* Locally administered. */
1523 	hwaddr->octet[0] |= 0x02;
1524 }
1525 
1526 DECLARE_MODULE(ether, ether_mod, SI_SUB_INIT_IF, SI_ORDER_ANY);
1527 MODULE_VERSION(ether, 1);
1528