xref: /freebsd/sys/net/if_ethersubr.c (revision 045c8f526484cb3b97f5fd693987f4376fa43c5f)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 1982, 1989, 1993
5  *	The Regents of the University of California.  All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. Neither the name of the University nor the names of its contributors
16  *    may be used to endorse or promote products derived from this software
17  *    without specific prior written permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29  * SUCH DAMAGE.
30  *
31  *	@(#)if_ethersubr.c	8.1 (Berkeley) 6/10/93
32  * $FreeBSD$
33  */
34 
35 #include "opt_inet.h"
36 #include "opt_inet6.h"
37 #include "opt_netgraph.h"
38 #include "opt_mbuf_profiling.h"
39 #include "opt_rss.h"
40 
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/devctl.h>
44 #include <sys/eventhandler.h>
45 #include <sys/jail.h>
46 #include <sys/kernel.h>
47 #include <sys/lock.h>
48 #include <sys/malloc.h>
49 #include <sys/mbuf.h>
50 #include <sys/module.h>
51 #include <sys/msan.h>
52 #include <sys/proc.h>
53 #include <sys/priv.h>
54 #include <sys/random.h>
55 #include <sys/socket.h>
56 #include <sys/sockio.h>
57 #include <sys/sysctl.h>
58 #include <sys/uuid.h>
59 
60 #include <net/ieee_oui.h>
61 #include <net/if.h>
62 #include <net/if_var.h>
63 #include <net/if_arp.h>
64 #include <net/netisr.h>
65 #include <net/route.h>
66 #include <net/if_llc.h>
67 #include <net/if_dl.h>
68 #include <net/if_types.h>
69 #include <net/bpf.h>
70 #include <net/ethernet.h>
71 #include <net/if_bridgevar.h>
72 #include <net/if_vlan_var.h>
73 #include <net/if_llatbl.h>
74 #include <net/pfil.h>
75 #include <net/rss_config.h>
76 #include <net/vnet.h>
77 
78 #include <netpfil/pf/pf_mtag.h>
79 
80 #if defined(INET) || defined(INET6)
81 #include <netinet/in.h>
82 #include <netinet/in_var.h>
83 #include <netinet/if_ether.h>
84 #include <netinet/ip_carp.h>
85 #include <netinet/ip_var.h>
86 #endif
87 #ifdef INET6
88 #include <netinet6/nd6.h>
89 #endif
90 #include <security/mac/mac_framework.h>
91 
92 #include <crypto/sha1.h>
93 
94 #ifdef CTASSERT
95 CTASSERT(sizeof (struct ether_header) == ETHER_ADDR_LEN * 2 + 2);
96 CTASSERT(sizeof (struct ether_addr) == ETHER_ADDR_LEN);
97 #endif
98 
99 VNET_DEFINE(pfil_head_t, link_pfil_head);	/* Packet filter hooks */
100 
101 /* netgraph node hooks for ng_ether(4) */
102 void	(*ng_ether_input_p)(struct ifnet *ifp, struct mbuf **mp);
103 void	(*ng_ether_input_orphan_p)(struct ifnet *ifp, struct mbuf *m);
104 int	(*ng_ether_output_p)(struct ifnet *ifp, struct mbuf **mp);
105 void	(*ng_ether_attach_p)(struct ifnet *ifp);
106 void	(*ng_ether_detach_p)(struct ifnet *ifp);
107 
108 void	(*vlan_input_p)(struct ifnet *, struct mbuf *);
109 
110 /* if_bridge(4) support */
111 void	(*bridge_dn_p)(struct mbuf *, struct ifnet *);
112 
113 /* if_lagg(4) support */
114 struct mbuf *(*lagg_input_ethernet_p)(struct ifnet *, struct mbuf *);
115 
116 static const u_char etherbroadcastaddr[ETHER_ADDR_LEN] =
117 			{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
118 
119 static	int ether_resolvemulti(struct ifnet *, struct sockaddr **,
120 		struct sockaddr *);
121 static	int ether_requestencap(struct ifnet *, struct if_encap_req *);
122 
123 #define senderr(e) do { error = (e); goto bad;} while (0)
124 
125 static void
126 update_mbuf_csumflags(struct mbuf *src, struct mbuf *dst)
127 {
128 	int csum_flags = 0;
129 
130 	if (src->m_pkthdr.csum_flags & CSUM_IP)
131 		csum_flags |= (CSUM_IP_CHECKED|CSUM_IP_VALID);
132 	if (src->m_pkthdr.csum_flags & CSUM_DELAY_DATA)
133 		csum_flags |= (CSUM_DATA_VALID|CSUM_PSEUDO_HDR);
134 	if (src->m_pkthdr.csum_flags & CSUM_SCTP)
135 		csum_flags |= CSUM_SCTP_VALID;
136 	dst->m_pkthdr.csum_flags |= csum_flags;
137 	if (csum_flags & CSUM_DATA_VALID)
138 		dst->m_pkthdr.csum_data = 0xffff;
139 }
140 
141 /*
142  * Handle link-layer encapsulation requests.
143  */
144 static int
145 ether_requestencap(struct ifnet *ifp, struct if_encap_req *req)
146 {
147 	struct ether_header *eh;
148 	struct arphdr *ah;
149 	uint16_t etype;
150 	const u_char *lladdr;
151 
152 	if (req->rtype != IFENCAP_LL)
153 		return (EOPNOTSUPP);
154 
155 	if (req->bufsize < ETHER_HDR_LEN)
156 		return (ENOMEM);
157 
158 	eh = (struct ether_header *)req->buf;
159 	lladdr = req->lladdr;
160 	req->lladdr_off = 0;
161 
162 	switch (req->family) {
163 	case AF_INET:
164 		etype = htons(ETHERTYPE_IP);
165 		break;
166 	case AF_INET6:
167 		etype = htons(ETHERTYPE_IPV6);
168 		break;
169 	case AF_ARP:
170 		ah = (struct arphdr *)req->hdata;
171 		ah->ar_hrd = htons(ARPHRD_ETHER);
172 
173 		switch(ntohs(ah->ar_op)) {
174 		case ARPOP_REVREQUEST:
175 		case ARPOP_REVREPLY:
176 			etype = htons(ETHERTYPE_REVARP);
177 			break;
178 		case ARPOP_REQUEST:
179 		case ARPOP_REPLY:
180 		default:
181 			etype = htons(ETHERTYPE_ARP);
182 			break;
183 		}
184 
185 		if (req->flags & IFENCAP_FLAG_BROADCAST)
186 			lladdr = ifp->if_broadcastaddr;
187 		break;
188 	default:
189 		return (EAFNOSUPPORT);
190 	}
191 
192 	memcpy(&eh->ether_type, &etype, sizeof(eh->ether_type));
193 	memcpy(eh->ether_dhost, lladdr, ETHER_ADDR_LEN);
194 	memcpy(eh->ether_shost, IF_LLADDR(ifp), ETHER_ADDR_LEN);
195 	req->bufsize = sizeof(struct ether_header);
196 
197 	return (0);
198 }
199 
200 static int
201 ether_resolve_addr(struct ifnet *ifp, struct mbuf *m,
202 	const struct sockaddr *dst, struct route *ro, u_char *phdr,
203 	uint32_t *pflags, struct llentry **plle)
204 {
205 	struct ether_header *eh;
206 	uint32_t lleflags = 0;
207 	int error = 0;
208 #if defined(INET) || defined(INET6)
209 	uint16_t etype;
210 #endif
211 
212 	if (plle)
213 		*plle = NULL;
214 	eh = (struct ether_header *)phdr;
215 
216 	switch (dst->sa_family) {
217 #ifdef INET
218 	case AF_INET:
219 		if ((m->m_flags & (M_BCAST | M_MCAST)) == 0)
220 			error = arpresolve(ifp, 0, m, dst, phdr, &lleflags,
221 			    plle);
222 		else {
223 			if (m->m_flags & M_BCAST)
224 				memcpy(eh->ether_dhost, ifp->if_broadcastaddr,
225 				    ETHER_ADDR_LEN);
226 			else {
227 				const struct in_addr *a;
228 				a = &(((const struct sockaddr_in *)dst)->sin_addr);
229 				ETHER_MAP_IP_MULTICAST(a, eh->ether_dhost);
230 			}
231 			etype = htons(ETHERTYPE_IP);
232 			memcpy(&eh->ether_type, &etype, sizeof(etype));
233 			memcpy(eh->ether_shost, IF_LLADDR(ifp), ETHER_ADDR_LEN);
234 		}
235 		break;
236 #endif
237 #ifdef INET6
238 	case AF_INET6:
239 		if ((m->m_flags & M_MCAST) == 0) {
240 			int af = RO_GET_FAMILY(ro, dst);
241 			error = nd6_resolve(ifp, LLE_SF(af, 0), m, dst, phdr,
242 			    &lleflags, plle);
243 		} else {
244 			const struct in6_addr *a6;
245 			a6 = &(((const struct sockaddr_in6 *)dst)->sin6_addr);
246 			ETHER_MAP_IPV6_MULTICAST(a6, eh->ether_dhost);
247 			etype = htons(ETHERTYPE_IPV6);
248 			memcpy(&eh->ether_type, &etype, sizeof(etype));
249 			memcpy(eh->ether_shost, IF_LLADDR(ifp), ETHER_ADDR_LEN);
250 		}
251 		break;
252 #endif
253 	default:
254 		if_printf(ifp, "can't handle af%d\n", dst->sa_family);
255 		if (m != NULL)
256 			m_freem(m);
257 		return (EAFNOSUPPORT);
258 	}
259 
260 	if (error == EHOSTDOWN) {
261 		if (ro != NULL && (ro->ro_flags & RT_HAS_GW) != 0)
262 			error = EHOSTUNREACH;
263 	}
264 
265 	if (error != 0)
266 		return (error);
267 
268 	*pflags = RT_MAY_LOOP;
269 	if (lleflags & LLE_IFADDR)
270 		*pflags |= RT_L2_ME;
271 
272 	return (0);
273 }
274 
275 /*
276  * Ethernet output routine.
277  * Encapsulate a packet of type family for the local net.
278  * Use trailer local net encapsulation if enough data in first
279  * packet leaves a multiple of 512 bytes of data in remainder.
280  */
281 int
282 ether_output(struct ifnet *ifp, struct mbuf *m,
283 	const struct sockaddr *dst, struct route *ro)
284 {
285 	int error = 0;
286 	char linkhdr[ETHER_HDR_LEN], *phdr;
287 	struct ether_header *eh;
288 	struct pf_mtag *t;
289 	bool loop_copy;
290 	int hlen;	/* link layer header length */
291 	uint32_t pflags;
292 	struct llentry *lle = NULL;
293 	int addref = 0;
294 
295 	phdr = NULL;
296 	pflags = 0;
297 	if (ro != NULL) {
298 		/* XXX BPF uses ro_prepend */
299 		if (ro->ro_prepend != NULL) {
300 			phdr = ro->ro_prepend;
301 			hlen = ro->ro_plen;
302 		} else if (!(m->m_flags & (M_BCAST | M_MCAST))) {
303 			if ((ro->ro_flags & RT_LLE_CACHE) != 0) {
304 				lle = ro->ro_lle;
305 				if (lle != NULL &&
306 				    (lle->la_flags & LLE_VALID) == 0) {
307 					LLE_FREE(lle);
308 					lle = NULL;	/* redundant */
309 					ro->ro_lle = NULL;
310 				}
311 				if (lle == NULL) {
312 					/* if we lookup, keep cache */
313 					addref = 1;
314 				} else
315 					/*
316 					 * Notify LLE code that
317 					 * the entry was used
318 					 * by datapath.
319 					 */
320 					llentry_provide_feedback(lle);
321 			}
322 			if (lle != NULL) {
323 				phdr = lle->r_linkdata;
324 				hlen = lle->r_hdrlen;
325 				pflags = lle->r_flags;
326 			}
327 		}
328 	}
329 
330 #ifdef MAC
331 	error = mac_ifnet_check_transmit(ifp, m);
332 	if (error)
333 		senderr(error);
334 #endif
335 
336 	M_PROFILE(m);
337 	if (ifp->if_flags & IFF_MONITOR)
338 		senderr(ENETDOWN);
339 	if (!((ifp->if_flags & IFF_UP) &&
340 	    (ifp->if_drv_flags & IFF_DRV_RUNNING)))
341 		senderr(ENETDOWN);
342 
343 	if (phdr == NULL) {
344 		/* No prepend data supplied. Try to calculate ourselves. */
345 		phdr = linkhdr;
346 		hlen = ETHER_HDR_LEN;
347 		error = ether_resolve_addr(ifp, m, dst, ro, phdr, &pflags,
348 		    addref ? &lle : NULL);
349 		if (addref && lle != NULL)
350 			ro->ro_lle = lle;
351 		if (error != 0)
352 			return (error == EWOULDBLOCK ? 0 : error);
353 	}
354 
355 	if ((pflags & RT_L2_ME) != 0) {
356 		update_mbuf_csumflags(m, m);
357 		return (if_simloop(ifp, m, RO_GET_FAMILY(ro, dst), 0));
358 	}
359 	loop_copy = (pflags & RT_MAY_LOOP) != 0;
360 
361 	/*
362 	 * Add local net header.  If no space in first mbuf,
363 	 * allocate another.
364 	 *
365 	 * Note that we do prepend regardless of RT_HAS_HEADER flag.
366 	 * This is done because BPF code shifts m_data pointer
367 	 * to the end of ethernet header prior to calling if_output().
368 	 */
369 	M_PREPEND(m, hlen, M_NOWAIT);
370 	if (m == NULL)
371 		senderr(ENOBUFS);
372 	if ((pflags & RT_HAS_HEADER) == 0) {
373 		eh = mtod(m, struct ether_header *);
374 		memcpy(eh, phdr, hlen);
375 	}
376 
377 	/*
378 	 * If a simplex interface, and the packet is being sent to our
379 	 * Ethernet address or a broadcast address, loopback a copy.
380 	 * XXX To make a simplex device behave exactly like a duplex
381 	 * device, we should copy in the case of sending to our own
382 	 * ethernet address (thus letting the original actually appear
383 	 * on the wire). However, we don't do that here for security
384 	 * reasons and compatibility with the original behavior.
385 	 */
386 	if ((m->m_flags & M_BCAST) && loop_copy && (ifp->if_flags & IFF_SIMPLEX) &&
387 	    ((t = pf_find_mtag(m)) == NULL || !t->routed)) {
388 		struct mbuf *n;
389 
390 		/*
391 		 * Because if_simloop() modifies the packet, we need a
392 		 * writable copy through m_dup() instead of a readonly
393 		 * one as m_copy[m] would give us. The alternative would
394 		 * be to modify if_simloop() to handle the readonly mbuf,
395 		 * but performancewise it is mostly equivalent (trading
396 		 * extra data copying vs. extra locking).
397 		 *
398 		 * XXX This is a local workaround.  A number of less
399 		 * often used kernel parts suffer from the same bug.
400 		 * See PR kern/105943 for a proposed general solution.
401 		 */
402 		if ((n = m_dup(m, M_NOWAIT)) != NULL) {
403 			update_mbuf_csumflags(m, n);
404 			(void)if_simloop(ifp, n, RO_GET_FAMILY(ro, dst), hlen);
405 		} else
406 			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
407 	}
408 
409        /*
410 	* Bridges require special output handling.
411 	*/
412 	if (ifp->if_bridge) {
413 		BRIDGE_OUTPUT(ifp, m, error);
414 		return (error);
415 	}
416 
417 #if defined(INET) || defined(INET6)
418 	if (ifp->if_carp &&
419 	    (error = (*carp_output_p)(ifp, m, dst)))
420 		goto bad;
421 #endif
422 
423 	/* Handle ng_ether(4) processing, if any */
424 	if (ifp->if_l2com != NULL) {
425 		KASSERT(ng_ether_output_p != NULL,
426 		    ("ng_ether_output_p is NULL"));
427 		if ((error = (*ng_ether_output_p)(ifp, &m)) != 0) {
428 bad:			if (m != NULL)
429 				m_freem(m);
430 			return (error);
431 		}
432 		if (m == NULL)
433 			return (0);
434 	}
435 
436 	/* Continue with link-layer output */
437 	return ether_output_frame(ifp, m);
438 }
439 
440 static bool
441 ether_set_pcp(struct mbuf **mp, struct ifnet *ifp, uint8_t pcp)
442 {
443 	struct ether_8021q_tag qtag;
444 	struct ether_header *eh;
445 
446 	eh = mtod(*mp, struct ether_header *);
447 	if (ntohs(eh->ether_type) == ETHERTYPE_VLAN ||
448 	    ntohs(eh->ether_type) == ETHERTYPE_QINQ)
449 		return (true);
450 
451 	qtag.vid = 0;
452 	qtag.pcp = pcp;
453 	qtag.proto = ETHERTYPE_VLAN;
454 	if (ether_8021q_frame(mp, ifp, ifp, &qtag))
455 		return (true);
456 	if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
457 	return (false);
458 }
459 
460 /*
461  * Ethernet link layer output routine to send a raw frame to the device.
462  *
463  * This assumes that the 14 byte Ethernet header is present and contiguous
464  * in the first mbuf (if BRIDGE'ing).
465  */
466 int
467 ether_output_frame(struct ifnet *ifp, struct mbuf *m)
468 {
469 	uint8_t pcp;
470 
471 	pcp = ifp->if_pcp;
472 	if (pcp != IFNET_PCP_NONE && ifp->if_type != IFT_L2VLAN &&
473 	    !ether_set_pcp(&m, ifp, pcp))
474 		return (0);
475 
476 	if (PFIL_HOOKED_OUT(V_link_pfil_head))
477 		switch (pfil_run_hooks(V_link_pfil_head, &m, ifp, PFIL_OUT,
478 		    NULL)) {
479 		case PFIL_DROPPED:
480 			return (EACCES);
481 		case PFIL_CONSUMED:
482 			return (0);
483 		}
484 
485 #ifdef EXPERIMENTAL
486 #if defined(INET6) && defined(INET)
487 	/* draft-ietf-6man-ipv6only-flag */
488 	/* Catch ETHERTYPE_IP, and ETHERTYPE_[REV]ARP if we are v6-only. */
489 	if ((ND_IFINFO(ifp)->flags & ND6_IFF_IPV6_ONLY_MASK) != 0) {
490 		struct ether_header *eh;
491 
492 		eh = mtod(m, struct ether_header *);
493 		switch (ntohs(eh->ether_type)) {
494 		case ETHERTYPE_IP:
495 		case ETHERTYPE_ARP:
496 		case ETHERTYPE_REVARP:
497 			m_freem(m);
498 			return (EAFNOSUPPORT);
499 			/* NOTREACHED */
500 			break;
501 		};
502 	}
503 #endif
504 #endif
505 
506 	/*
507 	 * Queue message on interface, update output statistics if successful,
508 	 * and start output if interface not yet active.
509 	 *
510 	 * If KMSAN is enabled, use it to verify that the data does not contain
511 	 * any uninitialized bytes.
512 	 */
513 	kmsan_check_mbuf(m, "ether_output");
514 	return ((ifp->if_transmit)(ifp, m));
515 }
516 
517 /*
518  * Process a received Ethernet packet; the packet is in the
519  * mbuf chain m with the ethernet header at the front.
520  */
521 static void
522 ether_input_internal(struct ifnet *ifp, struct mbuf *m)
523 {
524 	struct ether_header *eh;
525 	u_short etype;
526 
527 	if ((ifp->if_flags & IFF_UP) == 0) {
528 		m_freem(m);
529 		return;
530 	}
531 #ifdef DIAGNOSTIC
532 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
533 		if_printf(ifp, "discard frame at !IFF_DRV_RUNNING\n");
534 		m_freem(m);
535 		return;
536 	}
537 #endif
538 	if (m->m_len < ETHER_HDR_LEN) {
539 		/* XXX maybe should pullup? */
540 		if_printf(ifp, "discard frame w/o leading ethernet "
541 				"header (len %u pkt len %u)\n",
542 				m->m_len, m->m_pkthdr.len);
543 		if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
544 		m_freem(m);
545 		return;
546 	}
547 	eh = mtod(m, struct ether_header *);
548 	etype = ntohs(eh->ether_type);
549 	random_harvest_queue_ether(m, sizeof(*m));
550 
551 #ifdef EXPERIMENTAL
552 #if defined(INET6) && defined(INET)
553 	/* draft-ietf-6man-ipv6only-flag */
554 	/* Catch ETHERTYPE_IP, and ETHERTYPE_[REV]ARP if we are v6-only. */
555 	if ((ND_IFINFO(ifp)->flags & ND6_IFF_IPV6_ONLY_MASK) != 0) {
556 		switch (etype) {
557 		case ETHERTYPE_IP:
558 		case ETHERTYPE_ARP:
559 		case ETHERTYPE_REVARP:
560 			m_freem(m);
561 			return;
562 			/* NOTREACHED */
563 			break;
564 		};
565 	}
566 #endif
567 #endif
568 
569 	CURVNET_SET_QUIET(ifp->if_vnet);
570 
571 	if (ETHER_IS_MULTICAST(eh->ether_dhost)) {
572 		if (ETHER_IS_BROADCAST(eh->ether_dhost))
573 			m->m_flags |= M_BCAST;
574 		else
575 			m->m_flags |= M_MCAST;
576 		if_inc_counter(ifp, IFCOUNTER_IMCASTS, 1);
577 	}
578 
579 #ifdef MAC
580 	/*
581 	 * Tag the mbuf with an appropriate MAC label before any other
582 	 * consumers can get to it.
583 	 */
584 	mac_ifnet_create_mbuf(ifp, m);
585 #endif
586 
587 	/*
588 	 * Give bpf a chance at the packet.
589 	 */
590 	ETHER_BPF_MTAP(ifp, m);
591 
592 	/*
593 	 * If the CRC is still on the packet, trim it off. We do this once
594 	 * and once only in case we are re-entered. Nothing else on the
595 	 * Ethernet receive path expects to see the FCS.
596 	 */
597 	if (m->m_flags & M_HASFCS) {
598 		m_adj(m, -ETHER_CRC_LEN);
599 		m->m_flags &= ~M_HASFCS;
600 	}
601 
602 	if (!(ifp->if_capenable & IFCAP_HWSTATS))
603 		if_inc_counter(ifp, IFCOUNTER_IBYTES, m->m_pkthdr.len);
604 
605 	/* Allow monitor mode to claim this frame, after stats are updated. */
606 	if (ifp->if_flags & IFF_MONITOR) {
607 		m_freem(m);
608 		CURVNET_RESTORE();
609 		return;
610 	}
611 
612 	/* Handle input from a lagg(4) port */
613 	if (ifp->if_type == IFT_IEEE8023ADLAG) {
614 		KASSERT(lagg_input_ethernet_p != NULL,
615 		    ("%s: if_lagg not loaded!", __func__));
616 		m = (*lagg_input_ethernet_p)(ifp, m);
617 		if (m != NULL)
618 			ifp = m->m_pkthdr.rcvif;
619 		else {
620 			CURVNET_RESTORE();
621 			return;
622 		}
623 	}
624 
625 	/*
626 	 * If the hardware did not process an 802.1Q tag, do this now,
627 	 * to allow 802.1P priority frames to be passed to the main input
628 	 * path correctly.
629 	 */
630 	if ((m->m_flags & M_VLANTAG) == 0 &&
631 	    ((etype == ETHERTYPE_VLAN) || (etype == ETHERTYPE_QINQ))) {
632 		struct ether_vlan_header *evl;
633 
634 		if (m->m_len < sizeof(*evl) &&
635 		    (m = m_pullup(m, sizeof(*evl))) == NULL) {
636 #ifdef DIAGNOSTIC
637 			if_printf(ifp, "cannot pullup VLAN header\n");
638 #endif
639 			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
640 			CURVNET_RESTORE();
641 			return;
642 		}
643 
644 		evl = mtod(m, struct ether_vlan_header *);
645 		m->m_pkthdr.ether_vtag = ntohs(evl->evl_tag);
646 		m->m_flags |= M_VLANTAG;
647 
648 		bcopy((char *)evl, (char *)evl + ETHER_VLAN_ENCAP_LEN,
649 		    ETHER_HDR_LEN - ETHER_TYPE_LEN);
650 		m_adj(m, ETHER_VLAN_ENCAP_LEN);
651 		eh = mtod(m, struct ether_header *);
652 	}
653 
654 	M_SETFIB(m, ifp->if_fib);
655 
656 	/* Allow ng_ether(4) to claim this frame. */
657 	if (ifp->if_l2com != NULL) {
658 		KASSERT(ng_ether_input_p != NULL,
659 		    ("%s: ng_ether_input_p is NULL", __func__));
660 		m->m_flags &= ~M_PROMISC;
661 		(*ng_ether_input_p)(ifp, &m);
662 		if (m == NULL) {
663 			CURVNET_RESTORE();
664 			return;
665 		}
666 		eh = mtod(m, struct ether_header *);
667 	}
668 
669 	/*
670 	 * Allow if_bridge(4) to claim this frame.
671 	 * The BRIDGE_INPUT() macro will update ifp if the bridge changed it
672 	 * and the frame should be delivered locally.
673 	 */
674 	if (ifp->if_bridge != NULL) {
675 		m->m_flags &= ~M_PROMISC;
676 		BRIDGE_INPUT(ifp, m);
677 		if (m == NULL) {
678 			CURVNET_RESTORE();
679 			return;
680 		}
681 		eh = mtod(m, struct ether_header *);
682 	}
683 
684 #if defined(INET) || defined(INET6)
685 	/*
686 	 * Clear M_PROMISC on frame so that carp(4) will see it when the
687 	 * mbuf flows up to Layer 3.
688 	 * FreeBSD's implementation of carp(4) uses the inprotosw
689 	 * to dispatch IPPROTO_CARP. carp(4) also allocates its own
690 	 * Ethernet addresses of the form 00:00:5e:00:01:xx, which
691 	 * is outside the scope of the M_PROMISC test below.
692 	 * TODO: Maintain a hash table of ethernet addresses other than
693 	 * ether_dhost which may be active on this ifp.
694 	 */
695 	if (ifp->if_carp && (*carp_forus_p)(ifp, eh->ether_dhost)) {
696 		m->m_flags &= ~M_PROMISC;
697 	} else
698 #endif
699 	{
700 		/*
701 		 * If the frame received was not for our MAC address, set the
702 		 * M_PROMISC flag on the mbuf chain. The frame may need to
703 		 * be seen by the rest of the Ethernet input path in case of
704 		 * re-entry (e.g. bridge, vlan, netgraph) but should not be
705 		 * seen by upper protocol layers.
706 		 */
707 		if (!ETHER_IS_MULTICAST(eh->ether_dhost) &&
708 		    bcmp(IF_LLADDR(ifp), eh->ether_dhost, ETHER_ADDR_LEN) != 0)
709 			m->m_flags |= M_PROMISC;
710 	}
711 
712 	ether_demux(ifp, m);
713 	CURVNET_RESTORE();
714 }
715 
716 /*
717  * Ethernet input dispatch; by default, direct dispatch here regardless of
718  * global configuration.  However, if RSS is enabled, hook up RSS affinity
719  * so that when deferred or hybrid dispatch is enabled, we can redistribute
720  * load based on RSS.
721  *
722  * XXXRW: Would be nice if the ifnet passed up a flag indicating whether or
723  * not it had already done work distribution via multi-queue.  Then we could
724  * direct dispatch in the event load balancing was already complete and
725  * handle the case of interfaces with different capabilities better.
726  *
727  * XXXRW: Sort of want an M_DISTRIBUTED flag to avoid multiple distributions
728  * at multiple layers?
729  *
730  * XXXRW: For now, enable all this only if RSS is compiled in, although it
731  * works fine without RSS.  Need to characterise the performance overhead
732  * of the detour through the netisr code in the event the result is always
733  * direct dispatch.
734  */
735 static void
736 ether_nh_input(struct mbuf *m)
737 {
738 
739 	M_ASSERTPKTHDR(m);
740 	KASSERT(m->m_pkthdr.rcvif != NULL,
741 	    ("%s: NULL interface pointer", __func__));
742 	ether_input_internal(m->m_pkthdr.rcvif, m);
743 }
744 
745 static struct netisr_handler	ether_nh = {
746 	.nh_name = "ether",
747 	.nh_handler = ether_nh_input,
748 	.nh_proto = NETISR_ETHER,
749 #ifdef RSS
750 	.nh_policy = NETISR_POLICY_CPU,
751 	.nh_dispatch = NETISR_DISPATCH_DIRECT,
752 	.nh_m2cpuid = rss_m2cpuid,
753 #else
754 	.nh_policy = NETISR_POLICY_SOURCE,
755 	.nh_dispatch = NETISR_DISPATCH_DIRECT,
756 #endif
757 };
758 
759 static void
760 ether_init(__unused void *arg)
761 {
762 
763 	netisr_register(&ether_nh);
764 }
765 SYSINIT(ether, SI_SUB_INIT_IF, SI_ORDER_ANY, ether_init, NULL);
766 
767 static void
768 vnet_ether_init(__unused void *arg)
769 {
770 	struct pfil_head_args args;
771 
772 	args.pa_version = PFIL_VERSION;
773 	args.pa_flags = PFIL_IN | PFIL_OUT;
774 	args.pa_type = PFIL_TYPE_ETHERNET;
775 	args.pa_headname = PFIL_ETHER_NAME;
776 	V_link_pfil_head = pfil_head_register(&args);
777 
778 #ifdef VIMAGE
779 	netisr_register_vnet(&ether_nh);
780 #endif
781 }
782 VNET_SYSINIT(vnet_ether_init, SI_SUB_PROTO_IF, SI_ORDER_ANY,
783     vnet_ether_init, NULL);
784 
785 #ifdef VIMAGE
786 static void
787 vnet_ether_pfil_destroy(__unused void *arg)
788 {
789 
790 	pfil_head_unregister(V_link_pfil_head);
791 }
792 VNET_SYSUNINIT(vnet_ether_pfil_uninit, SI_SUB_PROTO_PFIL, SI_ORDER_ANY,
793     vnet_ether_pfil_destroy, NULL);
794 
795 static void
796 vnet_ether_destroy(__unused void *arg)
797 {
798 
799 	netisr_unregister_vnet(&ether_nh);
800 }
801 VNET_SYSUNINIT(vnet_ether_uninit, SI_SUB_PROTO_IF, SI_ORDER_ANY,
802     vnet_ether_destroy, NULL);
803 #endif
804 
805 static void
806 ether_input(struct ifnet *ifp, struct mbuf *m)
807 {
808 	struct epoch_tracker et;
809 	struct mbuf *mn;
810 	bool needs_epoch;
811 
812 	needs_epoch = !(ifp->if_flags & IFF_KNOWSEPOCH);
813 
814 	/*
815 	 * The drivers are allowed to pass in a chain of packets linked with
816 	 * m_nextpkt. We split them up into separate packets here and pass
817 	 * them up. This allows the drivers to amortize the receive lock.
818 	 */
819 	CURVNET_SET_QUIET(ifp->if_vnet);
820 	if (__predict_false(needs_epoch))
821 		NET_EPOCH_ENTER(et);
822 	while (m) {
823 		mn = m->m_nextpkt;
824 		m->m_nextpkt = NULL;
825 
826 		/*
827 		 * We will rely on rcvif being set properly in the deferred
828 		 * context, so assert it is correct here.
829 		 */
830 		MPASS((m->m_pkthdr.csum_flags & CSUM_SND_TAG) == 0);
831 		KASSERT(m->m_pkthdr.rcvif == ifp, ("%s: ifnet mismatch m %p "
832 		    "rcvif %p ifp %p", __func__, m, m->m_pkthdr.rcvif, ifp));
833 		netisr_dispatch(NETISR_ETHER, m);
834 		m = mn;
835 	}
836 	if (__predict_false(needs_epoch))
837 		NET_EPOCH_EXIT(et);
838 	CURVNET_RESTORE();
839 }
840 
841 /*
842  * Upper layer processing for a received Ethernet packet.
843  */
844 void
845 ether_demux(struct ifnet *ifp, struct mbuf *m)
846 {
847 	struct ether_header *eh;
848 	int i, isr;
849 	u_short ether_type;
850 
851 	NET_EPOCH_ASSERT();
852 	KASSERT(ifp != NULL, ("%s: NULL interface pointer", __func__));
853 
854 	/* Do not grab PROMISC frames in case we are re-entered. */
855 	if (PFIL_HOOKED_IN(V_link_pfil_head) && !(m->m_flags & M_PROMISC)) {
856 		i = pfil_run_hooks(V_link_pfil_head, &m, ifp, PFIL_IN, NULL);
857 		if (i != 0 || m == NULL)
858 			return;
859 	}
860 
861 	eh = mtod(m, struct ether_header *);
862 	ether_type = ntohs(eh->ether_type);
863 
864 	/*
865 	 * If this frame has a VLAN tag other than 0, call vlan_input()
866 	 * if its module is loaded. Otherwise, drop.
867 	 */
868 	if ((m->m_flags & M_VLANTAG) &&
869 	    EVL_VLANOFTAG(m->m_pkthdr.ether_vtag) != 0) {
870 		if (ifp->if_vlantrunk == NULL) {
871 			if_inc_counter(ifp, IFCOUNTER_NOPROTO, 1);
872 			m_freem(m);
873 			return;
874 		}
875 		KASSERT(vlan_input_p != NULL,("%s: VLAN not loaded!",
876 		    __func__));
877 		/* Clear before possibly re-entering ether_input(). */
878 		m->m_flags &= ~M_PROMISC;
879 		(*vlan_input_p)(ifp, m);
880 		return;
881 	}
882 
883 	/*
884 	 * Pass promiscuously received frames to the upper layer if the user
885 	 * requested this by setting IFF_PPROMISC. Otherwise, drop them.
886 	 */
887 	if ((ifp->if_flags & IFF_PPROMISC) == 0 && (m->m_flags & M_PROMISC)) {
888 		m_freem(m);
889 		return;
890 	}
891 
892 	/*
893 	 * Reset layer specific mbuf flags to avoid confusing upper layers.
894 	 * Strip off Ethernet header.
895 	 */
896 	m->m_flags &= ~M_VLANTAG;
897 	m_clrprotoflags(m);
898 	m_adj(m, ETHER_HDR_LEN);
899 
900 	/*
901 	 * Dispatch frame to upper layer.
902 	 */
903 	switch (ether_type) {
904 #ifdef INET
905 	case ETHERTYPE_IP:
906 		isr = NETISR_IP;
907 		break;
908 
909 	case ETHERTYPE_ARP:
910 		if (ifp->if_flags & IFF_NOARP) {
911 			/* Discard packet if ARP is disabled on interface */
912 			m_freem(m);
913 			return;
914 		}
915 		isr = NETISR_ARP;
916 		break;
917 #endif
918 #ifdef INET6
919 	case ETHERTYPE_IPV6:
920 		isr = NETISR_IPV6;
921 		break;
922 #endif
923 	default:
924 		goto discard;
925 	}
926 	netisr_dispatch(isr, m);
927 	return;
928 
929 discard:
930 	/*
931 	 * Packet is to be discarded.  If netgraph is present,
932 	 * hand the packet to it for last chance processing;
933 	 * otherwise dispose of it.
934 	 */
935 	if (ifp->if_l2com != NULL) {
936 		KASSERT(ng_ether_input_orphan_p != NULL,
937 		    ("ng_ether_input_orphan_p is NULL"));
938 		/*
939 		 * Put back the ethernet header so netgraph has a
940 		 * consistent view of inbound packets.
941 		 */
942 		M_PREPEND(m, ETHER_HDR_LEN, M_NOWAIT);
943 		(*ng_ether_input_orphan_p)(ifp, m);
944 		return;
945 	}
946 	m_freem(m);
947 }
948 
949 /*
950  * Convert Ethernet address to printable (loggable) representation.
951  * This routine is for compatibility; it's better to just use
952  *
953  *	printf("%6D", <pointer to address>, ":");
954  *
955  * since there's no static buffer involved.
956  */
957 char *
958 ether_sprintf(const u_char *ap)
959 {
960 	static char etherbuf[18];
961 	snprintf(etherbuf, sizeof (etherbuf), "%6D", ap, ":");
962 	return (etherbuf);
963 }
964 
965 /*
966  * Perform common duties while attaching to interface list
967  */
968 void
969 ether_ifattach(struct ifnet *ifp, const u_int8_t *lla)
970 {
971 	int i;
972 	struct ifaddr *ifa;
973 	struct sockaddr_dl *sdl;
974 
975 	ifp->if_addrlen = ETHER_ADDR_LEN;
976 	ifp->if_hdrlen = ETHER_HDR_LEN;
977 	ifp->if_mtu = ETHERMTU;
978 	if_attach(ifp);
979 	ifp->if_output = ether_output;
980 	ifp->if_input = ether_input;
981 	ifp->if_resolvemulti = ether_resolvemulti;
982 	ifp->if_requestencap = ether_requestencap;
983 #ifdef VIMAGE
984 	ifp->if_reassign = ether_reassign;
985 #endif
986 	if (ifp->if_baudrate == 0)
987 		ifp->if_baudrate = IF_Mbps(10);		/* just a default */
988 	ifp->if_broadcastaddr = etherbroadcastaddr;
989 
990 	ifa = ifp->if_addr;
991 	KASSERT(ifa != NULL, ("%s: no lladdr!\n", __func__));
992 	sdl = (struct sockaddr_dl *)ifa->ifa_addr;
993 	sdl->sdl_type = IFT_ETHER;
994 	sdl->sdl_alen = ifp->if_addrlen;
995 	bcopy(lla, LLADDR(sdl), ifp->if_addrlen);
996 
997 	if (ifp->if_hw_addr != NULL)
998 		bcopy(lla, ifp->if_hw_addr, ifp->if_addrlen);
999 
1000 	bpfattach(ifp, DLT_EN10MB, ETHER_HDR_LEN);
1001 	if (ng_ether_attach_p != NULL)
1002 		(*ng_ether_attach_p)(ifp);
1003 
1004 	/* Announce Ethernet MAC address if non-zero. */
1005 	for (i = 0; i < ifp->if_addrlen; i++)
1006 		if (lla[i] != 0)
1007 			break;
1008 	if (i != ifp->if_addrlen)
1009 		if_printf(ifp, "Ethernet address: %6D\n", lla, ":");
1010 
1011 	uuid_ether_add(LLADDR(sdl));
1012 
1013 	/* Add necessary bits are setup; announce it now. */
1014 	EVENTHANDLER_INVOKE(ether_ifattach_event, ifp);
1015 	if (IS_DEFAULT_VNET(curvnet))
1016 		devctl_notify("ETHERNET", ifp->if_xname, "IFATTACH", NULL);
1017 }
1018 
1019 /*
1020  * Perform common duties while detaching an Ethernet interface
1021  */
1022 void
1023 ether_ifdetach(struct ifnet *ifp)
1024 {
1025 	struct sockaddr_dl *sdl;
1026 
1027 	sdl = (struct sockaddr_dl *)(ifp->if_addr->ifa_addr);
1028 	uuid_ether_del(LLADDR(sdl));
1029 
1030 	if (ifp->if_l2com != NULL) {
1031 		KASSERT(ng_ether_detach_p != NULL,
1032 		    ("ng_ether_detach_p is NULL"));
1033 		(*ng_ether_detach_p)(ifp);
1034 	}
1035 
1036 	bpfdetach(ifp);
1037 	if_detach(ifp);
1038 }
1039 
1040 #ifdef VIMAGE
1041 void
1042 ether_reassign(struct ifnet *ifp, struct vnet *new_vnet, char *unused __unused)
1043 {
1044 
1045 	if (ifp->if_l2com != NULL) {
1046 		KASSERT(ng_ether_detach_p != NULL,
1047 		    ("ng_ether_detach_p is NULL"));
1048 		(*ng_ether_detach_p)(ifp);
1049 	}
1050 
1051 	if (ng_ether_attach_p != NULL) {
1052 		CURVNET_SET_QUIET(new_vnet);
1053 		(*ng_ether_attach_p)(ifp);
1054 		CURVNET_RESTORE();
1055 	}
1056 }
1057 #endif
1058 
1059 SYSCTL_DECL(_net_link);
1060 SYSCTL_NODE(_net_link, IFT_ETHER, ether, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
1061     "Ethernet");
1062 
1063 #if 0
1064 /*
1065  * This is for reference.  We have a table-driven version
1066  * of the little-endian crc32 generator, which is faster
1067  * than the double-loop.
1068  */
1069 uint32_t
1070 ether_crc32_le(const uint8_t *buf, size_t len)
1071 {
1072 	size_t i;
1073 	uint32_t crc;
1074 	int bit;
1075 	uint8_t data;
1076 
1077 	crc = 0xffffffff;	/* initial value */
1078 
1079 	for (i = 0; i < len; i++) {
1080 		for (data = *buf++, bit = 0; bit < 8; bit++, data >>= 1) {
1081 			carry = (crc ^ data) & 1;
1082 			crc >>= 1;
1083 			if (carry)
1084 				crc = (crc ^ ETHER_CRC_POLY_LE);
1085 		}
1086 	}
1087 
1088 	return (crc);
1089 }
1090 #else
1091 uint32_t
1092 ether_crc32_le(const uint8_t *buf, size_t len)
1093 {
1094 	static const uint32_t crctab[] = {
1095 		0x00000000, 0x1db71064, 0x3b6e20c8, 0x26d930ac,
1096 		0x76dc4190, 0x6b6b51f4, 0x4db26158, 0x5005713c,
1097 		0xedb88320, 0xf00f9344, 0xd6d6a3e8, 0xcb61b38c,
1098 		0x9b64c2b0, 0x86d3d2d4, 0xa00ae278, 0xbdbdf21c
1099 	};
1100 	size_t i;
1101 	uint32_t crc;
1102 
1103 	crc = 0xffffffff;	/* initial value */
1104 
1105 	for (i = 0; i < len; i++) {
1106 		crc ^= buf[i];
1107 		crc = (crc >> 4) ^ crctab[crc & 0xf];
1108 		crc = (crc >> 4) ^ crctab[crc & 0xf];
1109 	}
1110 
1111 	return (crc);
1112 }
1113 #endif
1114 
1115 uint32_t
1116 ether_crc32_be(const uint8_t *buf, size_t len)
1117 {
1118 	size_t i;
1119 	uint32_t crc, carry;
1120 	int bit;
1121 	uint8_t data;
1122 
1123 	crc = 0xffffffff;	/* initial value */
1124 
1125 	for (i = 0; i < len; i++) {
1126 		for (data = *buf++, bit = 0; bit < 8; bit++, data >>= 1) {
1127 			carry = ((crc & 0x80000000) ? 1 : 0) ^ (data & 0x01);
1128 			crc <<= 1;
1129 			if (carry)
1130 				crc = (crc ^ ETHER_CRC_POLY_BE) | carry;
1131 		}
1132 	}
1133 
1134 	return (crc);
1135 }
1136 
1137 int
1138 ether_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
1139 {
1140 	struct ifaddr *ifa = (struct ifaddr *) data;
1141 	struct ifreq *ifr = (struct ifreq *) data;
1142 	int error = 0;
1143 
1144 	switch (command) {
1145 	case SIOCSIFADDR:
1146 		ifp->if_flags |= IFF_UP;
1147 
1148 		switch (ifa->ifa_addr->sa_family) {
1149 #ifdef INET
1150 		case AF_INET:
1151 			ifp->if_init(ifp->if_softc);	/* before arpwhohas */
1152 			arp_ifinit(ifp, ifa);
1153 			break;
1154 #endif
1155 		default:
1156 			ifp->if_init(ifp->if_softc);
1157 			break;
1158 		}
1159 		break;
1160 
1161 	case SIOCGIFADDR:
1162 		bcopy(IF_LLADDR(ifp), &ifr->ifr_addr.sa_data[0],
1163 		    ETHER_ADDR_LEN);
1164 		break;
1165 
1166 	case SIOCSIFMTU:
1167 		/*
1168 		 * Set the interface MTU.
1169 		 */
1170 		if (ifr->ifr_mtu > ETHERMTU) {
1171 			error = EINVAL;
1172 		} else {
1173 			ifp->if_mtu = ifr->ifr_mtu;
1174 		}
1175 		break;
1176 
1177 	case SIOCSLANPCP:
1178 		error = priv_check(curthread, PRIV_NET_SETLANPCP);
1179 		if (error != 0)
1180 			break;
1181 		if (ifr->ifr_lan_pcp > 7 &&
1182 		    ifr->ifr_lan_pcp != IFNET_PCP_NONE) {
1183 			error = EINVAL;
1184 		} else {
1185 			ifp->if_pcp = ifr->ifr_lan_pcp;
1186 			/* broadcast event about PCP change */
1187 			EVENTHANDLER_INVOKE(ifnet_event, ifp, IFNET_EVENT_PCP);
1188 		}
1189 		break;
1190 
1191 	case SIOCGLANPCP:
1192 		ifr->ifr_lan_pcp = ifp->if_pcp;
1193 		break;
1194 
1195 	default:
1196 		error = EINVAL;			/* XXX netbsd has ENOTTY??? */
1197 		break;
1198 	}
1199 	return (error);
1200 }
1201 
1202 static int
1203 ether_resolvemulti(struct ifnet *ifp, struct sockaddr **llsa,
1204 	struct sockaddr *sa)
1205 {
1206 	struct sockaddr_dl *sdl;
1207 #ifdef INET
1208 	struct sockaddr_in *sin;
1209 #endif
1210 #ifdef INET6
1211 	struct sockaddr_in6 *sin6;
1212 #endif
1213 	u_char *e_addr;
1214 
1215 	switch(sa->sa_family) {
1216 	case AF_LINK:
1217 		/*
1218 		 * No mapping needed. Just check that it's a valid MC address.
1219 		 */
1220 		sdl = (struct sockaddr_dl *)sa;
1221 		e_addr = LLADDR(sdl);
1222 		if (!ETHER_IS_MULTICAST(e_addr))
1223 			return EADDRNOTAVAIL;
1224 		*llsa = NULL;
1225 		return 0;
1226 
1227 #ifdef INET
1228 	case AF_INET:
1229 		sin = (struct sockaddr_in *)sa;
1230 		if (!IN_MULTICAST(ntohl(sin->sin_addr.s_addr)))
1231 			return EADDRNOTAVAIL;
1232 		sdl = link_init_sdl(ifp, *llsa, IFT_ETHER);
1233 		sdl->sdl_alen = ETHER_ADDR_LEN;
1234 		e_addr = LLADDR(sdl);
1235 		ETHER_MAP_IP_MULTICAST(&sin->sin_addr, e_addr);
1236 		*llsa = (struct sockaddr *)sdl;
1237 		return 0;
1238 #endif
1239 #ifdef INET6
1240 	case AF_INET6:
1241 		sin6 = (struct sockaddr_in6 *)sa;
1242 		if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
1243 			/*
1244 			 * An IP6 address of 0 means listen to all
1245 			 * of the Ethernet multicast address used for IP6.
1246 			 * (This is used for multicast routers.)
1247 			 */
1248 			ifp->if_flags |= IFF_ALLMULTI;
1249 			*llsa = NULL;
1250 			return 0;
1251 		}
1252 		if (!IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr))
1253 			return EADDRNOTAVAIL;
1254 		sdl = link_init_sdl(ifp, *llsa, IFT_ETHER);
1255 		sdl->sdl_alen = ETHER_ADDR_LEN;
1256 		e_addr = LLADDR(sdl);
1257 		ETHER_MAP_IPV6_MULTICAST(&sin6->sin6_addr, e_addr);
1258 		*llsa = (struct sockaddr *)sdl;
1259 		return 0;
1260 #endif
1261 
1262 	default:
1263 		/*
1264 		 * Well, the text isn't quite right, but it's the name
1265 		 * that counts...
1266 		 */
1267 		return EAFNOSUPPORT;
1268 	}
1269 }
1270 
1271 static moduledata_t ether_mod = {
1272 	.name = "ether",
1273 };
1274 
1275 void
1276 ether_vlan_mtap(struct bpf_if *bp, struct mbuf *m, void *data, u_int dlen)
1277 {
1278 	struct ether_vlan_header vlan;
1279 	struct mbuf mv, mb;
1280 
1281 	KASSERT((m->m_flags & M_VLANTAG) != 0,
1282 	    ("%s: vlan information not present", __func__));
1283 	KASSERT(m->m_len >= sizeof(struct ether_header),
1284 	    ("%s: mbuf not large enough for header", __func__));
1285 	bcopy(mtod(m, char *), &vlan, sizeof(struct ether_header));
1286 	vlan.evl_proto = vlan.evl_encap_proto;
1287 	vlan.evl_encap_proto = htons(ETHERTYPE_VLAN);
1288 	vlan.evl_tag = htons(m->m_pkthdr.ether_vtag);
1289 	m->m_len -= sizeof(struct ether_header);
1290 	m->m_data += sizeof(struct ether_header);
1291 	/*
1292 	 * If a data link has been supplied by the caller, then we will need to
1293 	 * re-create a stack allocated mbuf chain with the following structure:
1294 	 *
1295 	 * (1) mbuf #1 will contain the supplied data link
1296 	 * (2) mbuf #2 will contain the vlan header
1297 	 * (3) mbuf #3 will contain the original mbuf's packet data
1298 	 *
1299 	 * Otherwise, submit the packet and vlan header via bpf_mtap2().
1300 	 */
1301 	if (data != NULL) {
1302 		mv.m_next = m;
1303 		mv.m_data = (caddr_t)&vlan;
1304 		mv.m_len = sizeof(vlan);
1305 		mb.m_next = &mv;
1306 		mb.m_data = data;
1307 		mb.m_len = dlen;
1308 		bpf_mtap(bp, &mb);
1309 	} else
1310 		bpf_mtap2(bp, &vlan, sizeof(vlan), m);
1311 	m->m_len += sizeof(struct ether_header);
1312 	m->m_data -= sizeof(struct ether_header);
1313 }
1314 
1315 struct mbuf *
1316 ether_vlanencap_proto(struct mbuf *m, uint16_t tag, uint16_t proto)
1317 {
1318 	struct ether_vlan_header *evl;
1319 
1320 	M_PREPEND(m, ETHER_VLAN_ENCAP_LEN, M_NOWAIT);
1321 	if (m == NULL)
1322 		return (NULL);
1323 	/* M_PREPEND takes care of m_len, m_pkthdr.len for us */
1324 
1325 	if (m->m_len < sizeof(*evl)) {
1326 		m = m_pullup(m, sizeof(*evl));
1327 		if (m == NULL)
1328 			return (NULL);
1329 	}
1330 
1331 	/*
1332 	 * Transform the Ethernet header into an Ethernet header
1333 	 * with 802.1Q encapsulation.
1334 	 */
1335 	evl = mtod(m, struct ether_vlan_header *);
1336 	bcopy((char *)evl + ETHER_VLAN_ENCAP_LEN,
1337 	    (char *)evl, ETHER_HDR_LEN - ETHER_TYPE_LEN);
1338 	evl->evl_encap_proto = htons(proto);
1339 	evl->evl_tag = htons(tag);
1340 	return (m);
1341 }
1342 
1343 static SYSCTL_NODE(_net_link, IFT_L2VLAN, vlan, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
1344     "IEEE 802.1Q VLAN");
1345 static SYSCTL_NODE(_net_link_vlan, PF_LINK, link,
1346     CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
1347     "for consistency");
1348 
1349 VNET_DEFINE_STATIC(int, soft_pad);
1350 #define	V_soft_pad	VNET(soft_pad)
1351 SYSCTL_INT(_net_link_vlan, OID_AUTO, soft_pad, CTLFLAG_RW | CTLFLAG_VNET,
1352     &VNET_NAME(soft_pad), 0,
1353     "pad short frames before tagging");
1354 
1355 /*
1356  * For now, make preserving PCP via an mbuf tag optional, as it increases
1357  * per-packet memory allocations and frees.  In the future, it would be
1358  * preferable to reuse ether_vtag for this, or similar.
1359  */
1360 VNET_DEFINE(int, vlan_mtag_pcp) = 0;
1361 #define	V_vlan_mtag_pcp	VNET(vlan_mtag_pcp)
1362 SYSCTL_INT(_net_link_vlan, OID_AUTO, mtag_pcp, CTLFLAG_RW | CTLFLAG_VNET,
1363     &VNET_NAME(vlan_mtag_pcp), 0,
1364     "Retain VLAN PCP information as packets are passed up the stack");
1365 
1366 bool
1367 ether_8021q_frame(struct mbuf **mp, struct ifnet *ife, struct ifnet *p,
1368     struct ether_8021q_tag *qtag)
1369 {
1370 	struct m_tag *mtag;
1371 	int n;
1372 	uint16_t tag;
1373 	static const char pad[8];	/* just zeros */
1374 
1375 	/*
1376 	 * Pad the frame to the minimum size allowed if told to.
1377 	 * This option is in accord with IEEE Std 802.1Q, 2003 Ed.,
1378 	 * paragraph C.4.4.3.b.  It can help to work around buggy
1379 	 * bridges that violate paragraph C.4.4.3.a from the same
1380 	 * document, i.e., fail to pad short frames after untagging.
1381 	 * E.g., a tagged frame 66 bytes long (incl. FCS) is OK, but
1382 	 * untagging it will produce a 62-byte frame, which is a runt
1383 	 * and requires padding.  There are VLAN-enabled network
1384 	 * devices that just discard such runts instead or mishandle
1385 	 * them somehow.
1386 	 */
1387 	if (V_soft_pad && p->if_type == IFT_ETHER) {
1388 		for (n = ETHERMIN + ETHER_HDR_LEN - (*mp)->m_pkthdr.len;
1389 		     n > 0; n -= sizeof(pad)) {
1390 			if (!m_append(*mp, min(n, sizeof(pad)), pad))
1391 				break;
1392 		}
1393 		if (n > 0) {
1394 			m_freem(*mp);
1395 			*mp = NULL;
1396 			if_printf(ife, "cannot pad short frame");
1397 			return (false);
1398 		}
1399 	}
1400 
1401 	/*
1402 	 * If PCP is set in mbuf, use it
1403 	 */
1404 	if ((*mp)->m_flags & M_VLANTAG) {
1405 		qtag->pcp = EVL_PRIOFTAG((*mp)->m_pkthdr.ether_vtag);
1406 	}
1407 
1408 	/*
1409 	 * If underlying interface can do VLAN tag insertion itself,
1410 	 * just pass the packet along. However, we need some way to
1411 	 * tell the interface where the packet came from so that it
1412 	 * knows how to find the VLAN tag to use, so we attach a
1413 	 * packet tag that holds it.
1414 	 */
1415 	if (V_vlan_mtag_pcp && (mtag = m_tag_locate(*mp, MTAG_8021Q,
1416 	    MTAG_8021Q_PCP_OUT, NULL)) != NULL)
1417 		tag = EVL_MAKETAG(qtag->vid, *(uint8_t *)(mtag + 1), 0);
1418 	else
1419 		tag = EVL_MAKETAG(qtag->vid, qtag->pcp, 0);
1420 	if ((p->if_capenable & IFCAP_VLAN_HWTAGGING) &&
1421 	    (qtag->proto == ETHERTYPE_VLAN)) {
1422 		(*mp)->m_pkthdr.ether_vtag = tag;
1423 		(*mp)->m_flags |= M_VLANTAG;
1424 	} else {
1425 		*mp = ether_vlanencap_proto(*mp, tag, qtag->proto);
1426 		if (*mp == NULL) {
1427 			if_printf(ife, "unable to prepend 802.1Q header");
1428 			return (false);
1429 		}
1430 	}
1431 	return (true);
1432 }
1433 
1434 /*
1435  * Allocate an address from the FreeBSD Foundation OUI.  This uses a
1436  * cryptographic hash function on the containing jail's name, UUID and the
1437  * interface name to attempt to provide a unique but stable address.
1438  * Pseudo-interfaces which require a MAC address should use this function to
1439  * allocate non-locally-administered addresses.
1440  */
1441 void
1442 ether_gen_addr(struct ifnet *ifp, struct ether_addr *hwaddr)
1443 {
1444 	SHA1_CTX ctx;
1445 	char *buf;
1446 	char uuid[HOSTUUIDLEN + 1];
1447 	uint64_t addr;
1448 	int i, sz;
1449 	char digest[SHA1_RESULTLEN];
1450 	char jailname[MAXHOSTNAMELEN];
1451 
1452 	getcredhostuuid(curthread->td_ucred, uuid, sizeof(uuid));
1453 	if (strncmp(uuid, DEFAULT_HOSTUUID, sizeof(uuid)) == 0) {
1454 		/* Fall back to a random mac address. */
1455 		goto rando;
1456 	}
1457 
1458 	/* If each (vnet) jail would also have a unique hostuuid this would not
1459 	 * be necessary. */
1460 	getjailname(curthread->td_ucred, jailname, sizeof(jailname));
1461 	sz = asprintf(&buf, M_TEMP, "%s-%s-%s", uuid, if_name(ifp),
1462 	    jailname);
1463 	if (sz < 0) {
1464 		/* Fall back to a random mac address. */
1465 		goto rando;
1466 	}
1467 
1468 	SHA1Init(&ctx);
1469 	SHA1Update(&ctx, buf, sz);
1470 	SHA1Final(digest, &ctx);
1471 	free(buf, M_TEMP);
1472 
1473 	addr = ((digest[0] << 16) | (digest[1] << 8) | digest[2]) &
1474 	    OUI_FREEBSD_GENERATED_MASK;
1475 	addr = OUI_FREEBSD(addr);
1476 	for (i = 0; i < ETHER_ADDR_LEN; ++i) {
1477 		hwaddr->octet[i] = addr >> ((ETHER_ADDR_LEN - i - 1) * 8) &
1478 		    0xFF;
1479 	}
1480 
1481 	return;
1482 rando:
1483 	arc4rand(hwaddr, sizeof(*hwaddr), 0);
1484 	/* Unicast */
1485 	hwaddr->octet[0] &= 0xFE;
1486 	/* Locally administered. */
1487 	hwaddr->octet[0] |= 0x02;
1488 }
1489 
1490 DECLARE_MODULE(ether, ether_mod, SI_SUB_INIT_IF, SI_ORDER_ANY);
1491 MODULE_VERSION(ether, 1);
1492