xref: /freebsd/sys/net/if_ethersubr.c (revision c6989859ae9388eeb46a24fe88f9b8d07101c710)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 1982, 1989, 1993
5  *	The Regents of the University of California.  All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. Neither the name of the University nor the names of its contributors
16  *    may be used to endorse or promote products derived from this software
17  *    without specific prior written permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29  * SUCH DAMAGE.
30  *
31  *	@(#)if_ethersubr.c	8.1 (Berkeley) 6/10/93
32  * $FreeBSD$
33  */
34 
35 #include "opt_inet.h"
36 #include "opt_inet6.h"
37 #include "opt_netgraph.h"
38 #include "opt_mbuf_profiling.h"
39 #include "opt_rss.h"
40 
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/devctl.h>
44 #include <sys/eventhandler.h>
45 #include <sys/jail.h>
46 #include <sys/kernel.h>
47 #include <sys/lock.h>
48 #include <sys/malloc.h>
49 #include <sys/module.h>
50 #include <sys/mbuf.h>
51 #include <sys/proc.h>
52 #include <sys/priv.h>
53 #include <sys/random.h>
54 #include <sys/socket.h>
55 #include <sys/sockio.h>
56 #include <sys/sysctl.h>
57 #include <sys/uuid.h>
58 
59 #include <net/ieee_oui.h>
60 #include <net/if.h>
61 #include <net/if_var.h>
62 #include <net/if_arp.h>
63 #include <net/netisr.h>
64 #include <net/route.h>
65 #include <net/if_llc.h>
66 #include <net/if_dl.h>
67 #include <net/if_types.h>
68 #include <net/bpf.h>
69 #include <net/ethernet.h>
70 #include <net/if_bridgevar.h>
71 #include <net/if_vlan_var.h>
72 #include <net/if_llatbl.h>
73 #include <net/pfil.h>
74 #include <net/rss_config.h>
75 #include <net/vnet.h>
76 
77 #include <netpfil/pf/pf_mtag.h>
78 
79 #if defined(INET) || defined(INET6)
80 #include <netinet/in.h>
81 #include <netinet/in_var.h>
82 #include <netinet/if_ether.h>
83 #include <netinet/ip_carp.h>
84 #include <netinet/ip_var.h>
85 #endif
86 #ifdef INET6
87 #include <netinet6/nd6.h>
88 #endif
89 #include <security/mac/mac_framework.h>
90 
91 #include <crypto/sha1.h>
92 
93 #ifdef CTASSERT
94 CTASSERT(sizeof (struct ether_header) == ETHER_ADDR_LEN * 2 + 2);
95 CTASSERT(sizeof (struct ether_addr) == ETHER_ADDR_LEN);
96 #endif
97 
98 VNET_DEFINE(pfil_head_t, link_pfil_head);	/* Packet filter hooks */
99 
100 /* netgraph node hooks for ng_ether(4) */
101 void	(*ng_ether_input_p)(struct ifnet *ifp, struct mbuf **mp);
102 void	(*ng_ether_input_orphan_p)(struct ifnet *ifp, struct mbuf *m);
103 int	(*ng_ether_output_p)(struct ifnet *ifp, struct mbuf **mp);
104 void	(*ng_ether_attach_p)(struct ifnet *ifp);
105 void	(*ng_ether_detach_p)(struct ifnet *ifp);
106 
107 void	(*vlan_input_p)(struct ifnet *, struct mbuf *);
108 
109 /* if_bridge(4) support */
110 void	(*bridge_dn_p)(struct mbuf *, struct ifnet *);
111 
112 /* if_lagg(4) support */
113 struct mbuf *(*lagg_input_p)(struct ifnet *, struct mbuf *);
114 
115 static const u_char etherbroadcastaddr[ETHER_ADDR_LEN] =
116 			{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
117 
118 static	int ether_resolvemulti(struct ifnet *, struct sockaddr **,
119 		struct sockaddr *);
120 #ifdef VIMAGE
121 static	void ether_reassign(struct ifnet *, struct vnet *, char *);
122 #endif
123 static	int ether_requestencap(struct ifnet *, struct if_encap_req *);
124 
125 #define senderr(e) do { error = (e); goto bad;} while (0)
126 
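/*
 * When looping a packet back to ourselves, mark in the destination mbuf
 * the checksums the sender asked to have computed as already verified, so
 * the local input path does not try to validate them again.
 */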
127 static void
128 update_mbuf_csumflags(struct mbuf *src, struct mbuf *dst)
129 {
130 	int csum_flags = 0;
131 
132 	if (src->m_pkthdr.csum_flags & CSUM_IP)
133 		csum_flags |= (CSUM_IP_CHECKED|CSUM_IP_VALID);
134 	if (src->m_pkthdr.csum_flags & CSUM_DELAY_DATA)
135 		csum_flags |= (CSUM_DATA_VALID|CSUM_PSEUDO_HDR);
136 	if (src->m_pkthdr.csum_flags & CSUM_SCTP)
137 		csum_flags |= CSUM_SCTP_VALID;
138 	dst->m_pkthdr.csum_flags |= csum_flags;
139 	if (csum_flags & CSUM_DATA_VALID)
140 		dst->m_pkthdr.csum_data = 0xffff;
141 }
142 
143 /*
144  * Handle link-layer encapsulation requests.
145  */
146 static int
147 ether_requestencap(struct ifnet *ifp, struct if_encap_req *req)
148 {
149 	struct ether_header *eh;
150 	struct arphdr *ah;
151 	uint16_t etype;
152 	const u_char *lladdr;
153 
154 	if (req->rtype != IFENCAP_LL)
155 		return (EOPNOTSUPP);
156 
157 	if (req->bufsize < ETHER_HDR_LEN)
158 		return (ENOMEM);
159 
160 	eh = (struct ether_header *)req->buf;
161 	lladdr = req->lladdr;
162 	req->lladdr_off = 0;
163 
164 	switch (req->family) {
165 	case AF_INET:
166 		etype = htons(ETHERTYPE_IP);
167 		break;
168 	case AF_INET6:
169 		etype = htons(ETHERTYPE_IPV6);
170 		break;
171 	case AF_ARP:
172 		ah = (struct arphdr *)req->hdata;
173 		ah->ar_hrd = htons(ARPHRD_ETHER);
174 
175 		switch(ntohs(ah->ar_op)) {
176 		case ARPOP_REVREQUEST:
177 		case ARPOP_REVREPLY:
178 			etype = htons(ETHERTYPE_REVARP);
179 			break;
180 		case ARPOP_REQUEST:
181 		case ARPOP_REPLY:
182 		default:
183 			etype = htons(ETHERTYPE_ARP);
184 			break;
185 		}
186 
187 		if (req->flags & IFENCAP_FLAG_BROADCAST)
188 			lladdr = ifp->if_broadcastaddr;
189 		break;
190 	default:
191 		return (EAFNOSUPPORT);
192 	}
193 
194 	memcpy(&eh->ether_type, &etype, sizeof(eh->ether_type));
195 	memcpy(eh->ether_dhost, lladdr, ETHER_ADDR_LEN);
196 	memcpy(eh->ether_shost, IF_LLADDR(ifp), ETHER_ADDR_LEN);
197 	req->bufsize = sizeof(struct ether_header);
198 
199 	return (0);
200 }
201 
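/*
 * Resolve the link-layer destination for an outgoing packet.  Unicast
 * IPv4/IPv6 destinations go through arpresolve()/nd6_resolve(); broadcast
 * and multicast destinations are mapped directly to the corresponding
 * Ethernet addresses.  On success the prebuilt header is left in *phdr and
 * routing flags (RT_MAY_LOOP, RT_L2_ME) are returned in *pflags.
 */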
202 static int
203 ether_resolve_addr(struct ifnet *ifp, struct mbuf *m,
204 	const struct sockaddr *dst, struct route *ro, u_char *phdr,
205 	uint32_t *pflags, struct llentry **plle)
206 {
207 	struct ether_header *eh;
208 	uint32_t lleflags = 0;
209 	int error = 0;
210 #if defined(INET) || defined(INET6)
211 	uint16_t etype;
212 #endif
213 
214 	if (plle)
215 		*plle = NULL;
216 	eh = (struct ether_header *)phdr;
217 
218 	switch (dst->sa_family) {
219 #ifdef INET
220 	case AF_INET:
221 		if ((m->m_flags & (M_BCAST | M_MCAST)) == 0)
222 			error = arpresolve(ifp, 0, m, dst, phdr, &lleflags,
223 			    plle);
224 		else {
225 			if (m->m_flags & M_BCAST)
226 				memcpy(eh->ether_dhost, ifp->if_broadcastaddr,
227 				    ETHER_ADDR_LEN);
228 			else {
229 				const struct in_addr *a;
230 				a = &(((const struct sockaddr_in *)dst)->sin_addr);
231 				ETHER_MAP_IP_MULTICAST(a, eh->ether_dhost);
232 			}
233 			etype = htons(ETHERTYPE_IP);
234 			memcpy(&eh->ether_type, &etype, sizeof(etype));
235 			memcpy(eh->ether_shost, IF_LLADDR(ifp), ETHER_ADDR_LEN);
236 		}
237 		break;
238 #endif
239 #ifdef INET6
240 	case AF_INET6:
241 		if ((m->m_flags & M_MCAST) == 0)
242 			error = nd6_resolve(ifp, 0, m, dst, phdr, &lleflags,
243 			    plle);
244 		else {
245 			const struct in6_addr *a6;
246 			a6 = &(((const struct sockaddr_in6 *)dst)->sin6_addr);
247 			ETHER_MAP_IPV6_MULTICAST(a6, eh->ether_dhost);
248 			etype = htons(ETHERTYPE_IPV6);
249 			memcpy(&eh->ether_type, &etype, sizeof(etype));
250 			memcpy(eh->ether_shost, IF_LLADDR(ifp), ETHER_ADDR_LEN);
251 		}
252 		break;
253 #endif
254 	default:
255 		if_printf(ifp, "can't handle af%d\n", dst->sa_family);
256 		if (m != NULL)
257 			m_freem(m);
258 		return (EAFNOSUPPORT);
259 	}
260 
261 	if (error == EHOSTDOWN) {
262 		if (ro != NULL && (ro->ro_flags & RT_HAS_GW) != 0)
263 			error = EHOSTUNREACH;
264 	}
265 
266 	if (error != 0)
267 		return (error);
268 
269 	*pflags = RT_MAY_LOOP;
270 	if (lleflags & LLE_IFADDR)
271 		*pflags |= RT_L2_ME;
272 
273 	return (0);
274 }
275 
276 /*
277  * Ethernet output routine.
278  * Encapsulate a packet of type family for the local net.
279  * Use trailer local net encapsulation if enough data in first
280  * packet leaves a multiple of 512 bytes of data in remainder.
281  */
282 int
283 ether_output(struct ifnet *ifp, struct mbuf *m,
284 	const struct sockaddr *dst, struct route *ro)
285 {
286 	int error = 0;
287 	char linkhdr[ETHER_HDR_LEN], *phdr;
288 	struct ether_header *eh;
289 	struct pf_mtag *t;
290 	int loop_copy = 1;
291 	int hlen;	/* link layer header length */
292 	uint32_t pflags;
293 	struct llentry *lle = NULL;
294 	int addref = 0;
295 
296 	phdr = NULL;
297 	pflags = 0;
298 	if (ro != NULL) {
299 		/* XXX BPF uses ro_prepend */
300 		if (ro->ro_prepend != NULL) {
301 			phdr = ro->ro_prepend;
302 			hlen = ro->ro_plen;
303 		} else if (!(m->m_flags & (M_BCAST | M_MCAST))) {
304 			if ((ro->ro_flags & RT_LLE_CACHE) != 0) {
305 				lle = ro->ro_lle;
306 				if (lle != NULL &&
307 				    (lle->la_flags & LLE_VALID) == 0) {
308 					LLE_FREE(lle);
309 					lle = NULL;	/* redundant */
310 					ro->ro_lle = NULL;
311 				}
312 				if (lle == NULL) {
313 					/* If we end up looking it up, cache the entry. */
314 					addref = 1;
315 				} else
316 					/*
317 					 * Notify LLE code that
318 					 * the entry was used
319 					 * by datapath.
320 					 */
321 					llentry_mark_used(lle);
322 			}
323 			if (lle != NULL) {
324 				phdr = lle->r_linkdata;
325 				hlen = lle->r_hdrlen;
326 				pflags = lle->r_flags;
327 			}
328 		}
329 	}
330 
331 #ifdef MAC
332 	error = mac_ifnet_check_transmit(ifp, m);
333 	if (error)
334 		senderr(error);
335 #endif
336 
337 	M_PROFILE(m);
338 	if (ifp->if_flags & IFF_MONITOR)
339 		senderr(ENETDOWN);
340 	if (!((ifp->if_flags & IFF_UP) &&
341 	    (ifp->if_drv_flags & IFF_DRV_RUNNING)))
342 		senderr(ENETDOWN);
343 
344 	if (phdr == NULL) {
345 		/* No prepend data supplied. Try to calculate ourselves. */
346 		phdr = linkhdr;
347 		hlen = ETHER_HDR_LEN;
348 		error = ether_resolve_addr(ifp, m, dst, ro, phdr, &pflags,
349 		    addref ? &lle : NULL);
350 		if (addref && lle != NULL)
351 			ro->ro_lle = lle;
352 		if (error != 0)
353 			return (error == EWOULDBLOCK ? 0 : error);
354 	}
355 
356 	if ((pflags & RT_L2_ME) != 0) {
357 		update_mbuf_csumflags(m, m);
358 		return (if_simloop(ifp, m, dst->sa_family, 0));
359 	}
360 	loop_copy = pflags & RT_MAY_LOOP;
361 
362 	/*
363 	 * Add local net header.  If no space in first mbuf,
364 	 * allocate another.
365 	 *
366 	 * Note that we do prepend regardless of RT_HAS_HEADER flag.
367 	 * This is done because BPF code shifts m_data pointer
368 	 * to the end of ethernet header prior to calling if_output().
369 	 */
370 	M_PREPEND(m, hlen, M_NOWAIT);
371 	if (m == NULL)
372 		senderr(ENOBUFS);
373 	if ((pflags & RT_HAS_HEADER) == 0) {
374 		eh = mtod(m, struct ether_header *);
375 		memcpy(eh, phdr, hlen);
376 	}
377 
378 	/*
379 	 * If a simplex interface, and the packet is being sent to our
380 	 * Ethernet address or a broadcast address, loopback a copy.
381 	 * XXX To make a simplex device behave exactly like a duplex
382 	 * device, we should copy in the case of sending to our own
383 	 * ethernet address (thus letting the original actually appear
384 	 * on the wire). However, we don't do that here for security
385 	 * reasons and compatibility with the original behavior.
386 	 */
387 	if ((m->m_flags & M_BCAST) && loop_copy && (ifp->if_flags & IFF_SIMPLEX) &&
388 	    ((t = pf_find_mtag(m)) == NULL || !t->routed)) {
389 		struct mbuf *n;
390 
391 		/*
392 		 * Because if_simloop() modifies the packet, we need a
393 		 * writable copy through m_dup() instead of a readonly
394 		 * one as m_copy[m] would give us. The alternative would
395 		 * be to modify if_simloop() to handle the readonly mbuf,
396 		 * but performance-wise it is mostly equivalent (trading
397 		 * extra data copying vs. extra locking).
398 		 *
399 		 * XXX This is a local workaround.  A number of less
400 		 * often used kernel parts suffer from the same bug.
401 		 * See PR kern/105943 for a proposed general solution.
402 		 */
403 		if ((n = m_dup(m, M_NOWAIT)) != NULL) {
404 			update_mbuf_csumflags(m, n);
405 			(void)if_simloop(ifp, n, dst->sa_family, hlen);
406 		} else
407 			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
408 	}
409 
410 	/*
411 	 * Bridges require special output handling.
412 	 */
413 	if (ifp->if_bridge) {
414 		BRIDGE_OUTPUT(ifp, m, error);
415 		return (error);
416 	}
417 
418 #if defined(INET) || defined(INET6)
419 	if (ifp->if_carp &&
420 	    (error = (*carp_output_p)(ifp, m, dst)))
421 		goto bad;
422 #endif
423 
424 	/* Handle ng_ether(4) processing, if any */
425 	if (ifp->if_l2com != NULL) {
426 		KASSERT(ng_ether_output_p != NULL,
427 		    ("ng_ether_output_p is NULL"));
428 		if ((error = (*ng_ether_output_p)(ifp, &m)) != 0) {
429 bad:			if (m != NULL)
430 				m_freem(m);
431 			return (error);
432 		}
433 		if (m == NULL)
434 			return (0);
435 	}
436 
437 	/* Continue with link-layer output */
438 	return ether_output_frame(ifp, m);
439 }
440 
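/*
 * Insert an 802.1Q header carrying the interface's default PCP into an
 * untagged outgoing frame; frames that already carry a VLAN ethertype are
 * passed through unchanged.
 */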
441 static bool
442 ether_set_pcp(struct mbuf **mp, struct ifnet *ifp, uint8_t pcp)
443 {
444 	struct ether_header *eh;
445 
446 	eh = mtod(*mp, struct ether_header *);
447 	if (ntohs(eh->ether_type) == ETHERTYPE_VLAN ||
448 	    ether_8021q_frame(mp, ifp, ifp, 0, pcp))
449 		return (true);
450 	if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
451 	return (false);
452 }
453 
454 /*
455  * Ethernet link layer output routine to send a raw frame to the device.
456  *
457  * This assumes that the 14 byte Ethernet header is present and contiguous
458  * in the first mbuf (if BRIDGE'ing).
459  */
460 int
461 ether_output_frame(struct ifnet *ifp, struct mbuf *m)
462 {
463 	uint8_t pcp;
464 
465 	pcp = ifp->if_pcp;
466 	if (pcp != IFNET_PCP_NONE && ifp->if_type != IFT_L2VLAN &&
467 	    !ether_set_pcp(&m, ifp, pcp))
468 		return (0);
469 
470 	if (PFIL_HOOKED_OUT(V_link_pfil_head))
471 		switch (pfil_run_hooks(V_link_pfil_head, &m, ifp, PFIL_OUT,
472 		    NULL)) {
473 		case PFIL_DROPPED:
474 			return (EACCES);
475 		case PFIL_CONSUMED:
476 			return (0);
477 		}
478 
479 #ifdef EXPERIMENTAL
480 #if defined(INET6) && defined(INET)
481 	/* draft-ietf-6man-ipv6only-flag */
482 	/* Catch ETHERTYPE_IP and ETHERTYPE_[REV]ARP if we are v6-only. */
483 	if ((ND_IFINFO(ifp)->flags & ND6_IFF_IPV6_ONLY_MASK) != 0) {
484 		struct ether_header *eh;
485 
486 		eh = mtod(m, struct ether_header *);
487 		switch (ntohs(eh->ether_type)) {
488 		case ETHERTYPE_IP:
489 		case ETHERTYPE_ARP:
490 		case ETHERTYPE_REVARP:
491 			m_freem(m);
492 			return (EAFNOSUPPORT);
493 			/* NOTREACHED */
494 			break;
495 		}
496 	}
497 #endif
498 #endif
499 
500 	/*
501 	 * Queue message on interface, update output statistics if
502 	 * successful, and start output if interface not yet active.
503 	 */
504 	return ((ifp->if_transmit)(ifp, m));
505 }
506 
507 /*
508  * Process a received Ethernet packet; the packet is in the
509  * mbuf chain m with the ethernet header at the front.
510  */
511 static void
512 ether_input_internal(struct ifnet *ifp, struct mbuf *m)
513 {
514 	struct ether_header *eh;
515 	u_short etype;
516 
517 	if ((ifp->if_flags & IFF_UP) == 0) {
518 		m_freem(m);
519 		return;
520 	}
521 #ifdef DIAGNOSTIC
522 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
523 		if_printf(ifp, "discard frame at !IFF_DRV_RUNNING\n");
524 		m_freem(m);
525 		return;
526 	}
527 #endif
528 	if (m->m_len < ETHER_HDR_LEN) {
529 		/* XXX maybe should pullup? */
530 		if_printf(ifp, "discard frame w/o leading ethernet "
531 				"header (len %u pkt len %u)\n",
532 				m->m_len, m->m_pkthdr.len);
533 		if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
534 		m_freem(m);
535 		return;
536 	}
537 	eh = mtod(m, struct ether_header *);
538 	etype = ntohs(eh->ether_type);
539 	random_harvest_queue_ether(m, sizeof(*m));
540 
541 #ifdef EXPERIMENTAL
542 #if defined(INET6) && defined(INET)
543 	/* draft-ietf-6man-ipv6only-flag */
544 	/* Catch ETHERTYPE_IP and ETHERTYPE_[REV]ARP if we are v6-only. */
545 	if ((ND_IFINFO(ifp)->flags & ND6_IFF_IPV6_ONLY_MASK) != 0) {
546 		switch (etype) {
547 		case ETHERTYPE_IP:
548 		case ETHERTYPE_ARP:
549 		case ETHERTYPE_REVARP:
550 			m_freem(m);
551 			return;
552 			/* NOTREACHED */
553 			break;
554 		}
555 	}
556 #endif
557 #endif
558 
559 	CURVNET_SET_QUIET(ifp->if_vnet);
560 
561 	if (ETHER_IS_MULTICAST(eh->ether_dhost)) {
562 		if (ETHER_IS_BROADCAST(eh->ether_dhost))
563 			m->m_flags |= M_BCAST;
564 		else
565 			m->m_flags |= M_MCAST;
566 		if_inc_counter(ifp, IFCOUNTER_IMCASTS, 1);
567 	}
568 
569 #ifdef MAC
570 	/*
571 	 * Tag the mbuf with an appropriate MAC label before any other
572 	 * consumers can get to it.
573 	 */
574 	mac_ifnet_create_mbuf(ifp, m);
575 #endif
576 
577 	/*
578 	 * Give bpf a chance at the packet.
579 	 */
580 	ETHER_BPF_MTAP(ifp, m);
581 
582 	/*
583 	 * If the CRC is still on the packet, trim it off. We do this once
584 	 * and once only in case we are re-entered. Nothing else on the
585 	 * Ethernet receive path expects to see the FCS.
586 	 */
587 	if (m->m_flags & M_HASFCS) {
588 		m_adj(m, -ETHER_CRC_LEN);
589 		m->m_flags &= ~M_HASFCS;
590 	}
591 
592 	if (!(ifp->if_capenable & IFCAP_HWSTATS))
593 		if_inc_counter(ifp, IFCOUNTER_IBYTES, m->m_pkthdr.len);
594 
595 	/* Allow monitor mode to claim this frame, after stats are updated. */
596 	if (ifp->if_flags & IFF_MONITOR) {
597 		m_freem(m);
598 		CURVNET_RESTORE();
599 		return;
600 	}
601 
602 	/* Handle input from a lagg(4) port */
603 	if (ifp->if_type == IFT_IEEE8023ADLAG) {
604 		KASSERT(lagg_input_p != NULL,
605 		    ("%s: if_lagg not loaded!", __func__));
606 		m = (*lagg_input_p)(ifp, m);
607 		if (m != NULL)
608 			ifp = m->m_pkthdr.rcvif;
609 		else {
610 			CURVNET_RESTORE();
611 			return;
612 		}
613 	}
614 
615 	/*
616 	 * If the hardware did not process an 802.1Q tag, do this now,
617 	 * to allow 802.1P priority frames to be passed to the main input
618 	 * path correctly.
619 	 * TODO: Deal with Q-in-Q frames, but not arbitrary nesting levels.
620 	 */
621 	if ((m->m_flags & M_VLANTAG) == 0 && etype == ETHERTYPE_VLAN) {
622 		struct ether_vlan_header *evl;
623 
624 		if (m->m_len < sizeof(*evl) &&
625 		    (m = m_pullup(m, sizeof(*evl))) == NULL) {
626 #ifdef DIAGNOSTIC
627 			if_printf(ifp, "cannot pullup VLAN header\n");
628 #endif
629 			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
630 			CURVNET_RESTORE();
631 			return;
632 		}
633 
634 		evl = mtod(m, struct ether_vlan_header *);
635 		m->m_pkthdr.ether_vtag = ntohs(evl->evl_tag);
636 		m->m_flags |= M_VLANTAG;
637 
638 		bcopy((char *)evl, (char *)evl + ETHER_VLAN_ENCAP_LEN,
639 		    ETHER_HDR_LEN - ETHER_TYPE_LEN);
640 		m_adj(m, ETHER_VLAN_ENCAP_LEN);
641 		eh = mtod(m, struct ether_header *);
642 	}
643 
644 	M_SETFIB(m, ifp->if_fib);
645 
646 	/* Allow ng_ether(4) to claim this frame. */
647 	if (ifp->if_l2com != NULL) {
648 		KASSERT(ng_ether_input_p != NULL,
649 		    ("%s: ng_ether_input_p is NULL", __func__));
650 		m->m_flags &= ~M_PROMISC;
651 		(*ng_ether_input_p)(ifp, &m);
652 		if (m == NULL) {
653 			CURVNET_RESTORE();
654 			return;
655 		}
656 		eh = mtod(m, struct ether_header *);
657 	}
658 
659 	/*
660 	 * Allow if_bridge(4) to claim this frame.
661 	 * The BRIDGE_INPUT() macro will update ifp if the bridge changed it
662 	 * and the frame should be delivered locally.
663 	 */
664 	if (ifp->if_bridge != NULL) {
665 		m->m_flags &= ~M_PROMISC;
666 		BRIDGE_INPUT(ifp, m);
667 		if (m == NULL) {
668 			CURVNET_RESTORE();
669 			return;
670 		}
671 		eh = mtod(m, struct ether_header *);
672 	}
673 
674 #if defined(INET) || defined(INET6)
675 	/*
676 	 * Clear M_PROMISC on frame so that carp(4) will see it when the
677 	 * mbuf flows up to Layer 3.
678 	 * FreeBSD's implementation of carp(4) uses the inprotosw
679 	 * to dispatch IPPROTO_CARP. carp(4) also allocates its own
680 	 * Ethernet addresses of the form 00:00:5e:00:01:xx, which
681 	 * is outside the scope of the M_PROMISC test below.
682 	 * TODO: Maintain a hash table of ethernet addresses other than
683 	 * ether_dhost which may be active on this ifp.
684 	 */
685 	if (ifp->if_carp && (*carp_forus_p)(ifp, eh->ether_dhost)) {
686 		m->m_flags &= ~M_PROMISC;
687 	} else
688 #endif
689 	{
690 		/*
691 		 * If the frame received was not for our MAC address, set the
692 		 * M_PROMISC flag on the mbuf chain. The frame may need to
693 		 * be seen by the rest of the Ethernet input path in case of
694 		 * re-entry (e.g. bridge, vlan, netgraph) but should not be
695 		 * seen by upper protocol layers.
696 		 */
697 		if (!ETHER_IS_MULTICAST(eh->ether_dhost) &&
698 		    bcmp(IF_LLADDR(ifp), eh->ether_dhost, ETHER_ADDR_LEN) != 0)
699 			m->m_flags |= M_PROMISC;
700 	}
701 
702 	ether_demux(ifp, m);
703 	CURVNET_RESTORE();
704 }
705 
706 /*
707  * Ethernet input dispatch; by default, direct dispatch here regardless of
708  * global configuration.  However, if RSS is enabled, hook up RSS affinity
709  * so that when deferred or hybrid dispatch is enabled, we can redistribute
710  * load based on RSS.
711  *
712  * XXXRW: Would be nice if the ifnet passed up a flag indicating whether or
713  * not it had already done work distribution via multi-queue.  Then we could
714  * direct dispatch in the event load balancing was already complete and
715  * handle the case of interfaces with different capabilities better.
716  *
717  * XXXRW: Sort of want an M_DISTRIBUTED flag to avoid multiple distributions
718  * at multiple layers?
719  *
720  * XXXRW: For now, enable all this only if RSS is compiled in, although it
721  * works fine without RSS.  Need to characterise the performance overhead
722  * of the detour through the netisr code in the event the result is always
723  * direct dispatch.
724  */
725 static void
726 ether_nh_input(struct mbuf *m)
727 {
728 
729 	M_ASSERTPKTHDR(m);
730 	KASSERT(m->m_pkthdr.rcvif != NULL,
731 	    ("%s: NULL interface pointer", __func__));
732 	ether_input_internal(m->m_pkthdr.rcvif, m);
733 }
734 
735 static struct netisr_handler	ether_nh = {
736 	.nh_name = "ether",
737 	.nh_handler = ether_nh_input,
738 	.nh_proto = NETISR_ETHER,
739 #ifdef RSS
740 	.nh_policy = NETISR_POLICY_CPU,
741 	.nh_dispatch = NETISR_DISPATCH_DIRECT,
742 	.nh_m2cpuid = rss_m2cpuid,
743 #else
744 	.nh_policy = NETISR_POLICY_SOURCE,
745 	.nh_dispatch = NETISR_DISPATCH_DIRECT,
746 #endif
747 };
748 
749 static void
750 ether_init(__unused void *arg)
751 {
752 
753 	netisr_register(&ether_nh);
754 }
755 SYSINIT(ether, SI_SUB_INIT_IF, SI_ORDER_ANY, ether_init, NULL);
756 
757 static void
758 vnet_ether_init(__unused void *arg)
759 {
760 	struct pfil_head_args args;
761 
762 	args.pa_version = PFIL_VERSION;
763 	args.pa_flags = PFIL_IN | PFIL_OUT;
764 	args.pa_type = PFIL_TYPE_ETHERNET;
765 	args.pa_headname = PFIL_ETHER_NAME;
766 	V_link_pfil_head = pfil_head_register(&args);
767 
768 #ifdef VIMAGE
769 	netisr_register_vnet(&ether_nh);
770 #endif
771 }
772 VNET_SYSINIT(vnet_ether_init, SI_SUB_PROTO_IF, SI_ORDER_ANY,
773     vnet_ether_init, NULL);
774 
775 #ifdef VIMAGE
776 static void
777 vnet_ether_pfil_destroy(__unused void *arg)
778 {
779 
780 	pfil_head_unregister(V_link_pfil_head);
781 }
782 VNET_SYSUNINIT(vnet_ether_pfil_uninit, SI_SUB_PROTO_PFIL, SI_ORDER_ANY,
783     vnet_ether_pfil_destroy, NULL);
784 
785 static void
786 vnet_ether_destroy(__unused void *arg)
787 {
788 
789 	netisr_unregister_vnet(&ether_nh);
790 }
791 VNET_SYSUNINIT(vnet_ether_uninit, SI_SUB_PROTO_IF, SI_ORDER_ANY,
792     vnet_ether_destroy, NULL);
793 #endif
794 
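/*
 * if_input entry point for Ethernet interfaces.  Drivers may hand us a
 * list of packets linked through m_nextpkt; each one is dispatched to the
 * netisr layer individually, entering the network epoch on behalf of
 * drivers that have not done so themselves (!IFF_KNOWSEPOCH).
 */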
795 static void
796 ether_input(struct ifnet *ifp, struct mbuf *m)
797 {
798 	struct epoch_tracker et;
799 	struct mbuf *mn;
800 	bool needs_epoch;
801 
802 	needs_epoch = !(ifp->if_flags & IFF_KNOWSEPOCH);
803 
804 	/*
805 	 * The drivers are allowed to pass in a chain of packets linked with
806 	 * m_nextpkt. We split them up into separate packets here and pass
807 	 * them up. This allows the drivers to amortize the receive lock.
808 	 */
809 	CURVNET_SET_QUIET(ifp->if_vnet);
810 	if (__predict_false(needs_epoch))
811 		NET_EPOCH_ENTER(et);
812 	while (m) {
813 		mn = m->m_nextpkt;
814 		m->m_nextpkt = NULL;
815 
816 		/*
817 		 * We will rely on rcvif being set properly in the deferred
818 		 * context, so assert it is correct here.
819 		 */
820 		MPASS((m->m_pkthdr.csum_flags & CSUM_SND_TAG) == 0);
821 		KASSERT(m->m_pkthdr.rcvif == ifp, ("%s: ifnet mismatch m %p "
822 		    "rcvif %p ifp %p", __func__, m, m->m_pkthdr.rcvif, ifp));
823 		netisr_dispatch(NETISR_ETHER, m);
824 		m = mn;
825 	}
826 	if (__predict_false(needs_epoch))
827 		NET_EPOCH_EXIT(et);
828 	CURVNET_RESTORE();
829 }
830 
831 /*
832  * Upper layer processing for a received Ethernet packet.
833  */
834 void
835 ether_demux(struct ifnet *ifp, struct mbuf *m)
836 {
837 	struct ether_header *eh;
838 	int i, isr;
839 	u_short ether_type;
840 
841 	NET_EPOCH_ASSERT();
842 	KASSERT(ifp != NULL, ("%s: NULL interface pointer", __func__));
843 
844 	/* Do not grab PROMISC frames in case we are re-entered. */
845 	if (PFIL_HOOKED_IN(V_link_pfil_head) && !(m->m_flags & M_PROMISC)) {
846 		i = pfil_run_hooks(V_link_pfil_head, &m, ifp, PFIL_IN, NULL);
847 		if (i != 0 || m == NULL)
848 			return;
849 	}
850 
851 	eh = mtod(m, struct ether_header *);
852 	ether_type = ntohs(eh->ether_type);
853 
854 	/*
855 	 * If this frame has a VLAN tag other than 0, call vlan_input()
856 	 * if its module is loaded. Otherwise, drop.
857 	 */
858 	if ((m->m_flags & M_VLANTAG) &&
859 	    EVL_VLANOFTAG(m->m_pkthdr.ether_vtag) != 0) {
860 		if (ifp->if_vlantrunk == NULL) {
861 			if_inc_counter(ifp, IFCOUNTER_NOPROTO, 1);
862 			m_freem(m);
863 			return;
864 		}
865 		KASSERT(vlan_input_p != NULL,("%s: VLAN not loaded!",
866 		    __func__));
867 		/* Clear before possibly re-entering ether_input(). */
868 		m->m_flags &= ~M_PROMISC;
869 		(*vlan_input_p)(ifp, m);
870 		return;
871 	}
872 
873 	/*
874 	 * Pass promiscuously received frames to the upper layer if the user
875 	 * requested this by setting IFF_PPROMISC. Otherwise, drop them.
876 	 */
877 	if ((ifp->if_flags & IFF_PPROMISC) == 0 && (m->m_flags & M_PROMISC)) {
878 		m_freem(m);
879 		return;
880 	}
881 
882 	/*
883 	 * Reset layer specific mbuf flags to avoid confusing upper layers.
884 	 * Strip off Ethernet header.
885 	 */
886 	m->m_flags &= ~M_VLANTAG;
887 	m_clrprotoflags(m);
888 	m_adj(m, ETHER_HDR_LEN);
889 
890 	/*
891 	 * Dispatch frame to upper layer.
892 	 */
893 	switch (ether_type) {
894 #ifdef INET
895 	case ETHERTYPE_IP:
896 		isr = NETISR_IP;
897 		break;
898 
899 	case ETHERTYPE_ARP:
900 		if (ifp->if_flags & IFF_NOARP) {
901 			/* Discard packet if ARP is disabled on interface */
902 			m_freem(m);
903 			return;
904 		}
905 		isr = NETISR_ARP;
906 		break;
907 #endif
908 #ifdef INET6
909 	case ETHERTYPE_IPV6:
910 		isr = NETISR_IPV6;
911 		break;
912 #endif
913 	default:
914 		goto discard;
915 	}
916 	netisr_dispatch(isr, m);
917 	return;
918 
919 discard:
920 	/*
921 	 * Packet is to be discarded.  If netgraph is present,
922 	 * hand the packet to it for last chance processing;
923 	 * otherwise dispose of it.
924 	 */
925 	if (ifp->if_l2com != NULL) {
926 		KASSERT(ng_ether_input_orphan_p != NULL,
927 		    ("ng_ether_input_orphan_p is NULL"));
928 		/*
929 		 * Put back the ethernet header so netgraph has a
930 		 * consistent view of inbound packets.
931 		 */
932 		M_PREPEND(m, ETHER_HDR_LEN, M_NOWAIT);
933 		(*ng_ether_input_orphan_p)(ifp, m);
934 		return;
935 	}
936 	m_freem(m);
937 }
938 
939 /*
940  * Convert Ethernet address to printable (loggable) representation.
941  * This routine is for compatibility; it's better to just use
942  *
943  *	printf("%6D", <pointer to address>, ":");
944  *
945  * since there's no static buffer involved.
946  */
947 char *
948 ether_sprintf(const u_char *ap)
949 {
950 	static char etherbuf[18];
951 	snprintf(etherbuf, sizeof (etherbuf), "%6D", ap, ":");
952 	return (etherbuf);
953 }
954 
955 /*
956  * Perform common duties while attaching to interface list
957  */
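/*
 * A sketch of the usual driver attach sequence, for illustration only
 * ("dev" and "sc" stand for hypothetical driver state):
 *
 *	ifp = if_alloc(IFT_ETHER);
 *	ifp->if_softc = sc;
 *	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
 *	...set if_flags, if_ioctl, if_transmit, capabilities...
 *	ether_ifattach(ifp, sc->mac_addr);
 */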
958 void
959 ether_ifattach(struct ifnet *ifp, const u_int8_t *lla)
960 {
961 	int i;
962 	struct ifaddr *ifa;
963 	struct sockaddr_dl *sdl;
964 
965 	ifp->if_addrlen = ETHER_ADDR_LEN;
966 	ifp->if_hdrlen = ETHER_HDR_LEN;
967 	ifp->if_mtu = ETHERMTU;
968 	if_attach(ifp);
969 	ifp->if_output = ether_output;
970 	ifp->if_input = ether_input;
971 	ifp->if_resolvemulti = ether_resolvemulti;
972 	ifp->if_requestencap = ether_requestencap;
973 #ifdef VIMAGE
974 	ifp->if_reassign = ether_reassign;
975 #endif
976 	if (ifp->if_baudrate == 0)
977 		ifp->if_baudrate = IF_Mbps(10);		/* just a default */
978 	ifp->if_broadcastaddr = etherbroadcastaddr;
979 
980 	ifa = ifp->if_addr;
981 	KASSERT(ifa != NULL, ("%s: no lladdr!\n", __func__));
982 	sdl = (struct sockaddr_dl *)ifa->ifa_addr;
983 	sdl->sdl_type = IFT_ETHER;
984 	sdl->sdl_alen = ifp->if_addrlen;
985 	bcopy(lla, LLADDR(sdl), ifp->if_addrlen);
986 
987 	if (ifp->if_hw_addr != NULL)
988 		bcopy(lla, ifp->if_hw_addr, ifp->if_addrlen);
989 
990 	bpfattach(ifp, DLT_EN10MB, ETHER_HDR_LEN);
991 	if (ng_ether_attach_p != NULL)
992 		(*ng_ether_attach_p)(ifp);
993 
994 	/* Announce Ethernet MAC address if non-zero. */
995 	for (i = 0; i < ifp->if_addrlen; i++)
996 		if (lla[i] != 0)
997 			break;
998 	if (i != ifp->if_addrlen)
999 		if_printf(ifp, "Ethernet address: %6D\n", lla, ":");
1000 
1001 	uuid_ether_add(LLADDR(sdl));
1002 
1003 	/* All the necessary bits are set up; announce the interface now. */
1004 	EVENTHANDLER_INVOKE(ether_ifattach_event, ifp);
1005 	if (IS_DEFAULT_VNET(curvnet))
1006 		devctl_notify("ETHERNET", ifp->if_xname, "IFATTACH", NULL);
1007 }
1008 
1009 /*
1010  * Perform common duties while detaching an Ethernet interface
1011  */
1012 void
1013 ether_ifdetach(struct ifnet *ifp)
1014 {
1015 	struct sockaddr_dl *sdl;
1016 
1017 	sdl = (struct sockaddr_dl *)(ifp->if_addr->ifa_addr);
1018 	uuid_ether_del(LLADDR(sdl));
1019 
1020 	if (ifp->if_l2com != NULL) {
1021 		KASSERT(ng_ether_detach_p != NULL,
1022 		    ("ng_ether_detach_p is NULL"));
1023 		(*ng_ether_detach_p)(ifp);
1024 	}
1025 
1026 	bpfdetach(ifp);
1027 	if_detach(ifp);
1028 }
1029 
1030 #ifdef VIMAGE
1031 void
1032 ether_reassign(struct ifnet *ifp, struct vnet *new_vnet, char *unused __unused)
1033 {
1034 
1035 	if (ifp->if_l2com != NULL) {
1036 		KASSERT(ng_ether_detach_p != NULL,
1037 		    ("ng_ether_detach_p is NULL"));
1038 		(*ng_ether_detach_p)(ifp);
1039 	}
1040 
1041 	if (ng_ether_attach_p != NULL) {
1042 		CURVNET_SET_QUIET(new_vnet);
1043 		(*ng_ether_attach_p)(ifp);
1044 		CURVNET_RESTORE();
1045 	}
1046 }
1047 #endif
1048 
1049 SYSCTL_DECL(_net_link);
1050 SYSCTL_NODE(_net_link, IFT_ETHER, ether, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
1051     "Ethernet");
1052 
1053 #if 0
1054 /*
1055  * This is for reference.  We have a table-driven version
1056  * of the little-endian crc32 generator, which is faster
1057  * than the double-loop.
1058  */
1059 uint32_t
1060 ether_crc32_le(const uint8_t *buf, size_t len)
1061 {
1062 	size_t i;
1063 	uint32_t crc, carry;
1064 	int bit;
1065 	uint8_t data;
1066 
1067 	crc = 0xffffffff;	/* initial value */
1068 
1069 	for (i = 0; i < len; i++) {
1070 		for (data = *buf++, bit = 0; bit < 8; bit++, data >>= 1) {
1071 			carry = (crc ^ data) & 1;
1072 			crc >>= 1;
1073 			if (carry)
1074 				crc = (crc ^ ETHER_CRC_POLY_LE);
1075 		}
1076 	}
1077 
1078 	return (crc);
1079 }
1080 #else
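/*
 * Table-driven little-endian CRC32: bytes are consumed one at a time, but
 * the CRC is advanced four bits per step using a 16-entry table derived
 * from the reflected polynomial 0xedb88320.
 */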
1081 uint32_t
1082 ether_crc32_le(const uint8_t *buf, size_t len)
1083 {
1084 	static const uint32_t crctab[] = {
1085 		0x00000000, 0x1db71064, 0x3b6e20c8, 0x26d930ac,
1086 		0x76dc4190, 0x6b6b51f4, 0x4db26158, 0x5005713c,
1087 		0xedb88320, 0xf00f9344, 0xd6d6a3e8, 0xcb61b38c,
1088 		0x9b64c2b0, 0x86d3d2d4, 0xa00ae278, 0xbdbdf21c
1089 	};
1090 	size_t i;
1091 	uint32_t crc;
1092 
1093 	crc = 0xffffffff;	/* initial value */
1094 
1095 	for (i = 0; i < len; i++) {
1096 		crc ^= buf[i];
1097 		crc = (crc >> 4) ^ crctab[crc & 0xf];
1098 		crc = (crc >> 4) ^ crctab[crc & 0xf];
1099 	}
1100 
1101 	return (crc);
1102 }
1103 #endif
1104 
1105 uint32_t
1106 ether_crc32_be(const uint8_t *buf, size_t len)
1107 {
1108 	size_t i;
1109 	uint32_t crc, carry;
1110 	int bit;
1111 	uint8_t data;
1112 
1113 	crc = 0xffffffff;	/* initial value */
1114 
1115 	for (i = 0; i < len; i++) {
1116 		for (data = *buf++, bit = 0; bit < 8; bit++, data >>= 1) {
1117 			carry = ((crc & 0x80000000) ? 1 : 0) ^ (data & 0x01);
1118 			crc <<= 1;
1119 			if (carry)
1120 				crc = (crc ^ ETHER_CRC_POLY_BE) | carry;
1121 		}
1122 	}
1123 
1124 	return (crc);
1125 }
1126 
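/*
 * Default ioctl handler for Ethernet interfaces.  Handles protocol address
 * assignment, reading the link-layer address, MTU changes up to ETHERMTU,
 * and getting/setting the 802.1p priority (PCP); anything else is left to
 * the driver and reported as EINVAL here.
 */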
1127 int
1128 ether_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
1129 {
1130 	struct ifaddr *ifa = (struct ifaddr *) data;
1131 	struct ifreq *ifr = (struct ifreq *) data;
1132 	int error = 0;
1133 
1134 	switch (command) {
1135 	case SIOCSIFADDR:
1136 		ifp->if_flags |= IFF_UP;
1137 
1138 		switch (ifa->ifa_addr->sa_family) {
1139 #ifdef INET
1140 		case AF_INET:
1141 			ifp->if_init(ifp->if_softc);	/* before arpwhohas */
1142 			arp_ifinit(ifp, ifa);
1143 			break;
1144 #endif
1145 		default:
1146 			ifp->if_init(ifp->if_softc);
1147 			break;
1148 		}
1149 		break;
1150 
1151 	case SIOCGIFADDR:
1152 		bcopy(IF_LLADDR(ifp), &ifr->ifr_addr.sa_data[0],
1153 		    ETHER_ADDR_LEN);
1154 		break;
1155 
1156 	case SIOCSIFMTU:
1157 		/*
1158 		 * Set the interface MTU.
1159 		 */
1160 		if (ifr->ifr_mtu > ETHERMTU) {
1161 			error = EINVAL;
1162 		} else {
1163 			ifp->if_mtu = ifr->ifr_mtu;
1164 		}
1165 		break;
1166 
1167 	case SIOCSLANPCP:
1168 		error = priv_check(curthread, PRIV_NET_SETLANPCP);
1169 		if (error != 0)
1170 			break;
1171 		if (ifr->ifr_lan_pcp > 7 &&
1172 		    ifr->ifr_lan_pcp != IFNET_PCP_NONE) {
1173 			error = EINVAL;
1174 		} else {
1175 			ifp->if_pcp = ifr->ifr_lan_pcp;
1176 			/* broadcast event about PCP change */
1177 			EVENTHANDLER_INVOKE(ifnet_event, ifp, IFNET_EVENT_PCP);
1178 		}
1179 		break;
1180 
1181 	case SIOCGLANPCP:
1182 		ifr->ifr_lan_pcp = ifp->if_pcp;
1183 		break;
1184 
1185 	default:
1186 		error = EINVAL;			/* XXX netbsd has ENOTTY??? */
1187 		break;
1188 	}
1189 	return (error);
1190 }
1191 
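/*
 * Map a multicast group address (AF_LINK, AF_INET or AF_INET6) to the
 * Ethernet multicast address that a membership should be programmed with.
 * The unspecified IPv6 address instead puts the interface into allmulti
 * mode and returns no link-layer address.
 */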
1192 static int
1193 ether_resolvemulti(struct ifnet *ifp, struct sockaddr **llsa,
1194 	struct sockaddr *sa)
1195 {
1196 	struct sockaddr_dl *sdl;
1197 #ifdef INET
1198 	struct sockaddr_in *sin;
1199 #endif
1200 #ifdef INET6
1201 	struct sockaddr_in6 *sin6;
1202 #endif
1203 	u_char *e_addr;
1204 
1205 	switch(sa->sa_family) {
1206 	case AF_LINK:
1207 		/*
1208 		 * No mapping needed. Just check that it's a valid MC address.
1209 		 */
1210 		sdl = (struct sockaddr_dl *)sa;
1211 		e_addr = LLADDR(sdl);
1212 		if (!ETHER_IS_MULTICAST(e_addr))
1213 			return EADDRNOTAVAIL;
1214 		*llsa = NULL;
1215 		return 0;
1216 
1217 #ifdef INET
1218 	case AF_INET:
1219 		sin = (struct sockaddr_in *)sa;
1220 		if (!IN_MULTICAST(ntohl(sin->sin_addr.s_addr)))
1221 			return EADDRNOTAVAIL;
1222 		sdl = link_init_sdl(ifp, *llsa, IFT_ETHER);
1223 		sdl->sdl_alen = ETHER_ADDR_LEN;
1224 		e_addr = LLADDR(sdl);
1225 		ETHER_MAP_IP_MULTICAST(&sin->sin_addr, e_addr);
1226 		*llsa = (struct sockaddr *)sdl;
1227 		return 0;
1228 #endif
1229 #ifdef INET6
1230 	case AF_INET6:
1231 		sin6 = (struct sockaddr_in6 *)sa;
1232 		if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
1233 			/*
1234 			 * An IP6 address of 0 means listen to all
1235 			 * of the Ethernet multicast address used for IP6.
1236 			 * (This is used for multicast routers.)
1237 			 */
1238 			ifp->if_flags |= IFF_ALLMULTI;
1239 			*llsa = NULL;
1240 			return 0;
1241 		}
1242 		if (!IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr))
1243 			return EADDRNOTAVAIL;
1244 		sdl = link_init_sdl(ifp, *llsa, IFT_ETHER);
1245 		sdl->sdl_alen = ETHER_ADDR_LEN;
1246 		e_addr = LLADDR(sdl);
1247 		ETHER_MAP_IPV6_MULTICAST(&sin6->sin6_addr, e_addr);
1248 		*llsa = (struct sockaddr *)sdl;
1249 		return 0;
1250 #endif
1251 
1252 	default:
1253 		/*
1254 		 * Well, the text isn't quite right, but it's the name
1255 		 * that counts...
1256 		 */
1257 		return EAFNOSUPPORT;
1258 	}
1259 }
1260 
1261 static moduledata_t ether_mod = {
1262 	.name = "ether",
1263 };
1264 
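/*
 * Tap a frame whose VLAN tag is carried out-of-band (M_VLANTAG) to BPF,
 * reconstructing an 802.1Q header on the stack so listeners see the frame
 * as it would appear on the wire; the original mbuf is restored before
 * returning.
 */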
1265 void
1266 ether_vlan_mtap(struct bpf_if *bp, struct mbuf *m, void *data, u_int dlen)
1267 {
1268 	struct ether_vlan_header vlan;
1269 	struct mbuf mv, mb;
1270 
1271 	KASSERT((m->m_flags & M_VLANTAG) != 0,
1272 	    ("%s: vlan information not present", __func__));
1273 	KASSERT(m->m_len >= sizeof(struct ether_header),
1274 	    ("%s: mbuf not large enough for header", __func__));
1275 	bcopy(mtod(m, char *), &vlan, sizeof(struct ether_header));
1276 	vlan.evl_proto = vlan.evl_encap_proto;
1277 	vlan.evl_encap_proto = htons(ETHERTYPE_VLAN);
1278 	vlan.evl_tag = htons(m->m_pkthdr.ether_vtag);
1279 	m->m_len -= sizeof(struct ether_header);
1280 	m->m_data += sizeof(struct ether_header);
1281 	/*
1282 	 * If a data link has been supplied by the caller, then we will need to
1283 	 * re-create a stack allocated mbuf chain with the following structure:
1284 	 *
1285 	 * (1) mbuf #1 will contain the supplied data link
1286 	 * (2) mbuf #2 will contain the vlan header
1287 	 * (3) mbuf #3 will contain the original mbuf's packet data
1288 	 *
1289 	 * Otherwise, submit the packet and vlan header via bpf_mtap2().
1290 	 */
1291 	if (data != NULL) {
1292 		mv.m_next = m;
1293 		mv.m_data = (caddr_t)&vlan;
1294 		mv.m_len = sizeof(vlan);
1295 		mb.m_next = &mv;
1296 		mb.m_data = data;
1297 		mb.m_len = dlen;
1298 		bpf_mtap(bp, &mb);
1299 	} else
1300 		bpf_mtap2(bp, &vlan, sizeof(vlan), m);
1301 	m->m_len += sizeof(struct ether_header);
1302 	m->m_data -= sizeof(struct ether_header);
1303 }
1304 
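/*
 * Prepend an 802.1Q encapsulation header carrying the given tag to an
 * Ethernet frame.  On allocation failure the mbuf chain is freed and NULL
 * is returned.
 */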
1305 struct mbuf *
1306 ether_vlanencap(struct mbuf *m, uint16_t tag)
1307 {
1308 	struct ether_vlan_header *evl;
1309 
1310 	M_PREPEND(m, ETHER_VLAN_ENCAP_LEN, M_NOWAIT);
1311 	if (m == NULL)
1312 		return (NULL);
1313 	/* M_PREPEND takes care of m_len, m_pkthdr.len for us */
1314 
1315 	if (m->m_len < sizeof(*evl)) {
1316 		m = m_pullup(m, sizeof(*evl));
1317 		if (m == NULL)
1318 			return (NULL);
1319 	}
1320 
1321 	/*
1322 	 * Transform the Ethernet header into an Ethernet header
1323 	 * with 802.1Q encapsulation.
1324 	 */
1325 	evl = mtod(m, struct ether_vlan_header *);
1326 	bcopy((char *)evl + ETHER_VLAN_ENCAP_LEN,
1327 	    (char *)evl, ETHER_HDR_LEN - ETHER_TYPE_LEN);
1328 	evl->evl_encap_proto = htons(ETHERTYPE_VLAN);
1329 	evl->evl_tag = htons(tag);
1330 	return (m);
1331 }
1332 
1333 static SYSCTL_NODE(_net_link, IFT_L2VLAN, vlan, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
1334     "IEEE 802.1Q VLAN");
1335 static SYSCTL_NODE(_net_link_vlan, PF_LINK, link,
1336     CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
1337     "for consistency");
1338 
1339 VNET_DEFINE_STATIC(int, soft_pad);
1340 #define	V_soft_pad	VNET(soft_pad)
1341 SYSCTL_INT(_net_link_vlan, OID_AUTO, soft_pad, CTLFLAG_RW | CTLFLAG_VNET,
1342     &VNET_NAME(soft_pad), 0,
1343     "pad short frames before tagging");
1344 
1345 /*
1346  * For now, make preserving PCP via an mbuf tag optional, as it increases
1347  * per-packet memory allocations and frees.  In the future, it would be
1348  * preferable to reuse ether_vtag for this, or similar.
1349  */
1350 int vlan_mtag_pcp = 0;
1351 SYSCTL_INT(_net_link_vlan, OID_AUTO, mtag_pcp, CTLFLAG_RW,
1352     &vlan_mtag_pcp, 0,
1353     "Retain VLAN PCP information as packets are passed up the stack");
1354 
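/*
 * Prepare an outgoing frame for VLAN "vid" with priority "pcp" on parent
 * interface "p": optionally pad short frames, then either request hardware
 * tag insertion (IFCAP_VLAN_HWTAGGING) or encapsulate the tag in-line.
 * Returns false with *mp freed and set to NULL on failure.
 */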
1355 bool
1356 ether_8021q_frame(struct mbuf **mp, struct ifnet *ife, struct ifnet *p,
1357     uint16_t vid, uint8_t pcp)
1358 {
1359 	struct m_tag *mtag;
1360 	int n;
1361 	uint16_t tag;
1362 	static const char pad[8];	/* just zeros */
1363 
1364 	/*
1365 	 * Pad the frame to the minimum size allowed if told to.
1366 	 * This option is in accord with IEEE Std 802.1Q, 2003 Ed.,
1367 	 * paragraph C.4.4.3.b.  It can help to work around buggy
1368 	 * bridges that violate paragraph C.4.4.3.a from the same
1369 	 * document, i.e., fail to pad short frames after untagging.
1370 	 * E.g., a tagged frame 66 bytes long (incl. FCS) is OK, but
1371 	 * untagging it will produce a 62-byte frame, which is a runt
1372 	 * and requires padding.  There are VLAN-enabled network
1373 	 * devices that just discard such runts instead or mishandle
1374 	 * them somehow.
1375 	 */
1376 	if (V_soft_pad && p->if_type == IFT_ETHER) {
1377 		for (n = ETHERMIN + ETHER_HDR_LEN - (*mp)->m_pkthdr.len;
1378 		     n > 0; n -= sizeof(pad)) {
1379 			if (!m_append(*mp, min(n, sizeof(pad)), pad))
1380 				break;
1381 		}
1382 		if (n > 0) {
1383 			m_freem(*mp);
1384 			*mp = NULL;
1385 			if_printf(ife, "cannot pad short frame\n");
1386 			return (false);
1387 		}
1388 	}
1389 
1390 	/*
1391 	 * If underlying interface can do VLAN tag insertion itself,
1392 	 * just pass the packet along. However, we need some way to
1393 	 * tell the interface where the packet came from so that it
1394 	 * knows how to find the VLAN tag to use, so we attach a
1395 	 * packet tag that holds it.
1396 	 */
1397 	if (vlan_mtag_pcp && (mtag = m_tag_locate(*mp, MTAG_8021Q,
1398 	    MTAG_8021Q_PCP_OUT, NULL)) != NULL)
1399 		tag = EVL_MAKETAG(vid, *(uint8_t *)(mtag + 1), 0);
1400 	else
1401 		tag = EVL_MAKETAG(vid, pcp, 0);
1402 	if (p->if_capenable & IFCAP_VLAN_HWTAGGING) {
1403 		(*mp)->m_pkthdr.ether_vtag = tag;
1404 		(*mp)->m_flags |= M_VLANTAG;
1405 	} else {
1406 		*mp = ether_vlanencap(*mp, tag);
1407 		if (*mp == NULL) {
1408 			if_printf(ife, "unable to prepend 802.1Q header\n");
1409 			return (false);
1410 		}
1411 	}
1412 	return (true);
1413 }
1414 
1415 /*
1416  * Allocate an address from the FreeBSD Foundation OUI.  This uses a
1417  * cryptographic hash function on the containing jail's name, UUID and the
1418  * interface name to attempt to provide a unique but stable address.
1419  * Pseudo-interfaces which require a MAC address should use this function to
1420  * allocate non-locally-administered addresses.
1421  */
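/*
 * For example, a cloned pseudo-interface might do (sketch only):
 *
 *	struct ether_addr ea;
 *
 *	ether_gen_addr(ifp, &ea);
 *	ether_ifattach(ifp, ea.octet);
 */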
1422 void
1423 ether_gen_addr(struct ifnet *ifp, struct ether_addr *hwaddr)
1424 {
1425 	SHA1_CTX ctx;
1426 	char *buf;
1427 	char uuid[HOSTUUIDLEN + 1];
1428 	uint64_t addr;
1429 	int i, sz;
1430 	char digest[SHA1_RESULTLEN];
1431 	char jailname[MAXHOSTNAMELEN];
1432 
1433 	getcredhostuuid(curthread->td_ucred, uuid, sizeof(uuid));
1434 	/* If each (vnet) jail also had a unique hostuuid, this would not
1435 	 * be necessary. */
1436 	getjailname(curthread->td_ucred, jailname, sizeof(jailname));
1437 	sz = asprintf(&buf, M_TEMP, "%s-%s-%s", uuid, if_name(ifp),
1438 	    jailname);
1439 	if (sz < 0) {
1440 		/* Fall back to a random mac address. */
1441 		arc4rand(hwaddr, sizeof(*hwaddr), 0);
1442 		hwaddr->octet[0] = 0x02;
1443 		return;
1444 	}
1445 
1446 	SHA1Init(&ctx);
1447 	SHA1Update(&ctx, buf, sz);
1448 	SHA1Final(digest, &ctx);
1449 	free(buf, M_TEMP);
1450 
1451 	addr = ((digest[0] << 16) | (digest[1] << 8) | digest[2]) &
1452 	    OUI_FREEBSD_GENERATED_MASK;
1453 	addr = OUI_FREEBSD(addr);
1454 	for (i = 0; i < ETHER_ADDR_LEN; ++i) {
1455 		hwaddr->octet[i] = addr >> ((ETHER_ADDR_LEN - i - 1) * 8) &
1456 		    0xFF;
1457 	}
1458 }
1459 
1460 DECLARE_MODULE(ether, ether_mod, SI_SUB_INIT_IF, SI_ORDER_ANY);
1461 MODULE_VERSION(ether, 1);
1462