xref: /freebsd/sys/net/if_ethersubr.c (revision 3ff01b231dfa83d518854c63e7c9cd1debd1139e)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 1982, 1989, 1993
5  *	The Regents of the University of California.  All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. Neither the name of the University nor the names of its contributors
16  *    may be used to endorse or promote products derived from this software
17  *    without specific prior written permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29  * SUCH DAMAGE.
30  *
31  *	@(#)if_ethersubr.c	8.1 (Berkeley) 6/10/93
32  * $FreeBSD$
33  */
34 
35 #include "opt_inet.h"
36 #include "opt_inet6.h"
37 #include "opt_netgraph.h"
38 #include "opt_mbuf_profiling.h"
39 #include "opt_rss.h"
40 
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/devctl.h>
44 #include <sys/eventhandler.h>
45 #include <sys/jail.h>
46 #include <sys/kernel.h>
47 #include <sys/lock.h>
48 #include <sys/malloc.h>
49 #include <sys/module.h>
50 #include <sys/mbuf.h>
51 #include <sys/proc.h>
52 #include <sys/priv.h>
53 #include <sys/random.h>
54 #include <sys/socket.h>
55 #include <sys/sockio.h>
56 #include <sys/sysctl.h>
57 #include <sys/uuid.h>
58 
59 #include <net/ieee_oui.h>
60 #include <net/if.h>
61 #include <net/if_var.h>
62 #include <net/if_arp.h>
63 #include <net/netisr.h>
64 #include <net/route.h>
65 #include <net/if_llc.h>
66 #include <net/if_dl.h>
67 #include <net/if_types.h>
68 #include <net/bpf.h>
69 #include <net/ethernet.h>
70 #include <net/if_bridgevar.h>
71 #include <net/if_vlan_var.h>
72 #include <net/if_llatbl.h>
73 #include <net/pfil.h>
74 #include <net/rss_config.h>
75 #include <net/vnet.h>
76 
77 #include <netpfil/pf/pf_mtag.h>
78 
79 #if defined(INET) || defined(INET6)
80 #include <netinet/in.h>
81 #include <netinet/in_var.h>
82 #include <netinet/if_ether.h>
83 #include <netinet/ip_carp.h>
84 #include <netinet/ip_var.h>
85 #endif
86 #ifdef INET6
87 #include <netinet6/nd6.h>
88 #endif
89 #include <security/mac/mac_framework.h>
90 
91 #include <crypto/sha1.h>
92 
93 #ifdef CTASSERT
94 CTASSERT(sizeof (struct ether_header) == ETHER_ADDR_LEN * 2 + 2);
95 CTASSERT(sizeof (struct ether_addr) == ETHER_ADDR_LEN);
96 #endif
97 
98 VNET_DEFINE(pfil_head_t, link_pfil_head);	/* Packet filter hooks */
99 
100 /* netgraph node hooks for ng_ether(4) */
101 void	(*ng_ether_input_p)(struct ifnet *ifp, struct mbuf **mp);
102 void	(*ng_ether_input_orphan_p)(struct ifnet *ifp, struct mbuf *m);
103 int	(*ng_ether_output_p)(struct ifnet *ifp, struct mbuf **mp);
104 void	(*ng_ether_attach_p)(struct ifnet *ifp);
105 void	(*ng_ether_detach_p)(struct ifnet *ifp);
106 
107 void	(*vlan_input_p)(struct ifnet *, struct mbuf *);
108 
109 /* if_bridge(4) support */
110 void	(*bridge_dn_p)(struct mbuf *, struct ifnet *);
111 
112 /* if_lagg(4) support */
113 struct mbuf *(*lagg_input_ethernet_p)(struct ifnet *, struct mbuf *);
114 
115 static const u_char etherbroadcastaddr[ETHER_ADDR_LEN] =
116 			{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
117 
118 static	int ether_resolvemulti(struct ifnet *, struct sockaddr **,
119 		struct sockaddr *);
120 static	int ether_requestencap(struct ifnet *, struct if_encap_req *);
121 
122 #define senderr(e) do { error = (e); goto bad;} while (0)
123 
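/*
 * Convert the transmit checksum-offload requests on 'src' into the
 * corresponding "already verified" receive flags on 'dst'.  Used when a
 * packet is looped back locally, so the input path does not try to
 * validate checksums that were never actually computed.
 */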
124 static void
125 update_mbuf_csumflags(struct mbuf *src, struct mbuf *dst)
126 {
127 	int csum_flags = 0;
128 
129 	if (src->m_pkthdr.csum_flags & CSUM_IP)
130 		csum_flags |= (CSUM_IP_CHECKED|CSUM_IP_VALID);
131 	if (src->m_pkthdr.csum_flags & CSUM_DELAY_DATA)
132 		csum_flags |= (CSUM_DATA_VALID|CSUM_PSEUDO_HDR);
133 	if (src->m_pkthdr.csum_flags & CSUM_SCTP)
134 		csum_flags |= CSUM_SCTP_VALID;
135 	dst->m_pkthdr.csum_flags |= csum_flags;
136 	if (csum_flags & CSUM_DATA_VALID)
137 		dst->m_pkthdr.csum_data = 0xffff;
138 }
139 
140 /*
141  * Handle link-layer encapsulation requests.
142  */
143 static int
144 ether_requestencap(struct ifnet *ifp, struct if_encap_req *req)
145 {
146 	struct ether_header *eh;
147 	struct arphdr *ah;
148 	uint16_t etype;
149 	const u_char *lladdr;
150 
151 	if (req->rtype != IFENCAP_LL)
152 		return (EOPNOTSUPP);
153 
154 	if (req->bufsize < ETHER_HDR_LEN)
155 		return (ENOMEM);
156 
157 	eh = (struct ether_header *)req->buf;
158 	lladdr = req->lladdr;
159 	req->lladdr_off = 0;
160 
161 	switch (req->family) {
162 	case AF_INET:
163 		etype = htons(ETHERTYPE_IP);
164 		break;
165 	case AF_INET6:
166 		etype = htons(ETHERTYPE_IPV6);
167 		break;
168 	case AF_ARP:
169 		ah = (struct arphdr *)req->hdata;
170 		ah->ar_hrd = htons(ARPHRD_ETHER);
171 
172 		switch(ntohs(ah->ar_op)) {
173 		case ARPOP_REVREQUEST:
174 		case ARPOP_REVREPLY:
175 			etype = htons(ETHERTYPE_REVARP);
176 			break;
177 		case ARPOP_REQUEST:
178 		case ARPOP_REPLY:
179 		default:
180 			etype = htons(ETHERTYPE_ARP);
181 			break;
182 		}
183 
184 		if (req->flags & IFENCAP_FLAG_BROADCAST)
185 			lladdr = ifp->if_broadcastaddr;
186 		break;
187 	default:
188 		return (EAFNOSUPPORT);
189 	}
190 
191 	memcpy(&eh->ether_type, &etype, sizeof(eh->ether_type));
192 	memcpy(eh->ether_dhost, lladdr, ETHER_ADDR_LEN);
193 	memcpy(eh->ether_shost, IF_LLADDR(ifp), ETHER_ADDR_LEN);
194 	req->bufsize = sizeof(struct ether_header);
195 
196 	return (0);
197 }
198 
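/*
 * Resolve the link-layer destination for 'dst' and build the Ethernet
 * header in the caller-supplied buffer 'phdr'.  Unicast IPv4/IPv6 goes
 * through arpresolve()/nd6_resolve(); broadcast and multicast addresses
 * are mapped directly.  On success, *pflags is set to RT_MAY_LOOP and,
 * for our own addresses, RT_L2_ME as well.
 */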
199 static int
200 ether_resolve_addr(struct ifnet *ifp, struct mbuf *m,
201 	const struct sockaddr *dst, struct route *ro, u_char *phdr,
202 	uint32_t *pflags, struct llentry **plle)
203 {
204 	struct ether_header *eh;
205 	uint32_t lleflags = 0;
206 	int error = 0;
207 #if defined(INET) || defined(INET6)
208 	uint16_t etype;
209 #endif
210 
211 	if (plle)
212 		*plle = NULL;
213 	eh = (struct ether_header *)phdr;
214 
215 	switch (dst->sa_family) {
216 #ifdef INET
217 	case AF_INET:
218 		if ((m->m_flags & (M_BCAST | M_MCAST)) == 0)
219 			error = arpresolve(ifp, 0, m, dst, phdr, &lleflags,
220 			    plle);
221 		else {
222 			if (m->m_flags & M_BCAST)
223 				memcpy(eh->ether_dhost, ifp->if_broadcastaddr,
224 				    ETHER_ADDR_LEN);
225 			else {
226 				const struct in_addr *a;
227 				a = &(((const struct sockaddr_in *)dst)->sin_addr);
228 				ETHER_MAP_IP_MULTICAST(a, eh->ether_dhost);
229 			}
230 			etype = htons(ETHERTYPE_IP);
231 			memcpy(&eh->ether_type, &etype, sizeof(etype));
232 			memcpy(eh->ether_shost, IF_LLADDR(ifp), ETHER_ADDR_LEN);
233 		}
234 		break;
235 #endif
236 #ifdef INET6
237 	case AF_INET6:
238 		if ((m->m_flags & M_MCAST) == 0)
239 			error = nd6_resolve(ifp, 0, m, dst, phdr, &lleflags,
240 			    plle);
241 		else {
242 			const struct in6_addr *a6;
243 			a6 = &(((const struct sockaddr_in6 *)dst)->sin6_addr);
244 			ETHER_MAP_IPV6_MULTICAST(a6, eh->ether_dhost);
245 			etype = htons(ETHERTYPE_IPV6);
246 			memcpy(&eh->ether_type, &etype, sizeof(etype));
247 			memcpy(eh->ether_shost, IF_LLADDR(ifp), ETHER_ADDR_LEN);
248 		}
249 		break;
250 #endif
251 	default:
252 		if_printf(ifp, "can't handle af%d\n", dst->sa_family);
253 		if (m != NULL)
254 			m_freem(m);
255 		return (EAFNOSUPPORT);
256 	}
257 
258 	if (error == EHOSTDOWN) {
259 		if (ro != NULL && (ro->ro_flags & RT_HAS_GW) != 0)
260 			error = EHOSTUNREACH;
261 	}
262 
263 	if (error != 0)
264 		return (error);
265 
266 	*pflags = RT_MAY_LOOP;
267 	if (lleflags & LLE_IFADDR)
268 		*pflags |= RT_L2_ME;
269 
270 	return (0);
271 }
272 
273 /*
274  * Ethernet output routine.
275  * Encapsulate a packet of type family for the local net.
276  * Use trailer local net encapsulation if enough data in first
277  * packet leaves a multiple of 512 bytes of data in remainder.
278  */
279 int
280 ether_output(struct ifnet *ifp, struct mbuf *m,
281 	const struct sockaddr *dst, struct route *ro)
282 {
283 	int error = 0;
284 	char linkhdr[ETHER_HDR_LEN], *phdr;
285 	struct ether_header *eh;
286 	struct pf_mtag *t;
287 	bool loop_copy;
288 	int hlen;	/* link layer header length */
289 	uint32_t pflags;
290 	struct llentry *lle = NULL;
291 	int addref = 0;
292 
293 	phdr = NULL;
294 	pflags = 0;
295 	if (ro != NULL) {
296 		/* XXX BPF uses ro_prepend */
297 		if (ro->ro_prepend != NULL) {
298 			phdr = ro->ro_prepend;
299 			hlen = ro->ro_plen;
300 		} else if (!(m->m_flags & (M_BCAST | M_MCAST))) {
301 			if ((ro->ro_flags & RT_LLE_CACHE) != 0) {
302 				lle = ro->ro_lle;
303 				if (lle != NULL &&
304 				    (lle->la_flags & LLE_VALID) == 0) {
305 					LLE_FREE(lle);
306 					lle = NULL;	/* redundant */
307 					ro->ro_lle = NULL;
308 				}
309 				if (lle == NULL) {
310 					/* If we look the entry up, keep a reference to cache it. */
311 					addref = 1;
312 				} else
313 					/*
314 					 * Notify LLE code that
315 					 * the entry was used
316 					 * by datapath.
317 					 */
318 					llentry_mark_used(lle);
319 			}
320 			if (lle != NULL) {
321 				phdr = lle->r_linkdata;
322 				hlen = lle->r_hdrlen;
323 				pflags = lle->r_flags;
324 			}
325 		}
326 	}
327 
328 #ifdef MAC
329 	error = mac_ifnet_check_transmit(ifp, m);
330 	if (error)
331 		senderr(error);
332 #endif
333 
334 	M_PROFILE(m);
335 	if (ifp->if_flags & IFF_MONITOR)
336 		senderr(ENETDOWN);
337 	if (!((ifp->if_flags & IFF_UP) &&
338 	    (ifp->if_drv_flags & IFF_DRV_RUNNING)))
339 		senderr(ENETDOWN);
340 
341 	if (phdr == NULL) {
342 		/* No prepend data supplied. Try to calculate it ourselves. */
343 		phdr = linkhdr;
344 		hlen = ETHER_HDR_LEN;
345 		error = ether_resolve_addr(ifp, m, dst, ro, phdr, &pflags,
346 		    addref ? &lle : NULL);
347 		if (addref && lle != NULL)
348 			ro->ro_lle = lle;
349 		if (error != 0)
350 			return (error == EWOULDBLOCK ? 0 : error);
351 	}
352 
353 	if ((pflags & RT_L2_ME) != 0) {
354 		update_mbuf_csumflags(m, m);
355 		return (if_simloop(ifp, m, dst->sa_family, 0));
356 	}
357 	loop_copy = (pflags & RT_MAY_LOOP) != 0;
358 
359 	/*
360 	 * Add local net header.  If no space in first mbuf,
361 	 * allocate another.
362 	 *
363 	 * Note that we do prepend regardless of RT_HAS_HEADER flag.
364 	 * This is done because BPF code shifts m_data pointer
365 	 * to the end of ethernet header prior to calling if_output().
366 	 */
367 	M_PREPEND(m, hlen, M_NOWAIT);
368 	if (m == NULL)
369 		senderr(ENOBUFS);
370 	if ((pflags & RT_HAS_HEADER) == 0) {
371 		eh = mtod(m, struct ether_header *);
372 		memcpy(eh, phdr, hlen);
373 	}
374 
375 	/*
376 	 * If a simplex interface, and the packet is being sent to our
377 	 * Ethernet address or a broadcast address, loopback a copy.
378 	 * XXX To make a simplex device behave exactly like a duplex
379 	 * device, we should copy in the case of sending to our own
380 	 * ethernet address (thus letting the original actually appear
381 	 * on the wire). However, we don't do that here for security
382 	 * reasons and compatibility with the original behavior.
383 	 */
384 	if ((m->m_flags & M_BCAST) && loop_copy && (ifp->if_flags & IFF_SIMPLEX) &&
385 	    ((t = pf_find_mtag(m)) == NULL || !t->routed)) {
386 		struct mbuf *n;
387 
388 		/*
389 		 * Because if_simloop() modifies the packet, we need a
390 		 * writable copy through m_dup() instead of a readonly
391 		 * one as m_copy[m] would give us. The alternative would
392 		 * be to modify if_simloop() to handle the readonly mbuf,
393 		 * but performance-wise it is mostly equivalent (trading
394 		 * extra data copying vs. extra locking).
395 		 *
396 		 * XXX This is a local workaround.  A number of less
397 		 * often used kernel parts suffer from the same bug.
398 		 * See PR kern/105943 for a proposed general solution.
399 		 */
400 		if ((n = m_dup(m, M_NOWAIT)) != NULL) {
401 			update_mbuf_csumflags(m, n);
402 			(void)if_simloop(ifp, n, dst->sa_family, hlen);
403 		} else
404 			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
405 	}
406 
407 	/*
408 	 * Bridges require special output handling.
409 	 */
410 	if (ifp->if_bridge) {
411 		BRIDGE_OUTPUT(ifp, m, error);
412 		return (error);
413 	}
414 
415 #if defined(INET) || defined(INET6)
416 	if (ifp->if_carp &&
417 	    (error = (*carp_output_p)(ifp, m, dst)))
418 		goto bad;
419 #endif
420 
421 	/* Handle ng_ether(4) processing, if any */
422 	if (ifp->if_l2com != NULL) {
423 		KASSERT(ng_ether_output_p != NULL,
424 		    ("ng_ether_output_p is NULL"));
425 		if ((error = (*ng_ether_output_p)(ifp, &m)) != 0) {
426 bad:			if (m != NULL)
427 				m_freem(m);
428 			return (error);
429 		}
430 		if (m == NULL)
431 			return (0);
432 	}
433 
434 	/* Continue with link-layer output */
435 	return ether_output_frame(ifp, m);
436 }
437 
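/*
 * Ensure that an untagged frame carries an 802.1Q header with VLAN ID 0
 * and the requested priority (PCP).  Frames that already carry a VLAN or
 * QinQ Ethertype are left untouched.  Returns false, and counts an
 * output error, if the tag cannot be inserted.
 */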
438 static bool
439 ether_set_pcp(struct mbuf **mp, struct ifnet *ifp, uint8_t pcp)
440 {
441 	struct ether_8021q_tag qtag;
442 	struct ether_header *eh;
443 
444 	eh = mtod(*mp, struct ether_header *);
445 	if (ntohs(eh->ether_type) == ETHERTYPE_VLAN ||
446 	    ntohs(eh->ether_type) == ETHERTYPE_QINQ)
447 		return (true);
448 
449 	qtag.vid = 0;
450 	qtag.pcp = pcp;
451 	qtag.proto = ETHERTYPE_VLAN;
452 	if (ether_8021q_frame(mp, ifp, ifp, &qtag))
453 		return (true);
454 	if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
455 	return (false);
456 }
457 
458 /*
459  * Ethernet link layer output routine to send a raw frame to the device.
460  *
461  * This assumes that the 14 byte Ethernet header is present and contiguous
462  * in the first mbuf (if BRIDGE'ing).
463  */
464 int
465 ether_output_frame(struct ifnet *ifp, struct mbuf *m)
466 {
467 	uint8_t pcp;
468 
469 	pcp = ifp->if_pcp;
470 	if (pcp != IFNET_PCP_NONE && ifp->if_type != IFT_L2VLAN &&
471 	    !ether_set_pcp(&m, ifp, pcp))
472 		return (0);
473 
474 	if (PFIL_HOOKED_OUT(V_link_pfil_head))
475 		switch (pfil_run_hooks(V_link_pfil_head, &m, ifp, PFIL_OUT,
476 		    NULL)) {
477 		case PFIL_DROPPED:
478 			return (EACCES);
479 		case PFIL_CONSUMED:
480 			return (0);
481 		}
482 
483 #ifdef EXPERIMENTAL
484 #if defined(INET6) && defined(INET)
485 	/* draft-ietf-6man-ipv6only-flag */
486 	/* Catch ETHERTYPE_IP, and ETHERTYPE_[REV]ARP if we are v6-only. */
487 	if ((ND_IFINFO(ifp)->flags & ND6_IFF_IPV6_ONLY_MASK) != 0) {
488 		struct ether_header *eh;
489 
490 		eh = mtod(m, struct ether_header *);
491 		switch (ntohs(eh->ether_type)) {
492 		case ETHERTYPE_IP:
493 		case ETHERTYPE_ARP:
494 		case ETHERTYPE_REVARP:
495 			m_freem(m);
496 			return (EAFNOSUPPORT);
497 			/* NOTREACHED */
498 			break;
499 		};
500 	}
501 #endif
502 #endif
503 
504 	/*
505 	 * Queue message on interface, update output statistics if
506 	 * successful, and start output if interface not yet active.
507 	 */
508 	return ((ifp->if_transmit)(ifp, m));
509 }
510 
511 /*
512  * Process a received Ethernet packet; the packet is in the
513  * mbuf chain m with the ethernet header at the front.
514  */
515 static void
516 ether_input_internal(struct ifnet *ifp, struct mbuf *m)
517 {
518 	struct ether_header *eh;
519 	u_short etype;
520 
521 	if ((ifp->if_flags & IFF_UP) == 0) {
522 		m_freem(m);
523 		return;
524 	}
525 #ifdef DIAGNOSTIC
526 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
527 		if_printf(ifp, "discard frame at !IFF_DRV_RUNNING\n");
528 		m_freem(m);
529 		return;
530 	}
531 #endif
532 	if (m->m_len < ETHER_HDR_LEN) {
533 		/* XXX maybe should pullup? */
534 		if_printf(ifp, "discard frame w/o leading ethernet "
535 				"header (len %u pkt len %u)\n",
536 				m->m_len, m->m_pkthdr.len);
537 		if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
538 		m_freem(m);
539 		return;
540 	}
541 	eh = mtod(m, struct ether_header *);
542 	etype = ntohs(eh->ether_type);
543 	random_harvest_queue_ether(m, sizeof(*m));
544 
545 #ifdef EXPERIMENTAL
546 #if defined(INET6) && defined(INET)
547 	/* draft-ietf-6man-ipv6only-flag */
548 	/* Catch ETHERTYPE_IP, and ETHERTYPE_[REV]ARP if we are v6-only. */
549 	if ((ND_IFINFO(ifp)->flags & ND6_IFF_IPV6_ONLY_MASK) != 0) {
550 		switch (etype) {
551 		case ETHERTYPE_IP:
552 		case ETHERTYPE_ARP:
553 		case ETHERTYPE_REVARP:
554 			m_freem(m);
555 			return;
556 			/* NOTREACHED */
557 			break;
558 		};
559 	}
560 #endif
561 #endif
562 
563 	CURVNET_SET_QUIET(ifp->if_vnet);
564 
565 	if (ETHER_IS_MULTICAST(eh->ether_dhost)) {
566 		if (ETHER_IS_BROADCAST(eh->ether_dhost))
567 			m->m_flags |= M_BCAST;
568 		else
569 			m->m_flags |= M_MCAST;
570 		if_inc_counter(ifp, IFCOUNTER_IMCASTS, 1);
571 	}
572 
573 #ifdef MAC
574 	/*
575 	 * Tag the mbuf with an appropriate MAC label before any other
576 	 * consumers can get to it.
577 	 */
578 	mac_ifnet_create_mbuf(ifp, m);
579 #endif
580 
581 	/*
582 	 * Give bpf a chance at the packet.
583 	 */
584 	ETHER_BPF_MTAP(ifp, m);
585 
586 	/*
587 	 * If the CRC is still on the packet, trim it off. We do this once
588 	 * and once only in case we are re-entered. Nothing else on the
589 	 * Ethernet receive path expects to see the FCS.
590 	 */
591 	if (m->m_flags & M_HASFCS) {
592 		m_adj(m, -ETHER_CRC_LEN);
593 		m->m_flags &= ~M_HASFCS;
594 	}
595 
596 	if (!(ifp->if_capenable & IFCAP_HWSTATS))
597 		if_inc_counter(ifp, IFCOUNTER_IBYTES, m->m_pkthdr.len);
598 
599 	/* Allow monitor mode to claim this frame, after stats are updated. */
600 	if (ifp->if_flags & IFF_MONITOR) {
601 		m_freem(m);
602 		CURVNET_RESTORE();
603 		return;
604 	}
605 
606 	/* Handle input from a lagg(4) port */
607 	if (ifp->if_type == IFT_IEEE8023ADLAG) {
608 		KASSERT(lagg_input_ethernet_p != NULL,
609 		    ("%s: if_lagg not loaded!", __func__));
610 		m = (*lagg_input_ethernet_p)(ifp, m);
611 		if (m != NULL)
612 			ifp = m->m_pkthdr.rcvif;
613 		else {
614 			CURVNET_RESTORE();
615 			return;
616 		}
617 	}
618 
619 	/*
620 	 * If the hardware did not process an 802.1Q tag, do this now,
621 	 * to allow 802.1P priority frames to be passed to the main input
622 	 * path correctly.
623 	 */
624 	if ((m->m_flags & M_VLANTAG) == 0 &&
625 	    ((etype == ETHERTYPE_VLAN) || (etype == ETHERTYPE_QINQ))) {
626 		struct ether_vlan_header *evl;
627 
628 		if (m->m_len < sizeof(*evl) &&
629 		    (m = m_pullup(m, sizeof(*evl))) == NULL) {
630 #ifdef DIAGNOSTIC
631 			if_printf(ifp, "cannot pullup VLAN header\n");
632 #endif
633 			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
634 			CURVNET_RESTORE();
635 			return;
636 		}
637 
638 		evl = mtod(m, struct ether_vlan_header *);
639 		m->m_pkthdr.ether_vtag = ntohs(evl->evl_tag);
640 		m->m_flags |= M_VLANTAG;
641 
642 		bcopy((char *)evl, (char *)evl + ETHER_VLAN_ENCAP_LEN,
643 		    ETHER_HDR_LEN - ETHER_TYPE_LEN);
644 		m_adj(m, ETHER_VLAN_ENCAP_LEN);
645 		eh = mtod(m, struct ether_header *);
646 	}
647 
648 	M_SETFIB(m, ifp->if_fib);
649 
650 	/* Allow ng_ether(4) to claim this frame. */
651 	if (ifp->if_l2com != NULL) {
652 		KASSERT(ng_ether_input_p != NULL,
653 		    ("%s: ng_ether_input_p is NULL", __func__));
654 		m->m_flags &= ~M_PROMISC;
655 		(*ng_ether_input_p)(ifp, &m);
656 		if (m == NULL) {
657 			CURVNET_RESTORE();
658 			return;
659 		}
660 		eh = mtod(m, struct ether_header *);
661 	}
662 
663 	/*
664 	 * Allow if_bridge(4) to claim this frame.
665 	 * The BRIDGE_INPUT() macro will update ifp if the bridge changed it
666 	 * and the frame should be delivered locally.
667 	 */
668 	if (ifp->if_bridge != NULL) {
669 		m->m_flags &= ~M_PROMISC;
670 		BRIDGE_INPUT(ifp, m);
671 		if (m == NULL) {
672 			CURVNET_RESTORE();
673 			return;
674 		}
675 		eh = mtod(m, struct ether_header *);
676 	}
677 
678 #if defined(INET) || defined(INET6)
679 	/*
680 	 * Clear M_PROMISC on frame so that carp(4) will see it when the
681 	 * mbuf flows up to Layer 3.
682 	 * FreeBSD's implementation of carp(4) uses the inprotosw
683 	 * to dispatch IPPROTO_CARP. carp(4) also allocates its own
684 	 * Ethernet addresses of the form 00:00:5e:00:01:xx, which
685 	 * is outside the scope of the M_PROMISC test below.
686 	 * TODO: Maintain a hash table of ethernet addresses other than
687 	 * ether_dhost which may be active on this ifp.
688 	 */
689 	if (ifp->if_carp && (*carp_forus_p)(ifp, eh->ether_dhost)) {
690 		m->m_flags &= ~M_PROMISC;
691 	} else
692 #endif
693 	{
694 		/*
695 		 * If the frame received was not for our MAC address, set the
696 		 * M_PROMISC flag on the mbuf chain. The frame may need to
697 		 * be seen by the rest of the Ethernet input path in case of
698 		 * re-entry (e.g. bridge, vlan, netgraph) but should not be
699 		 * seen by upper protocol layers.
700 		 */
701 		if (!ETHER_IS_MULTICAST(eh->ether_dhost) &&
702 		    bcmp(IF_LLADDR(ifp), eh->ether_dhost, ETHER_ADDR_LEN) != 0)
703 			m->m_flags |= M_PROMISC;
704 	}
705 
706 	ether_demux(ifp, m);
707 	CURVNET_RESTORE();
708 }
709 
710 /*
711  * Ethernet input dispatch; by default, direct dispatch here regardless of
712  * global configuration.  However, if RSS is enabled, hook up RSS affinity
713  * so that when deferred or hybrid dispatch is enabled, we can redistribute
714  * load based on RSS.
715  *
716  * XXXRW: Would be nice if the ifnet passed up a flag indicating whether or
717  * not it had already done work distribution via multi-queue.  Then we could
718  * direct dispatch in the event load balancing was already complete and
719  * handle the case of interfaces with different capabilities better.
720  *
721  * XXXRW: Sort of want an M_DISTRIBUTED flag to avoid multiple distributions
722  * at multiple layers?
723  *
724  * XXXRW: For now, enable all this only if RSS is compiled in, although it
725  * works fine without RSS.  Need to characterise the performance overhead
726  * of the detour through the netisr code in the event the result is always
727  * direct dispatch.
728  */
729 static void
730 ether_nh_input(struct mbuf *m)
731 {
732 
733 	M_ASSERTPKTHDR(m);
734 	KASSERT(m->m_pkthdr.rcvif != NULL,
735 	    ("%s: NULL interface pointer", __func__));
736 	ether_input_internal(m->m_pkthdr.rcvif, m);
737 }
738 
739 static struct netisr_handler	ether_nh = {
740 	.nh_name = "ether",
741 	.nh_handler = ether_nh_input,
742 	.nh_proto = NETISR_ETHER,
743 #ifdef RSS
744 	.nh_policy = NETISR_POLICY_CPU,
745 	.nh_dispatch = NETISR_DISPATCH_DIRECT,
746 	.nh_m2cpuid = rss_m2cpuid,
747 #else
748 	.nh_policy = NETISR_POLICY_SOURCE,
749 	.nh_dispatch = NETISR_DISPATCH_DIRECT,
750 #endif
751 };
752 
753 static void
754 ether_init(__unused void *arg)
755 {
756 
757 	netisr_register(&ether_nh);
758 }
759 SYSINIT(ether, SI_SUB_INIT_IF, SI_ORDER_ANY, ether_init, NULL);
760 
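/*
 * Per-VNET initialization: register the Ethernet pfil(9) head and, under
 * VIMAGE, the per-vnet netisr handler.
 */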
761 static void
762 vnet_ether_init(__unused void *arg)
763 {
764 	struct pfil_head_args args;
765 
766 	args.pa_version = PFIL_VERSION;
767 	args.pa_flags = PFIL_IN | PFIL_OUT;
768 	args.pa_type = PFIL_TYPE_ETHERNET;
769 	args.pa_headname = PFIL_ETHER_NAME;
770 	V_link_pfil_head = pfil_head_register(&args);
771 
772 #ifdef VIMAGE
773 	netisr_register_vnet(&ether_nh);
774 #endif
775 }
776 VNET_SYSINIT(vnet_ether_init, SI_SUB_PROTO_IF, SI_ORDER_ANY,
777     vnet_ether_init, NULL);
778 
779 #ifdef VIMAGE
780 static void
781 vnet_ether_pfil_destroy(__unused void *arg)
782 {
783 
784 	pfil_head_unregister(V_link_pfil_head);
785 }
786 VNET_SYSUNINIT(vnet_ether_pfil_uninit, SI_SUB_PROTO_PFIL, SI_ORDER_ANY,
787     vnet_ether_pfil_destroy, NULL);
788 
789 static void
790 vnet_ether_destroy(__unused void *arg)
791 {
792 
793 	netisr_unregister_vnet(&ether_nh);
794 }
795 VNET_SYSUNINIT(vnet_ether_uninit, SI_SUB_PROTO_IF, SI_ORDER_ANY,
796     vnet_ether_destroy, NULL);
797 #endif
798 
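/*
 * if_input entry point for Ethernet drivers: enter the network epoch on
 * behalf of drivers that do not do so themselves (IFF_KNOWSEPOCH) and
 * feed each packet of the chain into the NETISR_ETHER dispatch path.
 */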
799 static void
800 ether_input(struct ifnet *ifp, struct mbuf *m)
801 {
802 	struct epoch_tracker et;
803 	struct mbuf *mn;
804 	bool needs_epoch;
805 
806 	needs_epoch = !(ifp->if_flags & IFF_KNOWSEPOCH);
807 
808 	/*
809 	 * The drivers are allowed to pass in a chain of packets linked with
810 	 * m_nextpkt. We split them up into separate packets here and pass
811 	 * them up. This allows the drivers to amortize the receive lock.
812 	 */
813 	CURVNET_SET_QUIET(ifp->if_vnet);
814 	if (__predict_false(needs_epoch))
815 		NET_EPOCH_ENTER(et);
816 	while (m) {
817 		mn = m->m_nextpkt;
818 		m->m_nextpkt = NULL;
819 
820 		/*
821 		 * We will rely on rcvif being set properly in the deferred
822 		 * context, so assert it is correct here.
823 		 */
824 		MPASS((m->m_pkthdr.csum_flags & CSUM_SND_TAG) == 0);
825 		KASSERT(m->m_pkthdr.rcvif == ifp, ("%s: ifnet mismatch m %p "
826 		    "rcvif %p ifp %p", __func__, m, m->m_pkthdr.rcvif, ifp));
827 		netisr_dispatch(NETISR_ETHER, m);
828 		m = mn;
829 	}
830 	if (__predict_false(needs_epoch))
831 		NET_EPOCH_EXIT(et);
832 	CURVNET_RESTORE();
833 }
834 
835 /*
836  * Upper layer processing for a received Ethernet packet.
837  */
838 void
839 ether_demux(struct ifnet *ifp, struct mbuf *m)
840 {
841 	struct ether_header *eh;
842 	int i, isr;
843 	u_short ether_type;
844 
845 	NET_EPOCH_ASSERT();
846 	KASSERT(ifp != NULL, ("%s: NULL interface pointer", __func__));
847 
848 	/* Do not grab PROMISC frames in case we are re-entered. */
849 	if (PFIL_HOOKED_IN(V_link_pfil_head) && !(m->m_flags & M_PROMISC)) {
850 		i = pfil_run_hooks(V_link_pfil_head, &m, ifp, PFIL_IN, NULL);
851 		if (i != 0 || m == NULL)
852 			return;
853 	}
854 
855 	eh = mtod(m, struct ether_header *);
856 	ether_type = ntohs(eh->ether_type);
857 
858 	/*
859 	 * If this frame has a VLAN tag other than 0, call vlan_input()
860 	 * if its module is loaded. Otherwise, drop.
861 	 */
862 	if ((m->m_flags & M_VLANTAG) &&
863 	    EVL_VLANOFTAG(m->m_pkthdr.ether_vtag) != 0) {
864 		if (ifp->if_vlantrunk == NULL) {
865 			if_inc_counter(ifp, IFCOUNTER_NOPROTO, 1);
866 			m_freem(m);
867 			return;
868 		}
869 		KASSERT(vlan_input_p != NULL,("%s: VLAN not loaded!",
870 		    __func__));
871 		/* Clear before possibly re-entering ether_input(). */
872 		m->m_flags &= ~M_PROMISC;
873 		(*vlan_input_p)(ifp, m);
874 		return;
875 	}
876 
877 	/*
878 	 * Pass promiscuously received frames to the upper layer if the user
879 	 * requested this by setting IFF_PPROMISC. Otherwise, drop them.
880 	 */
881 	if ((ifp->if_flags & IFF_PPROMISC) == 0 && (m->m_flags & M_PROMISC)) {
882 		m_freem(m);
883 		return;
884 	}
885 
886 	/*
887 	 * Reset layer specific mbuf flags to avoid confusing upper layers.
888 	 * Strip off Ethernet header.
889 	 */
890 	m->m_flags &= ~M_VLANTAG;
891 	m_clrprotoflags(m);
892 	m_adj(m, ETHER_HDR_LEN);
893 
894 	/*
895 	 * Dispatch frame to upper layer.
896 	 */
897 	switch (ether_type) {
898 #ifdef INET
899 	case ETHERTYPE_IP:
900 		isr = NETISR_IP;
901 		break;
902 
903 	case ETHERTYPE_ARP:
904 		if (ifp->if_flags & IFF_NOARP) {
905 			/* Discard packet if ARP is disabled on interface */
906 			m_freem(m);
907 			return;
908 		}
909 		isr = NETISR_ARP;
910 		break;
911 #endif
912 #ifdef INET6
913 	case ETHERTYPE_IPV6:
914 		isr = NETISR_IPV6;
915 		break;
916 #endif
917 	default:
918 		goto discard;
919 	}
920 	netisr_dispatch(isr, m);
921 	return;
922 
923 discard:
924 	/*
925 	 * Packet is to be discarded.  If netgraph is present,
926 	 * hand the packet to it for last chance processing;
927 	 * otherwise dispose of it.
928 	 */
929 	if (ifp->if_l2com != NULL) {
930 		KASSERT(ng_ether_input_orphan_p != NULL,
931 		    ("ng_ether_input_orphan_p is NULL"));
932 		/*
933 		 * Put back the ethernet header so netgraph has a
934 		 * consistent view of inbound packets.
935 		 */
936 		M_PREPEND(m, ETHER_HDR_LEN, M_NOWAIT);
937 		(*ng_ether_input_orphan_p)(ifp, m);
938 		return;
939 	}
940 	m_freem(m);
941 }
942 
943 /*
944  * Convert Ethernet address to printable (loggable) representation.
945  * This routine is for compatibility; it's better to just use
946  *
947  *	printf("%6D", <pointer to address>, ":");
948  *
949  * since there's no static buffer involved.
950  */
951 char *
952 ether_sprintf(const u_char *ap)
953 {
954 	static char etherbuf[18];
955 	snprintf(etherbuf, sizeof (etherbuf), "%6D", ap, ":");
956 	return (etherbuf);
957 }
958 
959 /*
960  * Perform common duties while attaching to interface list
961  */
962 void
963 ether_ifattach(struct ifnet *ifp, const u_int8_t *lla)
964 {
965 	int i;
966 	struct ifaddr *ifa;
967 	struct sockaddr_dl *sdl;
968 
969 	ifp->if_addrlen = ETHER_ADDR_LEN;
970 	ifp->if_hdrlen = ETHER_HDR_LEN;
971 	ifp->if_mtu = ETHERMTU;
972 	if_attach(ifp);
973 	ifp->if_output = ether_output;
974 	ifp->if_input = ether_input;
975 	ifp->if_resolvemulti = ether_resolvemulti;
976 	ifp->if_requestencap = ether_requestencap;
977 #ifdef VIMAGE
978 	ifp->if_reassign = ether_reassign;
979 #endif
980 	if (ifp->if_baudrate == 0)
981 		ifp->if_baudrate = IF_Mbps(10);		/* just a default */
982 	ifp->if_broadcastaddr = etherbroadcastaddr;
983 
984 	ifa = ifp->if_addr;
985 	KASSERT(ifa != NULL, ("%s: no lladdr!\n", __func__));
986 	sdl = (struct sockaddr_dl *)ifa->ifa_addr;
987 	sdl->sdl_type = IFT_ETHER;
988 	sdl->sdl_alen = ifp->if_addrlen;
989 	bcopy(lla, LLADDR(sdl), ifp->if_addrlen);
990 
991 	if (ifp->if_hw_addr != NULL)
992 		bcopy(lla, ifp->if_hw_addr, ifp->if_addrlen);
993 
994 	bpfattach(ifp, DLT_EN10MB, ETHER_HDR_LEN);
995 	if (ng_ether_attach_p != NULL)
996 		(*ng_ether_attach_p)(ifp);
997 
998 	/* Announce Ethernet MAC address if non-zero. */
999 	for (i = 0; i < ifp->if_addrlen; i++)
1000 		if (lla[i] != 0)
1001 			break;
1002 	if (i != ifp->if_addrlen)
1003 		if_printf(ifp, "Ethernet address: %6D\n", lla, ":");
1004 
1005 	uuid_ether_add(LLADDR(sdl));
1006 
1007 	/* All necessary bits are set up; announce it now. */
1008 	EVENTHANDLER_INVOKE(ether_ifattach_event, ifp);
1009 	if (IS_DEFAULT_VNET(curvnet))
1010 		devctl_notify("ETHERNET", ifp->if_xname, "IFATTACH", NULL);
1011 }
1012 
1013 /*
1014  * Perform common duties while detaching an Ethernet interface
1015  */
1016 void
1017 ether_ifdetach(struct ifnet *ifp)
1018 {
1019 	struct sockaddr_dl *sdl;
1020 
1021 	sdl = (struct sockaddr_dl *)(ifp->if_addr->ifa_addr);
1022 	uuid_ether_del(LLADDR(sdl));
1023 
1024 	if (ifp->if_l2com != NULL) {
1025 		KASSERT(ng_ether_detach_p != NULL,
1026 		    ("ng_ether_detach_p is NULL"));
1027 		(*ng_ether_detach_p)(ifp);
1028 	}
1029 
1030 	bpfdetach(ifp);
1031 	if_detach(ifp);
1032 }
1033 
1034 #ifdef VIMAGE
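/*
 * When an interface is reassigned to another vnet, tear down its
 * ng_ether(4) node in the old vnet and create a fresh one in the new
 * vnet.
 */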
1035 void
1036 ether_reassign(struct ifnet *ifp, struct vnet *new_vnet, char *unused __unused)
1037 {
1038 
1039 	if (ifp->if_l2com != NULL) {
1040 		KASSERT(ng_ether_detach_p != NULL,
1041 		    ("ng_ether_detach_p is NULL"));
1042 		(*ng_ether_detach_p)(ifp);
1043 	}
1044 
1045 	if (ng_ether_attach_p != NULL) {
1046 		CURVNET_SET_QUIET(new_vnet);
1047 		(*ng_ether_attach_p)(ifp);
1048 		CURVNET_RESTORE();
1049 	}
1050 }
1051 #endif
1052 
1053 SYSCTL_DECL(_net_link);
1054 SYSCTL_NODE(_net_link, IFT_ETHER, ether, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
1055     "Ethernet");
1056 
1057 #if 0
1058 /*
1059  * This is for reference.  We have a table-driven version
1060  * of the little-endian crc32 generator, which is faster
1061  * than the double-loop.
1062  */
1063 uint32_t
1064 ether_crc32_le(const uint8_t *buf, size_t len)
1065 {
1066 	size_t i;
1067 	uint32_t crc, carry;
1068 	int bit;
1069 	uint8_t data;
1070 
1071 	crc = 0xffffffff;	/* initial value */
1072 
1073 	for (i = 0; i < len; i++) {
1074 		for (data = *buf++, bit = 0; bit < 8; bit++, data >>= 1) {
1075 			carry = (crc ^ data) & 1;
1076 			crc >>= 1;
1077 			if (carry)
1078 				crc = (crc ^ ETHER_CRC_POLY_LE);
1079 		}
1080 	}
1081 
1082 	return (crc);
1083 }
1084 #else
1085 uint32_t
1086 ether_crc32_le(const uint8_t *buf, size_t len)
1087 {
1088 	static const uint32_t crctab[] = {
1089 		0x00000000, 0x1db71064, 0x3b6e20c8, 0x26d930ac,
1090 		0x76dc4190, 0x6b6b51f4, 0x4db26158, 0x5005713c,
1091 		0xedb88320, 0xf00f9344, 0xd6d6a3e8, 0xcb61b38c,
1092 		0x9b64c2b0, 0x86d3d2d4, 0xa00ae278, 0xbdbdf21c
1093 	};
1094 	size_t i;
1095 	uint32_t crc;
1096 
1097 	crc = 0xffffffff;	/* initial value */
1098 
1099 	for (i = 0; i < len; i++) {
1100 		crc ^= buf[i];
1101 		crc = (crc >> 4) ^ crctab[crc & 0xf];
1102 		crc = (crc >> 4) ^ crctab[crc & 0xf];
1103 	}
1104 
1105 	return (crc);
1106 }
1107 #endif
1108 
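/*
 * Bit-wise, MSB-first (big-endian) Ethernet CRC32, typically used by
 * drivers to compute multicast filter hash values.
 */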
1109 uint32_t
1110 ether_crc32_be(const uint8_t *buf, size_t len)
1111 {
1112 	size_t i;
1113 	uint32_t crc, carry;
1114 	int bit;
1115 	uint8_t data;
1116 
1117 	crc = 0xffffffff;	/* initial value */
1118 
1119 	for (i = 0; i < len; i++) {
1120 		for (data = *buf++, bit = 0; bit < 8; bit++, data >>= 1) {
1121 			carry = ((crc & 0x80000000) ? 1 : 0) ^ (data & 0x01);
1122 			crc <<= 1;
1123 			if (carry)
1124 				crc = (crc ^ ETHER_CRC_POLY_BE) | carry;
1125 		}
1126 	}
1127 
1128 	return (crc);
1129 }
1130 
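/*
 * Common ioctl handling for Ethernet drivers: interface address
 * assignment, reporting the MAC address, MTU changes, and getting or
 * setting the 802.1p PCP used on output.
 */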
1131 int
1132 ether_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
1133 {
1134 	struct ifaddr *ifa = (struct ifaddr *) data;
1135 	struct ifreq *ifr = (struct ifreq *) data;
1136 	int error = 0;
1137 
1138 	switch (command) {
1139 	case SIOCSIFADDR:
1140 		ifp->if_flags |= IFF_UP;
1141 
1142 		switch (ifa->ifa_addr->sa_family) {
1143 #ifdef INET
1144 		case AF_INET:
1145 			ifp->if_init(ifp->if_softc);	/* before arpwhohas */
1146 			arp_ifinit(ifp, ifa);
1147 			break;
1148 #endif
1149 		default:
1150 			ifp->if_init(ifp->if_softc);
1151 			break;
1152 		}
1153 		break;
1154 
1155 	case SIOCGIFADDR:
1156 		bcopy(IF_LLADDR(ifp), &ifr->ifr_addr.sa_data[0],
1157 		    ETHER_ADDR_LEN);
1158 		break;
1159 
1160 	case SIOCSIFMTU:
1161 		/*
1162 		 * Set the interface MTU.
1163 		 */
1164 		if (ifr->ifr_mtu > ETHERMTU) {
1165 			error = EINVAL;
1166 		} else {
1167 			ifp->if_mtu = ifr->ifr_mtu;
1168 		}
1169 		break;
1170 
1171 	case SIOCSLANPCP:
1172 		error = priv_check(curthread, PRIV_NET_SETLANPCP);
1173 		if (error != 0)
1174 			break;
1175 		if (ifr->ifr_lan_pcp > 7 &&
1176 		    ifr->ifr_lan_pcp != IFNET_PCP_NONE) {
1177 			error = EINVAL;
1178 		} else {
1179 			ifp->if_pcp = ifr->ifr_lan_pcp;
1180 			/* broadcast event about PCP change */
1181 			EVENTHANDLER_INVOKE(ifnet_event, ifp, IFNET_EVENT_PCP);
1182 		}
1183 		break;
1184 
1185 	case SIOCGLANPCP:
1186 		ifr->ifr_lan_pcp = ifp->if_pcp;
1187 		break;
1188 
1189 	default:
1190 		error = EINVAL;			/* XXX netbsd has ENOTTY??? */
1191 		break;
1192 	}
1193 	return (error);
1194 }
1195 
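/*
 * Map a protocol-level multicast address (AF_LINK, AF_INET or AF_INET6)
 * onto the link-layer multicast address to be joined on this interface.
 * *llsa is left NULL when no mapping is required.
 */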
1196 static int
1197 ether_resolvemulti(struct ifnet *ifp, struct sockaddr **llsa,
1198 	struct sockaddr *sa)
1199 {
1200 	struct sockaddr_dl *sdl;
1201 #ifdef INET
1202 	struct sockaddr_in *sin;
1203 #endif
1204 #ifdef INET6
1205 	struct sockaddr_in6 *sin6;
1206 #endif
1207 	u_char *e_addr;
1208 
1209 	switch(sa->sa_family) {
1210 	case AF_LINK:
1211 		/*
1212 		 * No mapping needed. Just check that it's a valid MC address.
1213 		 */
1214 		sdl = (struct sockaddr_dl *)sa;
1215 		e_addr = LLADDR(sdl);
1216 		if (!ETHER_IS_MULTICAST(e_addr))
1217 			return EADDRNOTAVAIL;
1218 		*llsa = NULL;
1219 		return 0;
1220 
1221 #ifdef INET
1222 	case AF_INET:
1223 		sin = (struct sockaddr_in *)sa;
1224 		if (!IN_MULTICAST(ntohl(sin->sin_addr.s_addr)))
1225 			return EADDRNOTAVAIL;
1226 		sdl = link_init_sdl(ifp, *llsa, IFT_ETHER);
1227 		sdl->sdl_alen = ETHER_ADDR_LEN;
1228 		e_addr = LLADDR(sdl);
1229 		ETHER_MAP_IP_MULTICAST(&sin->sin_addr, e_addr);
1230 		*llsa = (struct sockaddr *)sdl;
1231 		return 0;
1232 #endif
1233 #ifdef INET6
1234 	case AF_INET6:
1235 		sin6 = (struct sockaddr_in6 *)sa;
1236 		if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
1237 			/*
1238 			 * An IP6 address of 0 means listen to all
1239 			 * of the Ethernet multicast address used for IP6.
1240 			 * (This is used for multicast routers.)
1241 			 */
1242 			ifp->if_flags |= IFF_ALLMULTI;
1243 			*llsa = NULL;
1244 			return 0;
1245 		}
1246 		if (!IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr))
1247 			return EADDRNOTAVAIL;
1248 		sdl = link_init_sdl(ifp, *llsa, IFT_ETHER);
1249 		sdl->sdl_alen = ETHER_ADDR_LEN;
1250 		e_addr = LLADDR(sdl);
1251 		ETHER_MAP_IPV6_MULTICAST(&sin6->sin6_addr, e_addr);
1252 		*llsa = (struct sockaddr *)sdl;
1253 		return 0;
1254 #endif
1255 
1256 	default:
1257 		/*
1258 		 * Well, the text isn't quite right, but it's the name
1259 		 * that counts...
1260 		 */
1261 		return EAFNOSUPPORT;
1262 	}
1263 }
1264 
1265 static moduledata_t ether_mod = {
1266 	.name = "ether",
1267 };
1268 
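/*
 * Tap a frame whose 802.1Q tag is carried out of band (M_VLANTAG) to
 * BPF, temporarily reconstructing the in-line VLAN header so captures
 * show the tag; 'data' optionally supplies extra link-layer framing to
 * prepend.
 */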
1269 void
1270 ether_vlan_mtap(struct bpf_if *bp, struct mbuf *m, void *data, u_int dlen)
1271 {
1272 	struct ether_vlan_header vlan;
1273 	struct mbuf mv, mb;
1274 
1275 	KASSERT((m->m_flags & M_VLANTAG) != 0,
1276 	    ("%s: vlan information not present", __func__));
1277 	KASSERT(m->m_len >= sizeof(struct ether_header),
1278 	    ("%s: mbuf not large enough for header", __func__));
1279 	bcopy(mtod(m, char *), &vlan, sizeof(struct ether_header));
1280 	vlan.evl_proto = vlan.evl_encap_proto;
1281 	vlan.evl_encap_proto = htons(ETHERTYPE_VLAN);
1282 	vlan.evl_tag = htons(m->m_pkthdr.ether_vtag);
1283 	m->m_len -= sizeof(struct ether_header);
1284 	m->m_data += sizeof(struct ether_header);
1285 	/*
1286 	 * If a data link has been supplied by the caller, then we will need to
1287 	 * re-create a stack allocated mbuf chain with the following structure:
1288 	 *
1289 	 * (1) mbuf #1 will contain the supplied data link
1290 	 * (2) mbuf #2 will contain the vlan header
1291 	 * (3) mbuf #3 will contain the original mbuf's packet data
1292 	 *
1293 	 * Otherwise, submit the packet and vlan header via bpf_mtap2().
1294 	 */
1295 	if (data != NULL) {
1296 		mv.m_next = m;
1297 		mv.m_data = (caddr_t)&vlan;
1298 		mv.m_len = sizeof(vlan);
1299 		mb.m_next = &mv;
1300 		mb.m_data = data;
1301 		mb.m_len = dlen;
1302 		bpf_mtap(bp, &mb);
1303 	} else
1304 		bpf_mtap2(bp, &vlan, sizeof(vlan), m);
1305 	m->m_len += sizeof(struct ether_header);
1306 	m->m_data -= sizeof(struct ether_header);
1307 }
1308 
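/*
 * Insert an 802.1Q encapsulation (Ethertype 'proto', tag 'tag') between
 * the existing Ethernet header and the payload.  Returns NULL, with the
 * chain freed, if memory cannot be allocated.
 */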
1309 struct mbuf *
1310 ether_vlanencap_proto(struct mbuf *m, uint16_t tag, uint16_t proto)
1311 {
1312 	struct ether_vlan_header *evl;
1313 
1314 	M_PREPEND(m, ETHER_VLAN_ENCAP_LEN, M_NOWAIT);
1315 	if (m == NULL)
1316 		return (NULL);
1317 	/* M_PREPEND takes care of m_len, m_pkthdr.len for us */
1318 
1319 	if (m->m_len < sizeof(*evl)) {
1320 		m = m_pullup(m, sizeof(*evl));
1321 		if (m == NULL)
1322 			return (NULL);
1323 	}
1324 
1325 	/*
1326 	 * Transform the Ethernet header into an Ethernet header
1327 	 * with 802.1Q encapsulation.
1328 	 */
1329 	evl = mtod(m, struct ether_vlan_header *);
1330 	bcopy((char *)evl + ETHER_VLAN_ENCAP_LEN,
1331 	    (char *)evl, ETHER_HDR_LEN - ETHER_TYPE_LEN);
1332 	evl->evl_encap_proto = htons(proto);
1333 	evl->evl_tag = htons(tag);
1334 	return (m);
1335 }
1336 
1337 static SYSCTL_NODE(_net_link, IFT_L2VLAN, vlan, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
1338     "IEEE 802.1Q VLAN");
1339 static SYSCTL_NODE(_net_link_vlan, PF_LINK, link,
1340     CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
1341     "for consistency");
1342 
1343 VNET_DEFINE_STATIC(int, soft_pad);
1344 #define	V_soft_pad	VNET(soft_pad)
1345 SYSCTL_INT(_net_link_vlan, OID_AUTO, soft_pad, CTLFLAG_RW | CTLFLAG_VNET,
1346     &VNET_NAME(soft_pad), 0,
1347     "pad short frames before tagging");
1348 
1349 /*
1350  * For now, make preserving PCP via an mbuf tag optional, as it increases
1351  * per-packet memory allocations and frees.  In the future, it would be
1352  * preferable to reuse ether_vtag for this, or similar.
1353  */
1354 int vlan_mtag_pcp = 0;
1355 SYSCTL_INT(_net_link_vlan, OID_AUTO, mtag_pcp, CTLFLAG_RW,
1356     &vlan_mtag_pcp, 0,
1357     "Retain VLAN PCP information as packets are passed up the stack");
1358 
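/*
 * Prepare an outgoing frame for an 802.1Q interface: optionally pad
 * short frames, honour any PCP already attached to the mbuf, and insert
 * the VLAN tag either via hardware tag insertion (M_VLANTAG) or by
 * software encapsulation.  Returns false, with *mp freed, on failure.
 */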
1359 bool
1360 ether_8021q_frame(struct mbuf **mp, struct ifnet *ife, struct ifnet *p,
1361     struct ether_8021q_tag *qtag)
1362 {
1363 	struct m_tag *mtag;
1364 	int n;
1365 	uint16_t tag;
1366 	static const char pad[8];	/* just zeros */
1367 
1368 	/*
1369 	 * Pad the frame to the minimum size allowed if told to.
1370 	 * This option is in accord with IEEE Std 802.1Q, 2003 Ed.,
1371 	 * paragraph C.4.4.3.b.  It can help to work around buggy
1372 	 * bridges that violate paragraph C.4.4.3.a from the same
1373 	 * document, i.e., fail to pad short frames after untagging.
1374 	 * E.g., a tagged frame 66 bytes long (incl. FCS) is OK, but
1375 	 * untagging it will produce a 62-byte frame, which is a runt
1376 	 * and requires padding.  There are VLAN-enabled network
1377 	 * devices that just discard such runts instead or mishandle
1378 	 * them somehow.
1379 	 */
1380 	if (V_soft_pad && p->if_type == IFT_ETHER) {
1381 		for (n = ETHERMIN + ETHER_HDR_LEN - (*mp)->m_pkthdr.len;
1382 		     n > 0; n -= sizeof(pad)) {
1383 			if (!m_append(*mp, min(n, sizeof(pad)), pad))
1384 				break;
1385 		}
1386 		if (n > 0) {
1387 			m_freem(*mp);
1388 			*mp = NULL;
1389 			if_printf(ife, "cannot pad short frame");
1390 			return (false);
1391 		}
1392 	}
1393 
1394 	/*
1395 	 * If PCP is set in mbuf, use it
1396 	 */
1397 	if ((*mp)->m_flags & M_VLANTAG) {
1398 		qtag->pcp = EVL_PRIOFTAG((*mp)->m_pkthdr.ether_vtag);
1399 	}
1400 
1401 	/*
1402 	 * If underlying interface can do VLAN tag insertion itself,
1403 	 * just pass the packet along. However, we need some way to
1404 	 * tell the interface where the packet came from so that it
1405 	 * knows how to find the VLAN tag to use, so we attach a
1406 	 * packet tag that holds it.
1407 	 */
1408 	if (vlan_mtag_pcp && (mtag = m_tag_locate(*mp, MTAG_8021Q,
1409 	    MTAG_8021Q_PCP_OUT, NULL)) != NULL)
1410 		tag = EVL_MAKETAG(qtag->vid, *(uint8_t *)(mtag + 1), 0);
1411 	else
1412 		tag = EVL_MAKETAG(qtag->vid, qtag->pcp, 0);
1413 	if ((p->if_capenable & IFCAP_VLAN_HWTAGGING) &&
1414 	    (qtag->proto == ETHERTYPE_VLAN)) {
1415 		(*mp)->m_pkthdr.ether_vtag = tag;
1416 		(*mp)->m_flags |= M_VLANTAG;
1417 	} else {
1418 		*mp = ether_vlanencap_proto(*mp, tag, qtag->proto);
1419 		if (*mp == NULL) {
1420 			if_printf(ife, "unable to prepend 802.1Q header");
1421 			return (false);
1422 		}
1423 	}
1424 	return (true);
1425 }
1426 
1427 /*
1428  * Allocate an address from the FreeBSD Foundation OUI.  This uses a
1429  * cryptographic hash function on the containing jail's name, UUID and the
1430  * interface name to attempt to provide a unique but stable address.
1431  * Pseudo-interfaces which require a MAC address should use this function to
1432  * allocate non-locally-administered addresses.
1433  */
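 *
 * A pseudo-interface attach path would typically use this roughly as
 * follows (a sketch, not taken verbatim from any driver):
 *
 *	struct ether_addr ea;
 *
 *	ether_gen_addr(ifp, &ea);
 *	ether_ifattach(ifp, ea.octet);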
1434 void
1435 ether_gen_addr(struct ifnet *ifp, struct ether_addr *hwaddr)
1436 {
1437 	SHA1_CTX ctx;
1438 	char *buf;
1439 	char uuid[HOSTUUIDLEN + 1];
1440 	uint64_t addr;
1441 	int i, sz;
1442 	char digest[SHA1_RESULTLEN];
1443 	char jailname[MAXHOSTNAMELEN];
1444 
1445 	getcredhostuuid(curthread->td_ucred, uuid, sizeof(uuid));
1446 	if (strncmp(uuid, DEFAULT_HOSTUUID, sizeof(uuid)) == 0) {
1447 		/* Fall back to a random mac address. */
1448 		goto rando;
1449 	}
1450 
1451 	/* If each (vnet) jail would also have a unique hostuuid this would not
1452 	 * be necessary. */
1453 	getjailname(curthread->td_ucred, jailname, sizeof(jailname));
1454 	sz = asprintf(&buf, M_TEMP, "%s-%s-%s", uuid, if_name(ifp),
1455 	    jailname);
1456 	if (sz < 0) {
1457 		/* Fall back to a random mac address. */
1458 		goto rando;
1459 	}
1460 
1461 	SHA1Init(&ctx);
1462 	SHA1Update(&ctx, buf, sz);
1463 	SHA1Final(digest, &ctx);
1464 	free(buf, M_TEMP);
1465 
1466 	addr = ((digest[0] << 16) | (digest[1] << 8) | digest[2]) &
1467 	    OUI_FREEBSD_GENERATED_MASK;
1468 	addr = OUI_FREEBSD(addr);
1469 	for (i = 0; i < ETHER_ADDR_LEN; ++i) {
1470 		hwaddr->octet[i] = addr >> ((ETHER_ADDR_LEN - i - 1) * 8) &
1471 		    0xFF;
1472 	}
1473 
1474 	return;
1475 rando:
1476 	arc4rand(hwaddr, sizeof(*hwaddr), 0);
1477 	/* Unicast */
1478 	hwaddr->octet[0] &= 0xFE;
1479 	/* Locally administered. */
1480 	hwaddr->octet[0] |= 0x02;
1481 }
1482 
1483 DECLARE_MODULE(ether, ether_mod, SI_SUB_INIT_IF, SI_ORDER_ANY);
1484 MODULE_VERSION(ether, 1);
1485