1 /*-
2 * SPDX-License-Identifier: BSD-3-Clause
3 *
4 * Copyright (c) 1982, 1989, 1993
5 * The Regents of the University of California. All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the name of the University nor the names of its contributors
16 * may be used to endorse or promote products derived from this software
17 * without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 */
31
32 #include "opt_inet.h"
33 #include "opt_inet6.h"
34 #include "opt_netgraph.h"
35 #include "opt_mbuf_profiling.h"
36 #include "opt_rss.h"
37
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/devctl.h>
41 #include <sys/eventhandler.h>
42 #include <sys/jail.h>
43 #include <sys/kernel.h>
44 #include <sys/lock.h>
45 #include <sys/malloc.h>
46 #include <sys/mbuf.h>
47 #include <sys/module.h>
48 #include <sys/msan.h>
49 #include <sys/proc.h>
50 #include <sys/priv.h>
51 #include <sys/random.h>
52 #include <sys/socket.h>
53 #include <sys/sockio.h>
54 #include <sys/sysctl.h>
55 #include <sys/uuid.h>
56 #ifdef KDB
57 #include <sys/kdb.h>
58 #endif
59
60 #include <net/ieee_oui.h>
61 #include <net/if.h>
62 #include <net/if_var.h>
63 #include <net/if_private.h>
64 #include <net/if_arp.h>
65 #include <net/netisr.h>
66 #include <net/route.h>
67 #include <net/if_llc.h>
68 #include <net/if_dl.h>
69 #include <net/if_types.h>
70 #include <net/bpf.h>
71 #include <net/ethernet.h>
72 #include <net/if_bridgevar.h>
73 #include <net/if_vlan_var.h>
74 #include <net/if_llatbl.h>
75 #include <net/pfil.h>
76 #include <net/rss_config.h>
77 #include <net/vnet.h>
78
79 #include <netpfil/pf/pf_mtag.h>
80
81 #if defined(INET) || defined(INET6)
82 #include <netinet/in.h>
83 #include <netinet/in_var.h>
84 #include <netinet/if_ether.h>
85 #include <netinet/ip_carp.h>
86 #include <netinet/ip_var.h>
87 #endif
88 #ifdef INET6
89 #include <netinet6/nd6.h>
90 #endif
91 #include <security/mac/mac_framework.h>
92
93 #include <crypto/sha1.h>
94
95 VNET_DEFINE(pfil_head_t, link_pfil_head); /* Packet filter hooks */
96
97 /* netgraph node hooks for ng_ether(4) */
98 void (*ng_ether_input_p)(struct ifnet *ifp, struct mbuf **mp);
99 void (*ng_ether_input_orphan_p)(struct ifnet *ifp, struct mbuf *m);
100 int (*ng_ether_output_p)(struct ifnet *ifp, struct mbuf **mp);
101 void (*ng_ether_attach_p)(struct ifnet *ifp);
102 void (*ng_ether_detach_p)(struct ifnet *ifp);
103
104 /* if_bridge(4) support */
105 void (*bridge_dn_p)(struct mbuf *, struct ifnet *);
106 bool (*bridge_same_p)(const void *, const void *);
107 void *(*bridge_get_softc_p)(struct ifnet *);
108 bool (*bridge_member_ifaddrs_p)(void);
109
110 /* if_lagg(4) support */
111 struct mbuf *(*lagg_input_ethernet_p)(struct ifnet *, struct mbuf *);
112
113 static const u_char etherbroadcastaddr[ETHER_ADDR_LEN] =
114 { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
115
116 static int ether_resolvemulti(struct ifnet *, struct sockaddr **,
117 struct sockaddr *);
118 static int ether_requestencap(struct ifnet *, struct if_encap_req *);
119
120 static inline bool ether_do_pcp(struct ifnet *, struct mbuf *);
121
122 #define senderr(e) do { error = (e); goto bad;} while (0)
123
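/*
 * Convert the transmit checksum-offload requests carried in 'src' into the
 * corresponding "already verified" receive flags on 'dst', so that a copy
 * looped back locally is accepted without being checksummed again.
 */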
124 static void
125 update_mbuf_csumflags(struct mbuf *src, struct mbuf *dst)
126 {
127 int csum_flags = 0;
128
129 if (src->m_pkthdr.csum_flags & CSUM_IP)
130 csum_flags |= (CSUM_IP_CHECKED|CSUM_IP_VALID);
131 if (src->m_pkthdr.csum_flags & CSUM_DELAY_DATA)
132 csum_flags |= (CSUM_DATA_VALID|CSUM_PSEUDO_HDR);
133 if (src->m_pkthdr.csum_flags & CSUM_SCTP)
134 csum_flags |= CSUM_SCTP_VALID;
135 dst->m_pkthdr.csum_flags |= csum_flags;
136 if (csum_flags & CSUM_DATA_VALID)
137 dst->m_pkthdr.csum_data = 0xffff;
138 }
139
140 /*
141 * Handle link-layer encapsulation requests.
142 */
143 static int
144 ether_requestencap(struct ifnet *ifp, struct if_encap_req *req)
145 {
146 struct ether_header *eh;
147 struct arphdr *ah;
148 uint16_t etype;
149 const u_char *lladdr;
150
151 if (req->rtype != IFENCAP_LL)
152 return (EOPNOTSUPP);
153
154 if (req->bufsize < ETHER_HDR_LEN)
155 return (ENOMEM);
156
157 eh = (struct ether_header *)req->buf;
158 lladdr = req->lladdr;
159 req->lladdr_off = 0;
160
161 switch (req->family) {
162 case AF_INET:
163 etype = htons(ETHERTYPE_IP);
164 break;
165 case AF_INET6:
166 etype = htons(ETHERTYPE_IPV6);
167 break;
168 case AF_ARP:
169 ah = (struct arphdr *)req->hdata;
170 ah->ar_hrd = htons(ARPHRD_ETHER);
171
172 switch(ntohs(ah->ar_op)) {
173 case ARPOP_REVREQUEST:
174 case ARPOP_REVREPLY:
175 etype = htons(ETHERTYPE_REVARP);
176 break;
177 case ARPOP_REQUEST:
178 case ARPOP_REPLY:
179 default:
180 etype = htons(ETHERTYPE_ARP);
181 break;
182 }
183
184 if (req->flags & IFENCAP_FLAG_BROADCAST)
185 lladdr = ifp->if_broadcastaddr;
186 break;
187 default:
188 return (EAFNOSUPPORT);
189 }
190
191 memcpy(&eh->ether_type, &etype, sizeof(eh->ether_type));
192 memcpy(eh->ether_dhost, lladdr, ETHER_ADDR_LEN);
193 memcpy(eh->ether_shost, IF_LLADDR(ifp), ETHER_ADDR_LEN);
194 req->bufsize = sizeof(struct ether_header);
195
196 return (0);
197 }
198
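/*
 * Resolve the link-layer address for 'dst' and construct a complete
 * Ethernet header in the buffer pointed to by 'phdr'.  Unicast destinations
 * go through ARP or ND6; broadcast and multicast destinations are mapped
 * directly.  On success '*pflags' carries the resulting flags (RT_MAY_LOOP,
 * RT_L2_ME) and '*plle', if requested, may reference the cached llentry.
 */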
199 static int
200 ether_resolve_addr(struct ifnet *ifp, struct mbuf *m,
201 const struct sockaddr *dst, struct route *ro, u_char *phdr,
202 uint32_t *pflags, struct llentry **plle)
203 {
204 uint32_t lleflags = 0;
205 int error = 0;
206 #if defined(INET) || defined(INET6)
207 struct ether_header *eh = (struct ether_header *)phdr;
208 uint16_t etype;
209 #endif
210
211 if (plle)
212 *plle = NULL;
213
214 switch (dst->sa_family) {
215 #ifdef INET
216 case AF_INET:
217 if ((m->m_flags & (M_BCAST | M_MCAST)) == 0)
218 error = arpresolve(ifp, 0, m, dst, phdr, &lleflags,
219 plle);
220 else {
221 if (m->m_flags & M_BCAST)
222 memcpy(eh->ether_dhost, ifp->if_broadcastaddr,
223 ETHER_ADDR_LEN);
224 else {
225 const struct in_addr *a;
226 a = &(((const struct sockaddr_in *)dst)->sin_addr);
227 ETHER_MAP_IP_MULTICAST(a, eh->ether_dhost);
228 }
229 etype = htons(ETHERTYPE_IP);
230 memcpy(&eh->ether_type, &etype, sizeof(etype));
231 memcpy(eh->ether_shost, IF_LLADDR(ifp), ETHER_ADDR_LEN);
232 }
233 break;
234 #endif
235 #ifdef INET6
236 case AF_INET6:
237 if ((m->m_flags & M_MCAST) == 0) {
238 int af = RO_GET_FAMILY(ro, dst);
239 error = nd6_resolve(ifp, LLE_SF(af, 0), m, dst, phdr,
240 &lleflags, plle);
241 } else {
242 const struct in6_addr *a6;
243 a6 = &(((const struct sockaddr_in6 *)dst)->sin6_addr);
244 ETHER_MAP_IPV6_MULTICAST(a6, eh->ether_dhost);
245 etype = htons(ETHERTYPE_IPV6);
246 memcpy(&eh->ether_type, &etype, sizeof(etype));
247 memcpy(eh->ether_shost, IF_LLADDR(ifp), ETHER_ADDR_LEN);
248 }
249 break;
250 #endif
251 default:
252 if_printf(ifp, "can't handle af%d\n", dst->sa_family);
253 if (m != NULL)
254 m_freem(m);
255 return (EAFNOSUPPORT);
256 }
257
258 if (error == EHOSTDOWN) {
259 if (ro != NULL && (ro->ro_flags & RT_HAS_GW) != 0)
260 error = EHOSTUNREACH;
261 }
262
263 if (error != 0)
264 return (error);
265
266 *pflags = RT_MAY_LOOP;
267 if (lleflags & LLE_IFADDR)
268 *pflags |= RT_L2_ME;
269
270 return (0);
271 }
272
273 /*
274 * Ethernet output routine.
275 * Encapsulate a packet of type family for the local net.
276 * Use trailer local net encapsulation if enough data in first
277 * packet leaves a multiple of 512 bytes of data in remainder.
278 */
279 int
280 ether_output(struct ifnet *ifp, struct mbuf *m,
281 const struct sockaddr *dst, struct route *ro)
282 {
283 int error = 0;
284 char linkhdr[ETHER_HDR_LEN], *phdr;
285 struct ether_header *eh;
286 struct pf_mtag *t;
287 bool loop_copy;
288 int hlen; /* link layer header length */
289 uint32_t pflags;
290 struct llentry *lle = NULL;
291 int addref = 0;
292
293 phdr = NULL;
294 pflags = 0;
295 if (ro != NULL) {
296 /* XXX BPF uses ro_prepend */
297 if (ro->ro_prepend != NULL) {
298 phdr = ro->ro_prepend;
299 hlen = ro->ro_plen;
300 } else if (!(m->m_flags & (M_BCAST | M_MCAST))) {
301 if ((ro->ro_flags & RT_LLE_CACHE) != 0) {
302 lle = ro->ro_lle;
303 if (lle != NULL &&
304 (lle->la_flags & LLE_VALID) == 0) {
305 LLE_FREE(lle);
306 lle = NULL; /* redundant */
307 ro->ro_lle = NULL;
308 }
309 if (lle == NULL) {
310 /* if we lookup, keep cache */
311 addref = 1;
312 } else
313 /*
314 * Notify LLE code that
315 * the entry was used
316 * by datapath.
317 */
318 llentry_provide_feedback(lle);
319 }
320 if (lle != NULL) {
321 phdr = lle->r_linkdata;
322 hlen = lle->r_hdrlen;
323 pflags = lle->r_flags;
324 }
325 }
326 }
327
328 #ifdef MAC
329 error = mac_ifnet_check_transmit(ifp, m);
330 if (error)
331 senderr(error);
332 #endif
333
334 M_PROFILE(m);
335 if (ifp->if_flags & IFF_MONITOR)
336 senderr(ENETDOWN);
337 if (!((ifp->if_flags & IFF_UP) &&
338 (ifp->if_drv_flags & IFF_DRV_RUNNING)))
339 senderr(ENETDOWN);
340
341 if (phdr == NULL) {
342 /* No prepend data supplied. Try to calculate ourselves. */
343 phdr = linkhdr;
344 hlen = ETHER_HDR_LEN;
345 error = ether_resolve_addr(ifp, m, dst, ro, phdr, &pflags,
346 addref ? &lle : NULL);
347 if (addref && lle != NULL)
348 ro->ro_lle = lle;
349 if (error != 0)
350 return (error == EWOULDBLOCK ? 0 : error);
351 }
352
353 if ((pflags & RT_L2_ME) != 0) {
354 update_mbuf_csumflags(m, m);
355 return (if_simloop(ifp, m, RO_GET_FAMILY(ro, dst), 0));
356 }
357 loop_copy = (pflags & RT_MAY_LOOP) != 0;
358
359 /*
360 * Add local net header. If no space in first mbuf,
361 * allocate another.
362 *
363 * Note that we do prepend regardless of RT_HAS_HEADER flag.
364 * This is done because BPF code shifts m_data pointer
365 * to the end of ethernet header prior to calling if_output().
366 */
367 M_PREPEND(m, hlen, M_NOWAIT);
368 if (m == NULL)
369 senderr(ENOBUFS);
370 if ((pflags & RT_HAS_HEADER) == 0) {
371 eh = mtod(m, struct ether_header *);
372 memcpy(eh, phdr, hlen);
373 }
374
375 /*
376 * If a simplex interface, and the packet is being sent to our
377 * Ethernet address or a broadcast address, loopback a copy.
378 * XXX To make a simplex device behave exactly like a duplex
379 * device, we should copy in the case of sending to our own
380 * ethernet address (thus letting the original actually appear
381 * on the wire). However, we don't do that here for security
382 * reasons and compatibility with the original behavior.
383 */
384 if ((m->m_flags & M_BCAST) && loop_copy && (ifp->if_flags & IFF_SIMPLEX) &&
385 ((t = pf_find_mtag(m)) == NULL || !t->routed)) {
386 struct mbuf *n;
387
388 /*
389 * Because if_simloop() modifies the packet, we need a
390 * writable copy through m_dup() instead of a readonly
391 * one as m_copy[m] would give us. The alternative would
392 * be to modify if_simloop() to handle the readonly mbuf,
393 * but performancewise it is mostly equivalent (trading
394 * extra data copying vs. extra locking).
395 *
396 * XXX This is a local workaround. A number of less
397 * often used kernel parts suffer from the same bug.
398 * See PR kern/105943 for a proposed general solution.
399 */
400 if ((n = m_dup(m, M_NOWAIT)) != NULL) {
401 update_mbuf_csumflags(m, n);
402 (void)if_simloop(ifp, n, RO_GET_FAMILY(ro, dst), hlen);
403 } else
404 if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
405 }
406
407 /*
408 * Bridges require special output handling.
409 */
410 if (ifp->if_bridge) {
411 BRIDGE_OUTPUT(ifp, m, error);
412 return (error);
413 }
414
415 #if defined(INET) || defined(INET6)
416 if (ifp->if_carp &&
417 (error = (*carp_output_p)(ifp, m, dst)))
418 goto bad;
419 #endif
420
421 /* Handle ng_ether(4) processing, if any */
422 if (ifp->if_l2com != NULL) {
423 KASSERT(ng_ether_output_p != NULL,
424 ("ng_ether_output_p is NULL"));
425 if ((error = (*ng_ether_output_p)(ifp, &m)) != 0) {
426 bad: if (m != NULL)
427 m_freem(m);
428 return (error);
429 }
430 if (m == NULL)
431 return (0);
432 }
433
434 /* Continue with link-layer output */
435 return (ether_output_frame(ifp, m));
436 }
437
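/*
 * Insert an 802.1Q header carrying only a priority (PCP) value, using
 * VLAN ID 0.  Frames that already carry an explicit VLAN or QinQ ethertype
 * are passed through unchanged (with any out-of-band tag cleared).
 * Returns false and counts an output error if tagging fails.
 */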
438 static bool
439 ether_set_pcp(struct mbuf **mp, struct ifnet *ifp, uint8_t pcp)
440 {
441 struct ether_8021q_tag qtag;
442 struct ether_header *eh;
443
444 eh = mtod(*mp, struct ether_header *);
445 if (eh->ether_type == htons(ETHERTYPE_VLAN) ||
446 eh->ether_type == htons(ETHERTYPE_QINQ)) {
447 (*mp)->m_flags &= ~M_VLANTAG;
448 return (true);
449 }
450
451 qtag.vid = 0;
452 qtag.pcp = pcp;
453 qtag.proto = ETHERTYPE_VLAN;
454 if (ether_8021q_frame(mp, ifp, ifp, &qtag))
455 return (true);
456 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
457 return (false);
458 }
459
460 /*
461 * Ethernet link layer output routine to send a raw frame to the device.
462 *
463 * This assumes that the 14 byte Ethernet header is present and contiguous
464 * in the first mbuf (if BRIDGE'ing).
465 */
466 int
467 ether_output_frame(struct ifnet *ifp, struct mbuf *m)
468 {
469 if (ether_do_pcp(ifp, m) && !ether_set_pcp(&m, ifp, ifp->if_pcp))
470 return (0);
471
472 if (PFIL_HOOKED_OUT(V_link_pfil_head))
473 switch (pfil_mbuf_out(V_link_pfil_head, &m, ifp, NULL)) {
474 case PFIL_DROPPED:
475 return (EACCES);
476 case PFIL_CONSUMED:
477 return (0);
478 }
479
480 #ifdef EXPERIMENTAL
481 #if defined(INET6) && defined(INET)
482 /* draft-ietf-6man-ipv6only-flag */
483 /* Catch ETHERTYPE_IP, and ETHERTYPE_[REV]ARP if we are v6-only. */
484 if ((ND_IFINFO(ifp)->flags & ND6_IFF_IPV6_ONLY_MASK) != 0) {
485 struct ether_header *eh;
486
487 eh = mtod(m, struct ether_header *);
488 switch (ntohs(eh->ether_type)) {
489 case ETHERTYPE_IP:
490 case ETHERTYPE_ARP:
491 case ETHERTYPE_REVARP:
492 m_freem(m);
493 return (EAFNOSUPPORT);
494 /* NOTREACHED */
495 break;
496 };
497 }
498 #endif
499 #endif
500
501 /*
502 * Queue message on interface, update output statistics if successful,
503 * and start output if interface not yet active.
504 *
505 * If KMSAN is enabled, use it to verify that the data does not contain
506 * any uninitialized bytes.
507 */
508 kmsan_check_mbuf(m, "ether_output");
509 return ((ifp->if_transmit)(ifp, m));
510 }
511
512 /*
513 * Process a received Ethernet packet; the packet is in the
514 * mbuf chain m with the ethernet header at the front.
515 */
516 static void
517 ether_input_internal(struct ifnet *ifp, struct mbuf *m)
518 {
519 struct ether_header *eh;
520 u_short etype;
521
522 if ((ifp->if_flags & IFF_UP) == 0) {
523 m_freem(m);
524 return;
525 }
526 #ifdef DIAGNOSTIC
527 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
528 if_printf(ifp, "discard frame at !IFF_DRV_RUNNING\n");
529 m_freem(m);
530 return;
531 }
532 #endif
533 if (__predict_false(m->m_len < ETHER_HDR_LEN)) {
534 /* Drivers should pullup and ensure the mbuf is valid */
535 if_printf(ifp, "discard frame w/o leading ethernet "
536 "header (len %d pkt len %d)\n",
537 m->m_len, m->m_pkthdr.len);
538 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
539 m_freem(m);
540 return;
541 }
542 eh = mtod(m, struct ether_header *);
543 etype = ntohs(eh->ether_type);
544 random_harvest_queue_ether(m, sizeof(*m));
545
546 #ifdef EXPERIMENTAL
547 #if defined(INET6) && defined(INET)
548 /* draft-ietf-6man-ipv6only-flag */
549 /* Catch ETHERTYPE_IP, and ETHERTYPE_[REV]ARP if we are v6-only. */
550 if ((ND_IFINFO(ifp)->flags & ND6_IFF_IPV6_ONLY_MASK) != 0) {
551 switch (etype) {
552 case ETHERTYPE_IP:
553 case ETHERTYPE_ARP:
554 case ETHERTYPE_REVARP:
555 m_freem(m);
556 return;
557 /* NOTREACHED */
558 break;
559 };
560 }
561 #endif
562 #endif
563
564 CURVNET_SET_QUIET(ifp->if_vnet);
565
566 if (ETHER_IS_MULTICAST(eh->ether_dhost)) {
567 if (ETHER_IS_BROADCAST(eh->ether_dhost))
568 m->m_flags |= M_BCAST;
569 else
570 m->m_flags |= M_MCAST;
571 if_inc_counter(ifp, IFCOUNTER_IMCASTS, 1);
572 }
573
574 #ifdef MAC
575 /*
576 * Tag the mbuf with an appropriate MAC label before any other
577 * consumers can get to it.
578 */
579 mac_ifnet_create_mbuf(ifp, m);
580 #endif
581
582 /*
583 * Give bpf a chance at the packet.
584 */
585 ETHER_BPF_MTAP(ifp, m);
586
587 if (!(ifp->if_capenable & IFCAP_HWSTATS))
588 if_inc_counter(ifp, IFCOUNTER_IBYTES, m->m_pkthdr.len);
589
590 /* Allow monitor mode to claim this frame, after stats are updated. */
591 if (ifp->if_flags & IFF_MONITOR) {
592 m_freem(m);
593 CURVNET_RESTORE();
594 return;
595 }
596
597 /* Handle input from a lagg(4) port */
598 if (ifp->if_type == IFT_IEEE8023ADLAG) {
599 KASSERT(lagg_input_ethernet_p != NULL,
600 ("%s: if_lagg not loaded!", __func__));
601 m = (*lagg_input_ethernet_p)(ifp, m);
602 if (m != NULL)
603 ifp = m->m_pkthdr.rcvif;
604 else {
605 CURVNET_RESTORE();
606 return;
607 }
608 }
609
610 /*
611 * If the hardware did not process an 802.1Q tag, do this now,
612 * to allow 802.1P priority frames to be passed to the main input
613 * path correctly.
614 */
615 if ((m->m_flags & M_VLANTAG) == 0 &&
616 ((etype == ETHERTYPE_VLAN) || (etype == ETHERTYPE_QINQ))) {
617 struct ether_vlan_header *evl;
618
619 if (m->m_len < sizeof(*evl) &&
620 (m = m_pullup(m, sizeof(*evl))) == NULL) {
621 #ifdef DIAGNOSTIC
622 if_printf(ifp, "cannot pullup VLAN header\n");
623 #endif
624 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
625 CURVNET_RESTORE();
626 return;
627 }
628
629 evl = mtod(m, struct ether_vlan_header *);
630 m->m_pkthdr.ether_vtag = ntohs(evl->evl_tag);
631 m->m_flags |= M_VLANTAG;
632
633 bcopy((char *)evl, (char *)evl + ETHER_VLAN_ENCAP_LEN,
634 ETHER_HDR_LEN - ETHER_TYPE_LEN);
635 m_adj(m, ETHER_VLAN_ENCAP_LEN);
636 eh = mtod(m, struct ether_header *);
637 }
638
639 M_SETFIB(m, ifp->if_fib);
640
641 /* Allow ng_ether(4) to claim this frame. */
642 if (ifp->if_l2com != NULL) {
643 KASSERT(ng_ether_input_p != NULL,
644 ("%s: ng_ether_input_p is NULL", __func__));
645 m->m_flags &= ~M_PROMISC;
646 (*ng_ether_input_p)(ifp, &m);
647 if (m == NULL) {
648 CURVNET_RESTORE();
649 return;
650 }
651 eh = mtod(m, struct ether_header *);
652 }
653
654 /*
655 * Allow if_bridge(4) to claim this frame.
656 *
657 * The BRIDGE_INPUT() macro will update ifp if the bridge changed it
658 * and the frame should be delivered locally.
659 *
660 * If M_BRIDGE_INJECT is set, the packet was received directly by the
661 * bridge via netmap, so "ifp" is the bridge itself and the packet
662 * should be re-examined.
663 */
664 if (ifp->if_bridge != NULL || (m->m_flags & M_BRIDGE_INJECT) != 0) {
665 m->m_flags &= ~M_PROMISC;
666 BRIDGE_INPUT(ifp, m);
667 if (m == NULL) {
668 CURVNET_RESTORE();
669 return;
670 }
671 eh = mtod(m, struct ether_header *);
672 }
673
674 #if defined(INET) || defined(INET6)
675 /*
676 * Clear M_PROMISC on frame so that carp(4) will see it when the
677 * mbuf flows up to Layer 3.
678 * FreeBSD's implementation of carp(4) uses the inprotosw
679 * to dispatch IPPROTO_CARP. carp(4) also allocates its own
680 * Ethernet addresses of the form 00:00:5e:00:01:xx, which
681 * is outside the scope of the M_PROMISC test below.
682 * TODO: Maintain a hash table of ethernet addresses other than
683 * ether_dhost which may be active on this ifp.
684 */
685 if (ifp->if_carp && (*carp_forus_p)(ifp, eh->ether_dhost)) {
686 m->m_flags &= ~M_PROMISC;
687 } else
688 #endif
689 {
690 /*
691 * If the frame received was not for our MAC address, set the
692 * M_PROMISC flag on the mbuf chain. The frame may need to
693 * be seen by the rest of the Ethernet input path in case of
694 * re-entry (e.g. bridge, vlan, netgraph) but should not be
695 * seen by upper protocol layers.
696 */
697 if (!ETHER_IS_MULTICAST(eh->ether_dhost) &&
698 bcmp(IF_LLADDR(ifp), eh->ether_dhost, ETHER_ADDR_LEN) != 0)
699 m->m_flags |= M_PROMISC;
700 }
701
702 ether_demux(ifp, m);
703 CURVNET_RESTORE();
704 }
705
706 /*
707 * Ethernet input dispatch; by default, direct dispatch here regardless of
708 * global configuration. However, if RSS is enabled, hook up RSS affinity
709 * so that when deferred or hybrid dispatch is enabled, we can redistribute
710 * load based on RSS.
711 *
712 * XXXRW: Would be nice if the ifnet passed up a flag indicating whether or
713 * not it had already done work distribution via multi-queue. Then we could
714 * direct dispatch in the event load balancing was already complete and
715 * handle the case of interfaces with different capabilities better.
716 *
717 * XXXRW: Sort of want an M_DISTRIBUTED flag to avoid multiple distributions
718 * at multiple layers?
719 *
720 * XXXRW: For now, enable all this only if RSS is compiled in, although it
721 * works fine without RSS. Need to characterise the performance overhead
722 * of the detour through the netisr code in the event the result is always
723 * direct dispatch.
724 */
725 static void
726 ether_nh_input(struct mbuf *m)
727 {
728
729 M_ASSERTPKTHDR(m);
730 KASSERT(m->m_pkthdr.rcvif != NULL,
731 ("%s: NULL interface pointer", __func__));
732 ether_input_internal(m->m_pkthdr.rcvif, m);
733 }
734
735 static struct netisr_handler ether_nh = {
736 .nh_name = "ether",
737 .nh_handler = ether_nh_input,
738 .nh_proto = NETISR_ETHER,
739 #ifdef RSS
740 .nh_policy = NETISR_POLICY_CPU,
741 .nh_dispatch = NETISR_DISPATCH_DIRECT,
742 .nh_m2cpuid = rss_m2cpuid,
743 #else
744 .nh_policy = NETISR_POLICY_SOURCE,
745 .nh_dispatch = NETISR_DISPATCH_DIRECT,
746 #endif
747 };
748
749 static void
750 ether_init(__unused void *arg)
751 {
752
753 netisr_register(ðer_nh);
754 }
755 SYSINIT(ether, SI_SUB_INIT_IF, SI_ORDER_ANY, ether_init, NULL);
756
757 static void
758 vnet_ether_init(const __unused void *arg)
759 {
760 struct pfil_head_args args;
761
762 args.pa_version = PFIL_VERSION;
763 args.pa_flags = PFIL_IN | PFIL_OUT;
764 args.pa_type = PFIL_TYPE_ETHERNET;
765 args.pa_headname = PFIL_ETHER_NAME;
766 V_link_pfil_head = pfil_head_register(&args);
767
768 #ifdef VIMAGE
769 netisr_register_vnet(ðer_nh);
770 #endif
771 }
772 VNET_SYSINIT(vnet_ether_init, SI_SUB_PROTO_IF, SI_ORDER_ANY,
773 vnet_ether_init, NULL);
774
775 #ifdef VIMAGE
776 static void
777 vnet_ether_pfil_destroy(const __unused void *arg)
778 {
779
780 pfil_head_unregister(V_link_pfil_head);
781 }
782 VNET_SYSUNINIT(vnet_ether_pfil_uninit, SI_SUB_PROTO_PFIL, SI_ORDER_ANY,
783 vnet_ether_pfil_destroy, NULL);
784
785 static void
786 vnet_ether_destroy(__unused void *arg)
787 {
788
789 netisr_unregister_vnet(ðer_nh);
790 }
791 VNET_SYSUNINIT(vnet_ether_uninit, SI_SUB_PROTO_IF, SI_ORDER_ANY,
792 vnet_ether_destroy, NULL);
793 #endif
794
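/*
 * Ethernet input entry point installed as if_input.  Drivers may hand in a
 * chain of packets linked through m_nextpkt; each packet is unlinked here
 * and dispatched individually via netisr.
 */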
795 static void
796 ether_input(struct ifnet *ifp, struct mbuf *m)
797 {
798 struct epoch_tracker et;
799 struct mbuf *mn;
800 bool needs_epoch;
801
802 needs_epoch = (ifp->if_flags & IFF_NEEDSEPOCH);
803 #ifdef INVARIANTS
804 /*
805 * This temporary code is here to prevent epoch unaware and unmarked
806 * drivers to panic the system. Once all drivers are taken care of,
807 * the whole INVARIANTS block should go away.
808 */
809 if (!needs_epoch && !in_epoch(net_epoch_preempt)) {
810 static bool printedonce;
811
812 needs_epoch = true;
813 if (!printedonce) {
814 printedonce = true;
815 if_printf(ifp, "called %s w/o net epoch! "
816 "PLEASE file a bug report.", __func__);
817 #ifdef KDB
818 kdb_backtrace();
819 #endif
820 }
821 }
822 #endif
823
824 /*
825 * The drivers are allowed to pass in a chain of packets linked with
826 * m_nextpkt. We split them up into separate packets here and pass
827 * them up. This allows the drivers to amortize the receive lock.
828 */
829 CURVNET_SET_QUIET(ifp->if_vnet);
830 if (__predict_false(needs_epoch))
831 NET_EPOCH_ENTER(et);
832 while (m) {
833 mn = m->m_nextpkt;
834 m->m_nextpkt = NULL;
835
836 /*
837 * We will rely on rcvif being set properly in the deferred
838 * context, so assert it is correct here.
839 */
840 MPASS((m->m_pkthdr.csum_flags & CSUM_SND_TAG) == 0);
841 KASSERT(m->m_pkthdr.rcvif == ifp, ("%s: ifnet mismatch m %p "
842 "rcvif %p ifp %p", __func__, m, m->m_pkthdr.rcvif, ifp));
843 netisr_dispatch(NETISR_ETHER, m);
844 m = mn;
845 }
846 if (__predict_false(needs_epoch))
847 NET_EPOCH_EXIT(et);
848 CURVNET_RESTORE();
849 }
850
851 /*
852 * Upper layer processing for a received Ethernet packet.
853 */
854 void
855 ether_demux(struct ifnet *ifp, struct mbuf *m)
856 {
857 struct ether_header *eh;
858 int i, isr;
859 u_short ether_type;
860
861 NET_EPOCH_ASSERT();
862 KASSERT(ifp != NULL, ("%s: NULL interface pointer", __func__));
863
864 /* Do not grab PROMISC frames in case we are re-entered. */
865 if (PFIL_HOOKED_IN(V_link_pfil_head) && !(m->m_flags & M_PROMISC)) {
866 i = pfil_mbuf_in(V_link_pfil_head, &m, ifp, NULL);
867 if (i != PFIL_PASS)
868 return;
869 }
870
871 eh = mtod(m, struct ether_header *);
872 ether_type = ntohs(eh->ether_type);
873
874 /*
875 * If this frame has a VLAN tag other than 0, call vlan_input()
876 * if its module is loaded. Otherwise, drop.
877 */
878 if ((m->m_flags & M_VLANTAG) &&
879 EVL_VLANOFTAG(m->m_pkthdr.ether_vtag) != 0) {
880 if (ifp->if_vlantrunk == NULL) {
881 if_inc_counter(ifp, IFCOUNTER_NOPROTO, 1);
882 m_freem(m);
883 return;
884 }
885 KASSERT(vlan_input_p != NULL,("%s: VLAN not loaded!",
886 __func__));
887 /* Clear before possibly re-entering ether_input(). */
888 m->m_flags &= ~M_PROMISC;
889 (*vlan_input_p)(ifp, m);
890 return;
891 }
892
893 /*
894 * Pass promiscuously received frames to the upper layer if the user
895 * requested this by setting IFF_PPROMISC. Otherwise, drop them.
896 */
897 if ((ifp->if_flags & IFF_PPROMISC) == 0 && (m->m_flags & M_PROMISC)) {
898 m_freem(m);
899 return;
900 }
901
902 /*
903 * Reset layer specific mbuf flags to avoid confusing upper layers.
904 */
905 m->m_flags &= ~M_VLANTAG;
906 m_clrprotoflags(m);
907
908 /*
909 * Dispatch frame to upper layer.
910 */
911 switch (ether_type) {
912 #ifdef INET
913 case ETHERTYPE_IP:
914 isr = NETISR_IP;
915 break;
916
917 case ETHERTYPE_ARP:
918 if (ifp->if_flags & IFF_NOARP) {
919 /* Discard packet if ARP is disabled on interface */
920 m_freem(m);
921 return;
922 }
923 isr = NETISR_ARP;
924 break;
925 #endif
926 #ifdef INET6
927 case ETHERTYPE_IPV6:
928 isr = NETISR_IPV6;
929 break;
930 #endif
931 default:
932 goto discard;
933 }
934
935 /* Strip off Ethernet header. */
936 m_adj(m, ETHER_HDR_LEN);
937
938 netisr_dispatch(isr, m);
939 return;
940
941 discard:
942 /*
943 * Packet is to be discarded. If netgraph is present,
944 * hand the packet to it for last chance processing;
945 * otherwise dispose of it.
946 */
947 if (ifp->if_l2com != NULL) {
948 KASSERT(ng_ether_input_orphan_p != NULL,
949 ("ng_ether_input_orphan_p is NULL"));
950 (*ng_ether_input_orphan_p)(ifp, m);
951 return;
952 }
953 m_freem(m);
954 }
955
956 /*
957 * Convert Ethernet address to printable (loggable) representation.
958 * This routine is for compatibility; it's better to just use
959 *
960 * printf("%6D", <pointer to address>, ":");
961 *
962 * since there's no static buffer involved.
963 */
964 char *
965 ether_sprintf(const u_char *ap)
966 {
967 static char etherbuf[18];
968 snprintf(etherbuf, sizeof (etherbuf), "%6D", ap, ":");
969 return (etherbuf);
970 }
971
972 /*
973 * Perform common duties while attaching to interface list
974 */
975 void
976 ether_ifattach(struct ifnet *ifp, const u_int8_t *lla)
977 {
978 int i;
979 struct ifaddr *ifa;
980 struct sockaddr_dl *sdl;
981
982 ifp->if_addrlen = ETHER_ADDR_LEN;
983 ifp->if_hdrlen = (ifp->if_capabilities & IFCAP_VLAN_MTU) != 0 ?
984 ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN : ETHER_HDR_LEN;
985 ifp->if_mtu = ETHERMTU;
986 if_attach(ifp);
987 ifp->if_output = ether_output;
988 ifp->if_input = ether_input;
989 ifp->if_resolvemulti = ether_resolvemulti;
990 ifp->if_requestencap = ether_requestencap;
991 #ifdef VIMAGE
992 ifp->if_reassign = ether_reassign;
993 #endif
994 if (ifp->if_baudrate == 0)
995 ifp->if_baudrate = IF_Mbps(10); /* just a default */
996 ifp->if_broadcastaddr = etherbroadcastaddr;
997
998 ifa = ifp->if_addr;
999 KASSERT(ifa != NULL, ("%s: no lladdr!\n", __func__));
1000 sdl = (struct sockaddr_dl *)ifa->ifa_addr;
1001 sdl->sdl_type = IFT_ETHER;
1002 sdl->sdl_alen = ifp->if_addrlen;
1003 bcopy(lla, LLADDR(sdl), ifp->if_addrlen);
1004
1005 if (ifp->if_hw_addr != NULL)
1006 bcopy(lla, ifp->if_hw_addr, ifp->if_addrlen);
1007
1008 bpfattach(ifp, DLT_EN10MB, ETHER_HDR_LEN);
1009 if (ng_ether_attach_p != NULL)
1010 (*ng_ether_attach_p)(ifp);
1011
1012 /* Announce Ethernet MAC address if non-zero. */
1013 for (i = 0; i < ifp->if_addrlen; i++)
1014 if (lla[i] != 0)
1015 break;
1016 if (i != ifp->if_addrlen)
1017 if_printf(ifp, "Ethernet address: %6D\n", lla, ":");
1018
1019 uuid_ether_add(LLADDR(sdl));
1020
1021 /* All necessary bits are set up; announce it now. */
1022 EVENTHANDLER_INVOKE(ether_ifattach_event, ifp);
1023 if (IS_DEFAULT_VNET(curvnet))
1024 devctl_notify("ETHERNET", ifp->if_xname, "IFATTACH", NULL);
1025 }
1026
1027 /*
1028 * Perform common duties while detaching an Ethernet interface
1029 */
1030 void
1031 ether_ifdetach(struct ifnet *ifp)
1032 {
1033 struct sockaddr_dl *sdl;
1034
1035 sdl = (struct sockaddr_dl *)(ifp->if_addr->ifa_addr);
1036 uuid_ether_del(LLADDR(sdl));
1037
1038 if (ifp->if_l2com != NULL) {
1039 KASSERT(ng_ether_detach_p != NULL,
1040 ("ng_ether_detach_p is NULL"));
1041 (*ng_ether_detach_p)(ifp);
1042 }
1043
1044 bpfdetach(ifp);
1045 if_detach(ifp);
1046 }
1047
1048 #ifdef VIMAGE
1049 void
1050 ether_reassign(struct ifnet *ifp, struct vnet *new_vnet, char *unused __unused)
1051 {
1052
1053 if (ifp->if_l2com != NULL) {
1054 KASSERT(ng_ether_detach_p != NULL,
1055 ("ng_ether_detach_p is NULL"));
1056 (*ng_ether_detach_p)(ifp);
1057 }
1058
1059 if (ng_ether_attach_p != NULL) {
1060 CURVNET_SET_QUIET(new_vnet);
1061 (*ng_ether_attach_p)(ifp);
1062 CURVNET_RESTORE();
1063 }
1064 }
1065 #endif
1066
1067 SYSCTL_DECL(_net_link);
1068 SYSCTL_NODE(_net_link, IFT_ETHER, ether, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
1069 "Ethernet");
1070
1071 #if 0
1072 /*
1073 * This is for reference. We have a table-driven version
1074 * of the little-endian crc32 generator, which is faster
1075 * than the double-loop.
1076 */
1077 uint32_t
1078 ether_crc32_le(const uint8_t *buf, size_t len)
1079 {
1080 size_t i;
1081 uint32_t crc, carry;
1082 int bit;
1083 uint8_t data;
1084
1085 crc = 0xffffffff; /* initial value */
1086
1087 for (i = 0; i < len; i++) {
1088 for (data = *buf++, bit = 0; bit < 8; bit++, data >>= 1) {
1089 carry = (crc ^ data) & 1;
1090 crc >>= 1;
1091 if (carry)
1092 crc = (crc ^ ETHER_CRC_POLY_LE);
1093 }
1094 }
1095
1096 return (crc);
1097 }
1098 #else
1099 uint32_t
1100 ether_crc32_le(const uint8_t *buf, size_t len)
1101 {
1102 static const uint32_t crctab[] = {
1103 0x00000000, 0x1db71064, 0x3b6e20c8, 0x26d930ac,
1104 0x76dc4190, 0x6b6b51f4, 0x4db26158, 0x5005713c,
1105 0xedb88320, 0xf00f9344, 0xd6d6a3e8, 0xcb61b38c,
1106 0x9b64c2b0, 0x86d3d2d4, 0xa00ae278, 0xbdbdf21c
1107 };
1108 size_t i;
1109 uint32_t crc;
1110
1111 crc = 0xffffffff; /* initial value */
1112
1113 for (i = 0; i < len; i++) {
1114 crc ^= buf[i];
1115 crc = (crc >> 4) ^ crctab[crc & 0xf];
1116 crc = (crc >> 4) ^ crctab[crc & 0xf];
1117 }
1118
1119 return (crc);
1120 }
1121 #endif
1122
1123 uint32_t
1124 ether_crc32_be(const uint8_t *buf, size_t len)
1125 {
1126 size_t i;
1127 uint32_t crc, carry;
1128 int bit;
1129 uint8_t data;
1130
1131 crc = 0xffffffff; /* initial value */
1132
1133 for (i = 0; i < len; i++) {
1134 for (data = *buf++, bit = 0; bit < 8; bit++, data >>= 1) {
1135 carry = ((crc & 0x80000000) ? 1 : 0) ^ (data & 0x01);
1136 crc <<= 1;
1137 if (carry)
1138 crc = (crc ^ ETHER_CRC_POLY_BE) | carry;
1139 }
1140 }
1141
1142 return (crc);
1143 }
1144
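/*
 * Common ioctl handling for Ethernet interfaces: interface address
 * assignment, MTU changes and 802.1Q PCP get/set.  Drivers call this for
 * requests they do not handle themselves.
 */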
1145 int
1146 ether_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
1147 {
1148 struct ifaddr *ifa = (struct ifaddr *) data;
1149 struct ifreq *ifr = (struct ifreq *) data;
1150 int error = 0;
1151
1152 switch (command) {
1153 case SIOCSIFADDR:
1154 ifp->if_flags |= IFF_UP;
1155
1156 switch (ifa->ifa_addr->sa_family) {
1157 #ifdef INET
1158 case AF_INET:
1159 ifp->if_init(ifp->if_softc); /* before arpwhohas */
1160 arp_ifinit(ifp, ifa);
1161 break;
1162 #endif
1163 default:
1164 ifp->if_init(ifp->if_softc);
1165 break;
1166 }
1167 break;
1168
1169 case SIOCGIFADDR:
1170 bcopy(IF_LLADDR(ifp), &ifr->ifr_addr.sa_data[0],
1171 ETHER_ADDR_LEN);
1172 break;
1173
1174 case SIOCSIFMTU:
1175 /*
1176 * Set the interface MTU.
1177 */
1178 if (ifr->ifr_mtu > ETHERMTU) {
1179 error = EINVAL;
1180 } else {
1181 ifp->if_mtu = ifr->ifr_mtu;
1182 }
1183 break;
1184
1185 case SIOCSLANPCP:
1186 error = priv_check(curthread, PRIV_NET_SETLANPCP);
1187 if (error != 0)
1188 break;
1189 if (ifr->ifr_lan_pcp > 7 &&
1190 ifr->ifr_lan_pcp != IFNET_PCP_NONE) {
1191 error = EINVAL;
1192 } else {
1193 ifp->if_pcp = ifr->ifr_lan_pcp;
1194 /* broadcast event about PCP change */
1195 EVENTHANDLER_INVOKE(ifnet_event, ifp, IFNET_EVENT_PCP);
1196 }
1197 break;
1198
1199 case SIOCGLANPCP:
1200 ifr->ifr_lan_pcp = ifp->if_pcp;
1201 break;
1202
1203 default:
1204 error = EINVAL; /* XXX netbsd has ENOTTY??? */
1205 break;
1206 }
1207 return (error);
1208 }
1209
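/*
 * Map a network-layer multicast address to the link-layer multicast address
 * to be used on this interface.  '*llsa' is set to a sockaddr_dl holding
 * the mapped address, or to NULL when no mapping is required.
 */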
1210 static int
1211 ether_resolvemulti(struct ifnet *ifp, struct sockaddr **llsa,
1212 struct sockaddr *sa)
1213 {
1214 struct sockaddr_dl *sdl;
1215 #ifdef INET
1216 struct sockaddr_in *sin;
1217 #endif
1218 #ifdef INET6
1219 struct sockaddr_in6 *sin6;
1220 #endif
1221 u_char *e_addr;
1222
1223 switch(sa->sa_family) {
1224 case AF_LINK:
1225 /*
1226 * No mapping needed. Just check that it's a valid MC address.
1227 */
1228 sdl = (struct sockaddr_dl *)sa;
1229 e_addr = LLADDR(sdl);
1230 if (!ETHER_IS_MULTICAST(e_addr))
1231 return EADDRNOTAVAIL;
1232 *llsa = NULL;
1233 return 0;
1234
1235 #ifdef INET
1236 case AF_INET:
1237 sin = (struct sockaddr_in *)sa;
1238 if (!IN_MULTICAST(ntohl(sin->sin_addr.s_addr)))
1239 return EADDRNOTAVAIL;
1240 sdl = link_init_sdl(ifp, *llsa, IFT_ETHER);
1241 sdl->sdl_alen = ETHER_ADDR_LEN;
1242 e_addr = LLADDR(sdl);
1243 ETHER_MAP_IP_MULTICAST(&sin->sin_addr, e_addr);
1244 *llsa = (struct sockaddr *)sdl;
1245 return 0;
1246 #endif
1247 #ifdef INET6
1248 case AF_INET6:
1249 sin6 = (struct sockaddr_in6 *)sa;
1250 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
1251 /*
1252 * An IP6 address of 0 means listen to all
1253 * of the Ethernet multicast address used for IP6.
1254 * (This is used for multicast routers.)
1255 */
1256 ifp->if_flags |= IFF_ALLMULTI;
1257 *llsa = NULL;
1258 return 0;
1259 }
1260 if (!IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr))
1261 return EADDRNOTAVAIL;
1262 sdl = link_init_sdl(ifp, *llsa, IFT_ETHER);
1263 sdl->sdl_alen = ETHER_ADDR_LEN;
1264 e_addr = LLADDR(sdl);
1265 ETHER_MAP_IPV6_MULTICAST(&sin6->sin6_addr, e_addr);
1266 *llsa = (struct sockaddr *)sdl;
1267 return 0;
1268 #endif
1269
1270 default:
1271 /*
1272 * Well, the text isn't quite right, but it's the name
1273 * that counts...
1274 */
1275 return EAFNOSUPPORT;
1276 }
1277 }
1278
1279 static moduledata_t ether_mod = {
1280 .name = "ether",
1281 };
1282
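/*
 * Tap a frame whose 802.1Q tag is carried out-of-band (M_VLANTAG) to BPF,
 * temporarily reconstructing the in-line VLAN header for the listeners.
 * An optional caller-supplied data link header of 'dlen' bytes is
 * prepended in front of the reconstructed header.
 */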
1283 void
1284 ether_vlan_mtap(struct bpf_if *bp, struct mbuf *m, void *data, u_int dlen)
1285 {
1286 struct ether_vlan_header vlan;
1287 struct mbuf mv, mb;
1288
1289 KASSERT((m->m_flags & M_VLANTAG) != 0,
1290 ("%s: vlan information not present", __func__));
1291 KASSERT(m->m_len >= sizeof(struct ether_header),
1292 ("%s: mbuf not large enough for header", __func__));
1293 bcopy(mtod(m, char *), &vlan, sizeof(struct ether_header));
1294 vlan.evl_proto = vlan.evl_encap_proto;
1295 vlan.evl_encap_proto = htons(ETHERTYPE_VLAN);
1296 vlan.evl_tag = htons(m->m_pkthdr.ether_vtag);
1297 m->m_len -= sizeof(struct ether_header);
1298 m->m_data += sizeof(struct ether_header);
1299 /*
1300 * If a data link has been supplied by the caller, then we will need to
1301 * re-create a stack allocated mbuf chain with the following structure:
1302 *
1303 * (1) mbuf #1 will contain the supplied data link
1304 * (2) mbuf #2 will contain the vlan header
1305 * (3) mbuf #3 will contain the original mbuf's packet data
1306 *
1307 * Otherwise, submit the packet and vlan header via bpf_mtap2().
1308 */
1309 if (data != NULL) {
1310 mv.m_next = m;
1311 mv.m_data = (caddr_t)&vlan;
1312 mv.m_len = sizeof(vlan);
1313 mb.m_next = &mv;
1314 mb.m_data = data;
1315 mb.m_len = dlen;
1316 bpf_mtap(bp, &mb);
1317 } else
1318 bpf_mtap2(bp, &vlan, sizeof(vlan), m);
1319 m->m_len += sizeof(struct ether_header);
1320 m->m_data -= sizeof(struct ether_header);
1321 }
1322
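/*
 * Rewrite the Ethernet header at the front of 'm' into an 802.1Q
 * encapsulated header using the given TPID ('proto') and tag.  Returns the
 * (possibly reallocated) mbuf, or NULL if no space could be prepended.
 */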
1323 struct mbuf *
1324 ether_vlanencap_proto(struct mbuf *m, uint16_t tag, uint16_t proto)
1325 {
1326 struct ether_vlan_header *evl;
1327
1328 M_PREPEND(m, ETHER_VLAN_ENCAP_LEN, M_NOWAIT);
1329 if (m == NULL)
1330 return (NULL);
1331 /* M_PREPEND takes care of m_len, m_pkthdr.len for us */
1332
1333 if (m->m_len < sizeof(*evl)) {
1334 m = m_pullup(m, sizeof(*evl));
1335 if (m == NULL)
1336 return (NULL);
1337 }
1338
1339 /*
1340 * Transform the Ethernet header into an Ethernet header
1341 * with 802.1Q encapsulation.
1342 */
1343 evl = mtod(m, struct ether_vlan_header *);
1344 bcopy((char *)evl + ETHER_VLAN_ENCAP_LEN,
1345 (char *)evl, ETHER_HDR_LEN - ETHER_TYPE_LEN);
1346 evl->evl_encap_proto = htons(proto);
1347 evl->evl_tag = htons(tag);
1348 return (m);
1349 }
1350
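/*
 * Tap a frame to any BPF listeners on 'ifp', reconstructing the 802.1Q
 * header first if the VLAN tag is carried out-of-band in the mbuf.
 */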
1351 void
1352 ether_bpf_mtap_if(struct ifnet *ifp, struct mbuf *m)
1353 {
1354 if (bpf_peers_present(ifp->if_bpf)) {
1355 M_ASSERTVALID(m);
1356 if ((m->m_flags & M_VLANTAG) != 0)
1357 ether_vlan_mtap(ifp->if_bpf, m, NULL, 0);
1358 else
1359 bpf_mtap(ifp->if_bpf, m);
1360 }
1361 }
1362
1363 static SYSCTL_NODE(_net_link, IFT_L2VLAN, vlan, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
1364 "IEEE 802.1Q VLAN");
1365 static SYSCTL_NODE(_net_link_vlan, PF_LINK, link,
1366 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
1367 "for consistency");
1368
1369 VNET_DEFINE_STATIC(int, soft_pad);
1370 #define V_soft_pad VNET(soft_pad)
1371 SYSCTL_INT(_net_link_vlan, OID_AUTO, soft_pad, CTLFLAG_RW | CTLFLAG_VNET,
1372 &VNET_NAME(soft_pad), 0,
1373 "pad short frames before tagging");
1374
1375 /*
1376 * For now, make preserving PCP via an mbuf tag optional, as it increases
1377 * per-packet memory allocations and frees. In the future, it would be
1378 * preferable to reuse ether_vtag for this, or similar.
1379 */
1380 VNET_DEFINE(int, vlan_mtag_pcp) = 0;
1381 #define V_vlan_mtag_pcp VNET(vlan_mtag_pcp)
1382 SYSCTL_INT(_net_link_vlan, OID_AUTO, mtag_pcp, CTLFLAG_RW | CTLFLAG_VNET,
1383 &VNET_NAME(vlan_mtag_pcp), 0,
1384 "Retain VLAN PCP information as packets are passed up the stack");
1385
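/*
 * Decide whether an outbound frame needs 802.1Q priority (PCP) handling:
 * never for vlan(4) interfaces themselves, otherwise when the interface has
 * a PCP configured, the mbuf already carries a VLAN tag, or an out-of-band
 * PCP mbuf tag is present and honored.
 */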
1386 static inline bool
1387 ether_do_pcp(struct ifnet *ifp, struct mbuf *m)
1388 {
1389 if (ifp->if_type == IFT_L2VLAN)
1390 return (false);
1391 if (ifp->if_pcp != IFNET_PCP_NONE || (m->m_flags & M_VLANTAG) != 0)
1392 return (true);
1393 if (V_vlan_mtag_pcp &&
1394 m_tag_locate(m, MTAG_8021Q, MTAG_8021Q_PCP_OUT, NULL) != NULL)
1395 return (true);
1396 return (false);
1397 }
1398
1399 bool
1400 ether_8021q_frame(struct mbuf **mp, struct ifnet *ife, struct ifnet *p,
1401 const struct ether_8021q_tag *qtag)
1402 {
1403 struct m_tag *mtag;
1404 int n;
1405 uint16_t tag;
1406 uint8_t pcp = qtag->pcp;
1407 static const char pad[8]; /* just zeros */
1408
1409 /*
1410 * Pad the frame to the minimum size allowed if told to.
1411 * This option is in accord with IEEE Std 802.1Q, 2003 Ed.,
1412 * paragraph C.4.4.3.b. It can help to work around buggy
1413 * bridges that violate paragraph C.4.4.3.a from the same
1414 * document, i.e., fail to pad short frames after untagging.
1415 * E.g., a tagged frame 66 bytes long (incl. FCS) is OK, but
1416 * untagging it will produce a 62-byte frame, which is a runt
1417 * and requires padding. There are VLAN-enabled network
1418 * devices that just discard such runts instead or mishandle
1419 * them somehow.
1420 */
1421 if (V_soft_pad && p->if_type == IFT_ETHER) {
1422 for (n = ETHERMIN + ETHER_HDR_LEN - (*mp)->m_pkthdr.len;
1423 n > 0; n -= sizeof(pad)) {
1424 if (!m_append(*mp, min(n, sizeof(pad)), pad))
1425 break;
1426 }
1427 if (n > 0) {
1428 m_freem(*mp);
1429 *mp = NULL;
1430 if_printf(ife, "cannot pad short frame");
1431 return (false);
1432 }
1433 }
1434
1435 /*
1436 * If PCP is set in mbuf, use it
1437 */
1438 if ((*mp)->m_flags & M_VLANTAG) {
1439 pcp = EVL_PRIOFTAG((*mp)->m_pkthdr.ether_vtag);
1440 }
1441
1442 /*
1443 * If underlying interface can do VLAN tag insertion itself,
1444 * just pass the packet along. However, we need some way to
1445 * tell the interface where the packet came from so that it
1446 * knows how to find the VLAN tag to use, so we attach a
1447 * packet tag that holds it.
1448 */
1449 if (V_vlan_mtag_pcp && (mtag = m_tag_locate(*mp, MTAG_8021Q,
1450 MTAG_8021Q_PCP_OUT, NULL)) != NULL)
1451 tag = EVL_MAKETAG(qtag->vid, *(uint8_t *)(mtag + 1), 0);
1452 else
1453 tag = EVL_MAKETAG(qtag->vid, pcp, 0);
1454 if ((p->if_capenable & IFCAP_VLAN_HWTAGGING) &&
1455 (qtag->proto == ETHERTYPE_VLAN)) {
1456 (*mp)->m_pkthdr.ether_vtag = tag;
1457 (*mp)->m_flags |= M_VLANTAG;
1458 } else {
1459 *mp = ether_vlanencap_proto(*mp, tag, qtag->proto);
1460 if (*mp == NULL) {
1461 if_printf(ife, "unable to prepend 802.1Q header");
1462 return (false);
1463 }
1464 (*mp)->m_flags &= ~M_VLANTAG;
1465 }
1466 return (true);
1467 }
1468
1469 /*
1470 * Allocate an address from the FreeBSD Foundation OUI. This uses a
1471 * cryptographic hash function on the containing jail's name, UUID and the
1472 * interface name to attempt to provide a unique but stable address.
1473 * Pseudo-interfaces which require a MAC address should use this function to
1474 * allocate non-locally-administered addresses.
1475 */
1476 void
1477 ether_gen_addr_byname(const char *nameunit, struct ether_addr *hwaddr)
1478 {
1479 SHA1_CTX ctx;
1480 char *buf;
1481 char uuid[HOSTUUIDLEN + 1];
1482 uint64_t addr;
1483 int i, sz;
1484 unsigned char digest[SHA1_RESULTLEN];
1485 char jailname[MAXHOSTNAMELEN];
1486
1487 getcredhostuuid(curthread->td_ucred, uuid, sizeof(uuid));
1488 if (strncmp(uuid, DEFAULT_HOSTUUID, sizeof(uuid)) == 0) {
1489 /* Fall back to a random mac address. */
1490 goto rando;
1491 }
1492
1493 /* If each (vnet) jail would also have a unique hostuuid this would not
1494 * be necessary. */
1495 getjailname(curthread->td_ucred, jailname, sizeof(jailname));
1496 sz = asprintf(&buf, M_TEMP, "%s-%s-%s", uuid, nameunit,
1497 jailname);
1498 if (sz < 0) {
1499 /* Fall back to a random mac address. */
1500 goto rando;
1501 }
1502
1503 SHA1Init(&ctx);
1504 SHA1Update(&ctx, buf, sz);
1505 SHA1Final(digest, &ctx);
1506 free(buf, M_TEMP);
1507
1508 addr = (digest[0] << 8) | digest[1] | OUI_FREEBSD_GENERATED_LOW;
1509 for (i = 0; i < ETHER_ADDR_LEN; ++i) {
1510 hwaddr->octet[i] = addr >> ((ETHER_ADDR_LEN - i - 1) * 8) &
1511 0xFF;
1512 }
1513
1514 return;
1515 rando:
1516 arc4rand(hwaddr, sizeof(*hwaddr), 0);
1517 /* Unicast */
1518 hwaddr->octet[0] &= 0xFE;
1519 /* Locally administered. */
1520 hwaddr->octet[0] |= 0x02;
1521 }
1522
1523 void
1524 ether_gen_addr(struct ifnet *ifp, struct ether_addr *hwaddr)
1525 {
1526 ether_gen_addr_byname(if_name(ifp), hwaddr);
1527 }
1528
1529 DECLARE_MODULE(ether, ether_mod, SI_SUB_INIT_IF, SI_ORDER_ANY);
1530 MODULE_VERSION(ether, 1);
1531