1 /*-
2 * SPDX-License-Identifier: BSD-3-Clause
3 *
4 * Copyright (c) 1982, 1989, 1993
5 * The Regents of the University of California. All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the name of the University nor the names of its contributors
16 * may be used to endorse or promote products derived from this software
17 * without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 */
31
32 #include "opt_inet.h"
33 #include "opt_inet6.h"
34 #include "opt_netgraph.h"
35 #include "opt_mbuf_profiling.h"
36 #include "opt_rss.h"
37
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/devctl.h>
41 #include <sys/eventhandler.h>
42 #include <sys/jail.h>
43 #include <sys/kernel.h>
44 #include <sys/lock.h>
45 #include <sys/malloc.h>
46 #include <sys/mbuf.h>
47 #include <sys/module.h>
48 #include <sys/msan.h>
49 #include <sys/proc.h>
50 #include <sys/priv.h>
51 #include <sys/random.h>
52 #include <sys/socket.h>
53 #include <sys/sockio.h>
54 #include <sys/sysctl.h>
55 #include <sys/uuid.h>
56 #ifdef KDB
57 #include <sys/kdb.h>
58 #endif
59
60 #include <net/ieee_oui.h>
61 #include <net/if.h>
62 #include <net/if_var.h>
63 #include <net/if_private.h>
64 #include <net/if_arp.h>
65 #include <net/netisr.h>
66 #include <net/route.h>
67 #include <net/if_llc.h>
68 #include <net/if_dl.h>
69 #include <net/if_types.h>
70 #include <net/bpf.h>
71 #include <net/ethernet.h>
72 #include <net/if_bridgevar.h>
73 #include <net/if_vlan_var.h>
74 #include <net/if_llatbl.h>
75 #include <net/pfil.h>
76 #include <net/rss_config.h>
77 #include <net/vnet.h>
78
79 #include <netpfil/pf/pf_mtag.h>
80
81 #if defined(INET) || defined(INET6)
82 #include <netinet/in.h>
83 #include <netinet/in_var.h>
84 #include <netinet/if_ether.h>
85 #include <netinet/ip_carp.h>
86 #include <netinet/ip_var.h>
87 #endif
88 #ifdef INET6
89 #include <netinet6/nd6.h>
90 #endif
91 #include <security/mac/mac_framework.h>
92
93 #include <crypto/sha1.h>
94
95 #ifdef CTASSERT
96 CTASSERT(sizeof (struct ether_header) == ETHER_ADDR_LEN * 2 + 2);
97 CTASSERT(sizeof (struct ether_addr) == ETHER_ADDR_LEN);
98 #endif
99
100 VNET_DEFINE(pfil_head_t, link_pfil_head); /* Packet filter hooks */
101
102 /* netgraph node hooks for ng_ether(4) */
103 void (*ng_ether_input_p)(struct ifnet *ifp, struct mbuf **mp);
104 void (*ng_ether_input_orphan_p)(struct ifnet *ifp, struct mbuf *m);
105 int (*ng_ether_output_p)(struct ifnet *ifp, struct mbuf **mp);
106 void (*ng_ether_attach_p)(struct ifnet *ifp);
107 void (*ng_ether_detach_p)(struct ifnet *ifp);
108
109 /* if_bridge(4) support */
110 void (*bridge_dn_p)(struct mbuf *, struct ifnet *);
111 bool (*bridge_same_p)(const void *, const void *);
112 void *(*bridge_get_softc_p)(struct ifnet *);
113 bool (*bridge_member_ifaddrs_p)(void);
114
115 /* if_lagg(4) support */
116 struct mbuf *(*lagg_input_ethernet_p)(struct ifnet *, struct mbuf *);
117
118 static const u_char etherbroadcastaddr[ETHER_ADDR_LEN] =
119 { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
120
121 static int ether_resolvemulti(struct ifnet *, struct sockaddr **,
122 struct sockaddr *);
123 static int ether_requestencap(struct ifnet *, struct if_encap_req *);
124
125 static inline bool ether_do_pcp(struct ifnet *, struct mbuf *);
126
127 #define senderr(e) do { error = (e); goto bad;} while (0)
128
129 static void
130 update_mbuf_csumflags(struct mbuf *src, struct mbuf *dst)
131 {
132 int csum_flags = 0;
133
134 if (src->m_pkthdr.csum_flags & CSUM_IP)
135 csum_flags |= (CSUM_IP_CHECKED|CSUM_IP_VALID);
136 if (src->m_pkthdr.csum_flags & CSUM_DELAY_DATA)
137 csum_flags |= (CSUM_DATA_VALID|CSUM_PSEUDO_HDR);
138 if (src->m_pkthdr.csum_flags & CSUM_SCTP)
139 csum_flags |= CSUM_SCTP_VALID;
140 dst->m_pkthdr.csum_flags |= csum_flags;
141 if (csum_flags & CSUM_DATA_VALID)
142 dst->m_pkthdr.csum_data = 0xffff;
143 }
144
145 /*
146 * Handle link-layer encapsulation requests.
147 */
148 static int
149 ether_requestencap(struct ifnet *ifp, struct if_encap_req *req)
150 {
151 struct ether_header *eh;
152 struct arphdr *ah;
153 uint16_t etype;
154 const u_char *lladdr;
155
156 if (req->rtype != IFENCAP_LL)
157 return (EOPNOTSUPP);
158
159 if (req->bufsize < ETHER_HDR_LEN)
160 return (ENOMEM);
161
162 eh = (struct ether_header *)req->buf;
163 lladdr = req->lladdr;
164 req->lladdr_off = 0;
165
166 switch (req->family) {
167 case AF_INET:
168 etype = htons(ETHERTYPE_IP);
169 break;
170 case AF_INET6:
171 etype = htons(ETHERTYPE_IPV6);
172 break;
173 case AF_ARP:
174 ah = (struct arphdr *)req->hdata;
175 ah->ar_hrd = htons(ARPHRD_ETHER);
176
177 switch(ntohs(ah->ar_op)) {
178 case ARPOP_REVREQUEST:
179 case ARPOP_REVREPLY:
180 etype = htons(ETHERTYPE_REVARP);
181 break;
182 case ARPOP_REQUEST:
183 case ARPOP_REPLY:
184 default:
185 etype = htons(ETHERTYPE_ARP);
186 break;
187 }
188
189 if (req->flags & IFENCAP_FLAG_BROADCAST)
190 lladdr = ifp->if_broadcastaddr;
191 break;
192 default:
193 return (EAFNOSUPPORT);
194 }
195
196 memcpy(&eh->ether_type, &etype, sizeof(eh->ether_type));
197 memcpy(eh->ether_dhost, lladdr, ETHER_ADDR_LEN);
198 memcpy(eh->ether_shost, IF_LLADDR(ifp), ETHER_ADDR_LEN);
199 req->bufsize = sizeof(struct ether_header);
200
201 return (0);
202 }
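
/*
 * Illustrative sketch (not compiled): how a caller such as the ARP or
 * ND resolver might ask the routine above for a prebuilt Ethernet
 * header.  Only fields consumed by ether_requestencap() are filled in;
 * "dst_mac" stands in for an already-resolved link-layer address and
 * the function name is hypothetical.
 */
#if 0
static int
example_fill_ether_header(struct ifnet *ifp, const u_char *dst_mac,
    u_char *hdr, u_int hdrsize)
{
	struct if_encap_req req;

	bzero(&req, sizeof(req));
	req.rtype = IFENCAP_LL;
	req.flags = 0;			/* or IFENCAP_FLAG_BROADCAST */
	req.family = AF_INET;		/* selects ETHERTYPE_IP above */
	req.buf = hdr;
	req.bufsize = hdrsize;		/* must be >= ETHER_HDR_LEN */
	req.lladdr = dst_mac;

	return (ifp->if_requestencap(ifp, &req));
}
#endif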
203
204 static int
205 ether_resolve_addr(struct ifnet *ifp, struct mbuf *m,
206 const struct sockaddr *dst, struct route *ro, u_char *phdr,
207 uint32_t *pflags, struct llentry **plle)
208 {
209 uint32_t lleflags = 0;
210 int error = 0;
211 #if defined(INET) || defined(INET6)
212 struct ether_header *eh = (struct ether_header *)phdr;
213 uint16_t etype;
214 #endif
215
216 if (plle)
217 *plle = NULL;
218
219 switch (dst->sa_family) {
220 #ifdef INET
221 case AF_INET:
222 if ((m->m_flags & (M_BCAST | M_MCAST)) == 0)
223 error = arpresolve(ifp, 0, m, dst, phdr, &lleflags,
224 plle);
225 else {
226 if (m->m_flags & M_BCAST)
227 memcpy(eh->ether_dhost, ifp->if_broadcastaddr,
228 ETHER_ADDR_LEN);
229 else {
230 const struct in_addr *a;
231 a = &(((const struct sockaddr_in *)dst)->sin_addr);
232 ETHER_MAP_IP_MULTICAST(a, eh->ether_dhost);
233 }
234 etype = htons(ETHERTYPE_IP);
235 memcpy(&eh->ether_type, &etype, sizeof(etype));
236 memcpy(eh->ether_shost, IF_LLADDR(ifp), ETHER_ADDR_LEN);
237 }
238 break;
239 #endif
240 #ifdef INET6
241 case AF_INET6:
242 if ((m->m_flags & M_MCAST) == 0) {
243 int af = RO_GET_FAMILY(ro, dst);
244 error = nd6_resolve(ifp, LLE_SF(af, 0), m, dst, phdr,
245 &lleflags, plle);
246 } else {
247 const struct in6_addr *a6;
248 a6 = &(((const struct sockaddr_in6 *)dst)->sin6_addr);
249 ETHER_MAP_IPV6_MULTICAST(a6, eh->ether_dhost);
250 etype = htons(ETHERTYPE_IPV6);
251 memcpy(&eh->ether_type, &etype, sizeof(etype));
252 memcpy(eh->ether_shost, IF_LLADDR(ifp), ETHER_ADDR_LEN);
253 }
254 break;
255 #endif
256 default:
257 if_printf(ifp, "can't handle af%d\n", dst->sa_family);
258 if (m != NULL)
259 m_freem(m);
260 return (EAFNOSUPPORT);
261 }
262
263 if (error == EHOSTDOWN) {
264 if (ro != NULL && (ro->ro_flags & RT_HAS_GW) != 0)
265 error = EHOSTUNREACH;
266 }
267
268 if (error != 0)
269 return (error);
270
271 *pflags = RT_MAY_LOOP;
272 if (lleflags & LLE_IFADDR)
273 *pflags |= RT_L2_ME;
274
275 return (0);
276 }
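
/*
 * Worked example of the multicast mappings used above: for the IPv4
 * group 224.1.2.3 (0xe0010203), ETHER_MAP_IP_MULTICAST() copies the
 * low 23 bits of the group address behind the fixed prefix 01:00:5e,
 * yielding the destination 01:00:5e:01:02:03.  For IPv6,
 * ETHER_MAP_IPV6_MULTICAST() appends the last 32 bits of the group
 * address to the prefix 33:33, so ff02::1 maps to 33:33:00:00:00:01.
 */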
277
278 /*
279 * Ethernet output routine.
280 * Encapsulate a packet of type family for the local net.
281 * Use trailer local net encapsulation if enough data in first
282 * packet leaves a multiple of 512 bytes of data in remainder.
283 */
284 int
285 ether_output(struct ifnet *ifp, struct mbuf *m,
286 const struct sockaddr *dst, struct route *ro)
287 {
288 int error = 0;
289 char linkhdr[ETHER_HDR_LEN], *phdr;
290 struct ether_header *eh;
291 struct pf_mtag *t;
292 bool loop_copy;
293 int hlen; /* link layer header length */
294 uint32_t pflags;
295 struct llentry *lle = NULL;
296 int addref = 0;
297
298 phdr = NULL;
299 pflags = 0;
300 if (ro != NULL) {
301 /* XXX BPF uses ro_prepend */
302 if (ro->ro_prepend != NULL) {
303 phdr = ro->ro_prepend;
304 hlen = ro->ro_plen;
305 } else if (!(m->m_flags & (M_BCAST | M_MCAST))) {
306 if ((ro->ro_flags & RT_LLE_CACHE) != 0) {
307 lle = ro->ro_lle;
308 if (lle != NULL &&
309 (lle->la_flags & LLE_VALID) == 0) {
310 LLE_FREE(lle);
311 lle = NULL; /* redundant */
312 ro->ro_lle = NULL;
313 }
314 if (lle == NULL) {
315 /* if we lookup, keep cache */
316 addref = 1;
317 } else
318 /*
319 * Notify LLE code that
320 * the entry was used
321 * by datapath.
322 */
323 llentry_provide_feedback(lle);
324 }
325 if (lle != NULL) {
326 phdr = lle->r_linkdata;
327 hlen = lle->r_hdrlen;
328 pflags = lle->r_flags;
329 }
330 }
331 }
332
333 #ifdef MAC
334 error = mac_ifnet_check_transmit(ifp, m);
335 if (error)
336 senderr(error);
337 #endif
338
339 M_PROFILE(m);
340 if (ifp->if_flags & IFF_MONITOR)
341 senderr(ENETDOWN);
342 if (!((ifp->if_flags & IFF_UP) &&
343 (ifp->if_drv_flags & IFF_DRV_RUNNING)))
344 senderr(ENETDOWN);
345
346 if (phdr == NULL) {
347 /* No prepend data supplied. Try to calculate ourselves. */
348 phdr = linkhdr;
349 hlen = ETHER_HDR_LEN;
350 error = ether_resolve_addr(ifp, m, dst, ro, phdr, &pflags,
351 addref ? &lle : NULL);
352 if (addref && lle != NULL)
353 ro->ro_lle = lle;
354 if (error != 0)
355 return (error == EWOULDBLOCK ? 0 : error);
356 }
357
358 if ((pflags & RT_L2_ME) != 0) {
359 update_mbuf_csumflags(m, m);
360 return (if_simloop(ifp, m, RO_GET_FAMILY(ro, dst), 0));
361 }
362 loop_copy = (pflags & RT_MAY_LOOP) != 0;
363
364 /*
365 * Add local net header. If no space in first mbuf,
366 * allocate another.
367 *
368 * Note that we do prepend regardless of RT_HAS_HEADER flag.
369 * This is done because BPF code shifts m_data pointer
370 * to the end of ethernet header prior to calling if_output().
371 */
372 M_PREPEND(m, hlen, M_NOWAIT);
373 if (m == NULL)
374 senderr(ENOBUFS);
375 if ((pflags & RT_HAS_HEADER) == 0) {
376 eh = mtod(m, struct ether_header *);
377 memcpy(eh, phdr, hlen);
378 }
379
380 /*
381 * If a simplex interface, and the packet is being sent to our
382 * Ethernet address or a broadcast address, loopback a copy.
383 * XXX To make a simplex device behave exactly like a duplex
384 * device, we should copy in the case of sending to our own
385 * ethernet address (thus letting the original actually appear
386 * on the wire). However, we don't do that here for security
387 * reasons and compatibility with the original behavior.
388 */
389 if ((m->m_flags & M_BCAST) && loop_copy && (ifp->if_flags & IFF_SIMPLEX) &&
390 ((t = pf_find_mtag(m)) == NULL || !t->routed)) {
391 struct mbuf *n;
392
393 /*
394 * Because if_simloop() modifies the packet, we need a
395 * writable copy through m_dup() instead of a readonly
396 * one as m_copy[m] would give us. The alternative would
397 * be to modify if_simloop() to handle the readonly mbuf,
398 * but performancewise it is mostly equivalent (trading
399 * extra data copying vs. extra locking).
400 *
401 * XXX This is a local workaround. A number of less
402 * often used kernel parts suffer from the same bug.
403 * See PR kern/105943 for a proposed general solution.
404 */
405 if ((n = m_dup(m, M_NOWAIT)) != NULL) {
406 update_mbuf_csumflags(m, n);
407 (void)if_simloop(ifp, n, RO_GET_FAMILY(ro, dst), hlen);
408 } else
409 if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
410 }
411
412 /*
413 * Bridges require special output handling.
414 */
415 if (ifp->if_bridge) {
416 BRIDGE_OUTPUT(ifp, m, error);
417 return (error);
418 }
419
420 #if defined(INET) || defined(INET6)
421 if (ifp->if_carp &&
422 (error = (*carp_output_p)(ifp, m, dst)))
423 goto bad;
424 #endif
425
426 /* Handle ng_ether(4) processing, if any */
427 if (ifp->if_l2com != NULL) {
428 KASSERT(ng_ether_output_p != NULL,
429 ("ng_ether_output_p is NULL"));
430 if ((error = (*ng_ether_output_p)(ifp, &m)) != 0) {
431 bad: if (m != NULL)
432 m_freem(m);
433 return (error);
434 }
435 if (m == NULL)
436 return (0);
437 }
438
439 /* Continue with link-layer output */
440 return ether_output_frame(ifp, m);
441 }
442
443 static bool
444 ether_set_pcp(struct mbuf **mp, struct ifnet *ifp, uint8_t pcp)
445 {
446 struct ether_8021q_tag qtag;
447 struct ether_header *eh;
448
449 eh = mtod(*mp, struct ether_header *);
450 if (eh->ether_type == htons(ETHERTYPE_VLAN) ||
451 eh->ether_type == htons(ETHERTYPE_QINQ)) {
452 (*mp)->m_flags &= ~M_VLANTAG;
453 return (true);
454 }
455
456 qtag.vid = 0;
457 qtag.pcp = pcp;
458 qtag.proto = ETHERTYPE_VLAN;
459 if (ether_8021q_frame(mp, ifp, ifp, &qtag))
460 return (true);
461 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
462 return (false);
463 }
464
465 /*
466 * Ethernet link layer output routine to send a raw frame to the device.
467 *
468 * This assumes that the 14 byte Ethernet header is present and contiguous
469 * in the first mbuf (if BRIDGE'ing).
470 */
471 int
472 ether_output_frame(struct ifnet *ifp, struct mbuf *m)
473 {
474 if (ether_do_pcp(ifp, m) && !ether_set_pcp(&m, ifp, ifp->if_pcp))
475 return (0);
476
477 if (PFIL_HOOKED_OUT(V_link_pfil_head))
478 switch (pfil_mbuf_out(V_link_pfil_head, &m, ifp, NULL)) {
479 case PFIL_DROPPED:
480 return (EACCES);
481 case PFIL_CONSUMED:
482 return (0);
483 }
484
485 #ifdef EXPERIMENTAL
486 #if defined(INET6) && defined(INET)
487 /* draft-ietf-6man-ipv6only-flag */
488 /* Catch ETHERTYPE_IP, and ETHERTYPE_[REV]ARP if we are v6-only. */
489 if ((ND_IFINFO(ifp)->flags & ND6_IFF_IPV6_ONLY_MASK) != 0) {
490 struct ether_header *eh;
491
492 eh = mtod(m, struct ether_header *);
493 switch (ntohs(eh->ether_type)) {
494 case ETHERTYPE_IP:
495 case ETHERTYPE_ARP:
496 case ETHERTYPE_REVARP:
497 m_freem(m);
498 return (EAFNOSUPPORT);
499 /* NOTREACHED */
500 break;
501 };
502 }
503 #endif
504 #endif
505
506 /*
507 * Queue message on interface, update output statistics if successful,
508 * and start output if interface not yet active.
509 *
510 * If KMSAN is enabled, use it to verify that the data does not contain
511 * any uninitialized bytes.
512 */
513 kmsan_check_mbuf(m, "ether_output");
514 return ((ifp->if_transmit)(ifp, m));
515 }
516
517 /*
518 * Process a received Ethernet packet; the packet is in the
519 * mbuf chain m with the ethernet header at the front.
520 */
521 static void
522 ether_input_internal(struct ifnet *ifp, struct mbuf *m)
523 {
524 struct ether_header *eh;
525 u_short etype;
526
527 if ((ifp->if_flags & IFF_UP) == 0) {
528 m_freem(m);
529 return;
530 }
531 #ifdef DIAGNOSTIC
532 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
533 if_printf(ifp, "discard frame at !IFF_DRV_RUNNING\n");
534 m_freem(m);
535 return;
536 }
537 #endif
538 if (__predict_false(m->m_len < ETHER_HDR_LEN)) {
539 /* Drivers should pullup and ensure the mbuf is valid */
540 if_printf(ifp, "discard frame w/o leading ethernet "
541 "header (len %d pkt len %d)\n",
542 m->m_len, m->m_pkthdr.len);
543 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
544 m_freem(m);
545 return;
546 }
547 eh = mtod(m, struct ether_header *);
548 etype = ntohs(eh->ether_type);
549 random_harvest_queue_ether(m, sizeof(*m));
550
551 #ifdef EXPERIMENTAL
552 #if defined(INET6) && defined(INET)
553 /* draft-ietf-6man-ipv6only-flag */
554 /* Catch ETHERTYPE_IP, and ETHERTYPE_[REV]ARP if we are v6-only. */
555 if ((ND_IFINFO(ifp)->flags & ND6_IFF_IPV6_ONLY_MASK) != 0) {
556 switch (etype) {
557 case ETHERTYPE_IP:
558 case ETHERTYPE_ARP:
559 case ETHERTYPE_REVARP:
560 m_freem(m);
561 return;
562 /* NOTREACHED */
563 break;
564 };
565 }
566 #endif
567 #endif
568
569 CURVNET_SET_QUIET(ifp->if_vnet);
570
571 if (ETHER_IS_MULTICAST(eh->ether_dhost)) {
572 if (ETHER_IS_BROADCAST(eh->ether_dhost))
573 m->m_flags |= M_BCAST;
574 else
575 m->m_flags |= M_MCAST;
576 if_inc_counter(ifp, IFCOUNTER_IMCASTS, 1);
577 }
578
579 #ifdef MAC
580 /*
581 * Tag the mbuf with an appropriate MAC label before any other
582 * consumers can get to it.
583 */
584 mac_ifnet_create_mbuf(ifp, m);
585 #endif
586
587 /*
588 * Give bpf a chance at the packet.
589 */
590 ETHER_BPF_MTAP(ifp, m);
591
592 if (!(ifp->if_capenable & IFCAP_HWSTATS))
593 if_inc_counter(ifp, IFCOUNTER_IBYTES, m->m_pkthdr.len);
594
595 /* Allow monitor mode to claim this frame, after stats are updated. */
596 if (ifp->if_flags & IFF_MONITOR) {
597 m_freem(m);
598 CURVNET_RESTORE();
599 return;
600 }
601
602 /* Handle input from a lagg(4) port */
603 if (ifp->if_type == IFT_IEEE8023ADLAG) {
604 KASSERT(lagg_input_ethernet_p != NULL,
605 ("%s: if_lagg not loaded!", __func__));
606 m = (*lagg_input_ethernet_p)(ifp, m);
607 if (m != NULL)
608 ifp = m->m_pkthdr.rcvif;
609 else {
610 CURVNET_RESTORE();
611 return;
612 }
613 }
614
615 /*
616 * If the hardware did not process an 802.1Q tag, do this now,
617 * to allow 802.1P priority frames to be passed to the main input
618 * path correctly.
619 */
620 if ((m->m_flags & M_VLANTAG) == 0 &&
621 ((etype == ETHERTYPE_VLAN) || (etype == ETHERTYPE_QINQ))) {
622 struct ether_vlan_header *evl;
623
624 if (m->m_len < sizeof(*evl) &&
625 (m = m_pullup(m, sizeof(*evl))) == NULL) {
626 #ifdef DIAGNOSTIC
627 if_printf(ifp, "cannot pullup VLAN header\n");
628 #endif
629 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
630 CURVNET_RESTORE();
631 return;
632 }
633
634 evl = mtod(m, struct ether_vlan_header *);
635 m->m_pkthdr.ether_vtag = ntohs(evl->evl_tag);
636 m->m_flags |= M_VLANTAG;
637
638 bcopy((char *)evl, (char *)evl + ETHER_VLAN_ENCAP_LEN,
639 ETHER_HDR_LEN - ETHER_TYPE_LEN);
640 m_adj(m, ETHER_VLAN_ENCAP_LEN);
641 eh = mtod(m, struct ether_header *);
642 }
643
644 M_SETFIB(m, ifp->if_fib);
645
646 /* Allow ng_ether(4) to claim this frame. */
647 if (ifp->if_l2com != NULL) {
648 KASSERT(ng_ether_input_p != NULL,
649 ("%s: ng_ether_input_p is NULL", __func__));
650 m->m_flags &= ~M_PROMISC;
651 (*ng_ether_input_p)(ifp, &m);
652 if (m == NULL) {
653 CURVNET_RESTORE();
654 return;
655 }
656 eh = mtod(m, struct ether_header *);
657 }
658
659 /*
660 * Allow if_bridge(4) to claim this frame.
661 *
662 * The BRIDGE_INPUT() macro will update ifp if the bridge changed it
663 * and the frame should be delivered locally.
664 *
665 * If M_BRIDGE_INJECT is set, the packet was received directly by the
666 * bridge via netmap, so "ifp" is the bridge itself and the packet
667 * should be re-examined.
668 */
669 if (ifp->if_bridge != NULL || (m->m_flags & M_BRIDGE_INJECT) != 0) {
670 m->m_flags &= ~M_PROMISC;
671 BRIDGE_INPUT(ifp, m);
672 if (m == NULL) {
673 CURVNET_RESTORE();
674 return;
675 }
676 eh = mtod(m, struct ether_header *);
677 }
678
679 #if defined(INET) || defined(INET6)
680 /*
681 * Clear M_PROMISC on frame so that carp(4) will see it when the
682 * mbuf flows up to Layer 3.
683 * FreeBSD's implementation of carp(4) uses the inprotosw
684 * to dispatch IPPROTO_CARP. carp(4) also allocates its own
685 * Ethernet addresses of the form 00:00:5e:00:01:xx, which
686 * is outside the scope of the M_PROMISC test below.
687 * TODO: Maintain a hash table of ethernet addresses other than
688 * ether_dhost which may be active on this ifp.
689 */
690 if (ifp->if_carp && (*carp_forus_p)(ifp, eh->ether_dhost)) {
691 m->m_flags &= ~M_PROMISC;
692 } else
693 #endif
694 {
695 /*
696 * If the frame received was not for our MAC address, set the
697 * M_PROMISC flag on the mbuf chain. The frame may need to
698 * be seen by the rest of the Ethernet input path in case of
699 * re-entry (e.g. bridge, vlan, netgraph) but should not be
700 * seen by upper protocol layers.
701 */
702 if (!ETHER_IS_MULTICAST(eh->ether_dhost) &&
703 bcmp(IF_LLADDR(ifp), eh->ether_dhost, ETHER_ADDR_LEN) != 0)
704 m->m_flags |= M_PROMISC;
705 }
706
707 ether_demux(ifp, m);
708 CURVNET_RESTORE();
709 }
710
711 /*
712 * Ethernet input dispatch; by default, direct dispatch here regardless of
713 * global configuration. However, if RSS is enabled, hook up RSS affinity
714 * so that when deferred or hybrid dispatch is enabled, we can redistribute
715 * load based on RSS.
716 *
717 * XXXRW: Would be nice if the ifnet passed up a flag indicating whether or
718 * not it had already done work distribution via multi-queue. Then we could
719 * direct dispatch in the event load balancing was already complete and
720 * handle the case of interfaces with different capabilities better.
721 *
722 * XXXRW: Sort of want an M_DISTRIBUTED flag to avoid multiple distributions
723 * at multiple layers?
724 *
725 * XXXRW: For now, enable all this only if RSS is compiled in, although it
726 * works fine without RSS. Need to characterise the performance overhead
727 * of the detour through the netisr code in the event the result is always
728 * direct dispatch.
729 */
730 static void
731 ether_nh_input(struct mbuf *m)
732 {
733
734 M_ASSERTPKTHDR(m);
735 KASSERT(m->m_pkthdr.rcvif != NULL,
736 ("%s: NULL interface pointer", __func__));
737 ether_input_internal(m->m_pkthdr.rcvif, m);
738 }
739
740 static struct netisr_handler ether_nh = {
741 .nh_name = "ether",
742 .nh_handler = ether_nh_input,
743 .nh_proto = NETISR_ETHER,
744 #ifdef RSS
745 .nh_policy = NETISR_POLICY_CPU,
746 .nh_dispatch = NETISR_DISPATCH_DIRECT,
747 .nh_m2cpuid = rss_m2cpuid,
748 #else
749 .nh_policy = NETISR_POLICY_SOURCE,
750 .nh_dispatch = NETISR_DISPATCH_DIRECT,
751 #endif
752 };
753
754 static void
755 ether_init(__unused void *arg)
756 {
757
758 netisr_register(&ether_nh);
759 }
760 SYSINIT(ether, SI_SUB_INIT_IF, SI_ORDER_ANY, ether_init, NULL);
761
762 static void
763 vnet_ether_init(const __unused void *arg)
764 {
765 struct pfil_head_args args;
766
767 args.pa_version = PFIL_VERSION;
768 args.pa_flags = PFIL_IN | PFIL_OUT;
769 args.pa_type = PFIL_TYPE_ETHERNET;
770 args.pa_headname = PFIL_ETHER_NAME;
771 V_link_pfil_head = pfil_head_register(&args);
772
773 #ifdef VIMAGE
774 netisr_register_vnet(&ether_nh);
775 #endif
776 }
777 VNET_SYSINIT(vnet_ether_init, SI_SUB_PROTO_IF, SI_ORDER_ANY,
778 vnet_ether_init, NULL);
779
780 #ifdef VIMAGE
781 static void
782 vnet_ether_pfil_destroy(const __unused void *arg)
783 {
784
785 pfil_head_unregister(V_link_pfil_head);
786 }
787 VNET_SYSUNINIT(vnet_ether_pfil_uninit, SI_SUB_PROTO_PFIL, SI_ORDER_ANY,
788 vnet_ether_pfil_destroy, NULL);
789
790 static void
791 vnet_ether_destroy(__unused void *arg)
792 {
793
794 netisr_unregister_vnet(&ether_nh);
795 }
796 VNET_SYSUNINIT(vnet_ether_uninit, SI_SUB_PROTO_IF, SI_ORDER_ANY,
797 vnet_ether_destroy, NULL);
798 #endif
799
800 static void
801 ether_input(struct ifnet *ifp, struct mbuf *m)
802 {
803 struct epoch_tracker et;
804 struct mbuf *mn;
805 bool needs_epoch;
806
807 needs_epoch = (ifp->if_flags & IFF_NEEDSEPOCH);
808 #ifdef INVARIANTS
809 /*
810 * This temporary code is here to prevent epoch unaware and unmarked
811 * drivers to panic the system. Once all drivers are taken care of,
812 * the whole INVARIANTS block should go away.
813 */
814 if (!needs_epoch && !in_epoch(net_epoch_preempt)) {
815 static bool printedonce;
816
817 needs_epoch = true;
818 if (!printedonce) {
819 printedonce = true;
820 if_printf(ifp, "called %s w/o net epoch! "
821 "PLEASE file a bug report.", __func__);
822 #ifdef KDB
823 kdb_backtrace();
824 #endif
825 }
826 }
827 #endif
828
829 /*
830 * The drivers are allowed to pass in a chain of packets linked with
831 * m_nextpkt. We split them up into separate packets here and pass
832 * them up. This allows the drivers to amortize the receive lock.
833 */
834 CURVNET_SET_QUIET(ifp->if_vnet);
835 if (__predict_false(needs_epoch))
836 NET_EPOCH_ENTER(et);
837 while (m) {
838 mn = m->m_nextpkt;
839 m->m_nextpkt = NULL;
840
841 /*
842 * We will rely on rcvif being set properly in the deferred
843 * context, so assert it is correct here.
844 */
845 MPASS((m->m_pkthdr.csum_flags & CSUM_SND_TAG) == 0);
846 KASSERT(m->m_pkthdr.rcvif == ifp, ("%s: ifnet mismatch m %p "
847 "rcvif %p ifp %p", __func__, m, m->m_pkthdr.rcvif, ifp));
848 netisr_dispatch(NETISR_ETHER, m);
849 m = mn;
850 }
851 if (__predict_false(needs_epoch))
852 NET_EPOCH_EXIT(et);
853 CURVNET_RESTORE();
854 }
855
856 /*
857 * Upper layer processing for a received Ethernet packet.
858 */
859 void
860 ether_demux(struct ifnet *ifp, struct mbuf *m)
861 {
862 struct ether_header *eh;
863 int i, isr;
864 u_short ether_type;
865
866 NET_EPOCH_ASSERT();
867 KASSERT(ifp != NULL, ("%s: NULL interface pointer", __func__));
868
869 /* Do not grab PROMISC frames in case we are re-entered. */
870 if (PFIL_HOOKED_IN(V_link_pfil_head) && !(m->m_flags & M_PROMISC)) {
871 i = pfil_mbuf_in(V_link_pfil_head, &m, ifp, NULL);
872 if (i != PFIL_PASS)
873 return;
874 }
875
876 eh = mtod(m, struct ether_header *);
877 ether_type = ntohs(eh->ether_type);
878
879 /*
880 * If this frame has a VLAN tag other than 0, call vlan_input()
881 * if its module is loaded. Otherwise, drop.
882 */
883 if ((m->m_flags & M_VLANTAG) &&
884 EVL_VLANOFTAG(m->m_pkthdr.ether_vtag) != 0) {
885 if (ifp->if_vlantrunk == NULL) {
886 if_inc_counter(ifp, IFCOUNTER_NOPROTO, 1);
887 m_freem(m);
888 return;
889 }
890 KASSERT(vlan_input_p != NULL,("%s: VLAN not loaded!",
891 __func__));
892 /* Clear before possibly re-entering ether_input(). */
893 m->m_flags &= ~M_PROMISC;
894 (*vlan_input_p)(ifp, m);
895 return;
896 }
897
898 /*
899 * Pass promiscuously received frames to the upper layer if the user
900 * requested this by setting IFF_PPROMISC. Otherwise, drop them.
901 */
902 if ((ifp->if_flags & IFF_PPROMISC) == 0 && (m->m_flags & M_PROMISC)) {
903 m_freem(m);
904 return;
905 }
906
907 /*
908 * Reset layer specific mbuf flags to avoid confusing upper layers.
909 */
910 m->m_flags &= ~M_VLANTAG;
911 m_clrprotoflags(m);
912
913 /*
914 * Dispatch frame to upper layer.
915 */
916 switch (ether_type) {
917 #ifdef INET
918 case ETHERTYPE_IP:
919 isr = NETISR_IP;
920 break;
921
922 case ETHERTYPE_ARP:
923 if (ifp->if_flags & IFF_NOARP) {
924 /* Discard packet if ARP is disabled on interface */
925 m_freem(m);
926 return;
927 }
928 isr = NETISR_ARP;
929 break;
930 #endif
931 #ifdef INET6
932 case ETHERTYPE_IPV6:
933 isr = NETISR_IPV6;
934 break;
935 #endif
936 default:
937 goto discard;
938 }
939
940 /* Strip off Ethernet header. */
941 m_adj(m, ETHER_HDR_LEN);
942
943 netisr_dispatch(isr, m);
944 return;
945
946 discard:
947 /*
948 * Packet is to be discarded. If netgraph is present,
949 * hand the packet to it for last chance processing;
950 * otherwise dispose of it.
951 */
952 if (ifp->if_l2com != NULL) {
953 KASSERT(ng_ether_input_orphan_p != NULL,
954 ("ng_ether_input_orphan_p is NULL"));
955 (*ng_ether_input_orphan_p)(ifp, m);
956 return;
957 }
958 m_freem(m);
959 }
960
961 /*
962 * Convert Ethernet address to printable (loggable) representation.
963 * This routine is for compatibility; it's better to just use
964 *
965 * printf("%6D", <pointer to address>, ":");
966 *
967 * since there's no static buffer involved.
968 */
969 char *
970 ether_sprintf(const u_char *ap)
971 {
972 static char etherbuf[18];
973 snprintf(etherbuf, sizeof (etherbuf), "%6D", ap, ":");
974 return (etherbuf);
975 }
976
977 /*
978 * Perform common duties while attaching to interface list
979 */
980 void
981 ether_ifattach(struct ifnet *ifp, const u_int8_t *lla)
982 {
983 int i;
984 struct ifaddr *ifa;
985 struct sockaddr_dl *sdl;
986
987 ifp->if_addrlen = ETHER_ADDR_LEN;
988 ifp->if_hdrlen = (ifp->if_capabilities & IFCAP_VLAN_MTU) != 0 ?
989 ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN : ETHER_HDR_LEN;
990 ifp->if_mtu = ETHERMTU;
991 if_attach(ifp);
992 ifp->if_output = ether_output;
993 ifp->if_input = ether_input;
994 ifp->if_resolvemulti = ether_resolvemulti;
995 ifp->if_requestencap = ether_requestencap;
996 #ifdef VIMAGE
997 ifp->if_reassign = ether_reassign;
998 #endif
999 if (ifp->if_baudrate == 0)
1000 ifp->if_baudrate = IF_Mbps(10); /* just a default */
1001 ifp->if_broadcastaddr = etherbroadcastaddr;
1002
1003 ifa = ifp->if_addr;
1004 KASSERT(ifa != NULL, ("%s: no lladdr!\n", __func__));
1005 sdl = (struct sockaddr_dl *)ifa->ifa_addr;
1006 sdl->sdl_type = IFT_ETHER;
1007 sdl->sdl_alen = ifp->if_addrlen;
1008 bcopy(lla, LLADDR(sdl), ifp->if_addrlen);
1009
1010 if (ifp->if_hw_addr != NULL)
1011 bcopy(lla, ifp->if_hw_addr, ifp->if_addrlen);
1012
1013 bpfattach(ifp, DLT_EN10MB, ETHER_HDR_LEN);
1014 if (ng_ether_attach_p != NULL)
1015 (*ng_ether_attach_p)(ifp);
1016
1017 /* Announce Ethernet MAC address if non-zero. */
1018 for (i = 0; i < ifp->if_addrlen; i++)
1019 if (lla[i] != 0)
1020 break;
1021 if (i != ifp->if_addrlen)
1022 if_printf(ifp, "Ethernet address: %6D\n", lla, ":");
1023
1024 uuid_ether_add(LLADDR(sdl));
1025
1026 /* All necessary bits are set up; announce it now. */
1027 EVENTHANDLER_INVOKE(ether_ifattach_event, ifp);
1028 if (IS_DEFAULT_VNET(curvnet))
1029 devctl_notify("ETHERNET", ifp->if_xname, "IFATTACH", NULL);
1030 }
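
/*
 * Illustrative sketch (not compiled): the usual shape of a NIC driver
 * attach path that ends in ether_ifattach().  The exdrv_* callbacks
 * and softc layout are hypothetical; "lladdr" is the MAC address read
 * from the hardware.
 */
#if 0
static void
exdrv_attach_ifnet(struct exdrv_softc *sc, int unit, const uint8_t *lladdr)
{
	struct ifnet *ifp;

	ifp = if_alloc(IFT_ETHER);
	ifp->if_softc = sc;
	if_initname(ifp, "exdrv", unit);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = exdrv_init;
	ifp->if_ioctl = exdrv_ioctl;
	ifp->if_transmit = exdrv_transmit;

	/* Installs ether_output()/ether_input() and announces the MAC. */
	ether_ifattach(ifp, lladdr);
}
#endif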
1031
1032 /*
1033 * Perform common duties while detaching an Ethernet interface
1034 */
1035 void
1036 ether_ifdetach(struct ifnet *ifp)
1037 {
1038 struct sockaddr_dl *sdl;
1039
1040 sdl = (struct sockaddr_dl *)(ifp->if_addr->ifa_addr);
1041 uuid_ether_del(LLADDR(sdl));
1042
1043 if (ifp->if_l2com != NULL) {
1044 KASSERT(ng_ether_detach_p != NULL,
1045 ("ng_ether_detach_p is NULL"));
1046 (*ng_ether_detach_p)(ifp);
1047 }
1048
1049 bpfdetach(ifp);
1050 if_detach(ifp);
1051 }
1052
1053 #ifdef VIMAGE
1054 void
1055 ether_reassign(struct ifnet *ifp, struct vnet *new_vnet, char *unused __unused)
1056 {
1057
1058 if (ifp->if_l2com != NULL) {
1059 KASSERT(ng_ether_detach_p != NULL,
1060 ("ng_ether_detach_p is NULL"));
1061 (*ng_ether_detach_p)(ifp);
1062 }
1063
1064 if (ng_ether_attach_p != NULL) {
1065 CURVNET_SET_QUIET(new_vnet);
1066 (*ng_ether_attach_p)(ifp);
1067 CURVNET_RESTORE();
1068 }
1069 }
1070 #endif
1071
1072 SYSCTL_DECL(_net_link);
1073 SYSCTL_NODE(_net_link, IFT_ETHER, ether, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
1074 "Ethernet");
1075
1076 #if 0
1077 /*
1078 * This is for reference. We have a table-driven version
1079 * of the little-endian crc32 generator, which is faster
1080 * than the double-loop.
1081 */
1082 uint32_t
1083 ether_crc32_le(const uint8_t *buf, size_t len)
1084 {
1085 size_t i;
1086 uint32_t crc, carry;
1087 int bit;
1088 uint8_t data;
1089
1090 crc = 0xffffffff; /* initial value */
1091
1092 for (i = 0; i < len; i++) {
1093 for (data = *buf++, bit = 0; bit < 8; bit++, data >>= 1) {
1094 carry = (crc ^ data) & 1;
1095 crc >>= 1;
1096 if (carry)
1097 crc = (crc ^ ETHER_CRC_POLY_LE);
1098 }
1099 }
1100
1101 return (crc);
1102 }
1103 #else
1104 uint32_t
1105 ether_crc32_le(const uint8_t *buf, size_t len)
1106 {
1107 static const uint32_t crctab[] = {
1108 0x00000000, 0x1db71064, 0x3b6e20c8, 0x26d930ac,
1109 0x76dc4190, 0x6b6b51f4, 0x4db26158, 0x5005713c,
1110 0xedb88320, 0xf00f9344, 0xd6d6a3e8, 0xcb61b38c,
1111 0x9b64c2b0, 0x86d3d2d4, 0xa00ae278, 0xbdbdf21c
1112 };
1113 size_t i;
1114 uint32_t crc;
1115
1116 crc = 0xffffffff; /* initial value */
1117
1118 for (i = 0; i < len; i++) {
1119 crc ^= buf[i];
1120 crc = (crc >> 4) ^ crctab[crc & 0xf];
1121 crc = (crc >> 4) ^ crctab[crc & 0xf];
1122 }
1123
1124 return (crc);
1125 }
1126 #endif
1127
1128 uint32_t
1129 ether_crc32_be(const uint8_t *buf, size_t len)
1130 {
1131 size_t i;
1132 uint32_t crc, carry;
1133 int bit;
1134 uint8_t data;
1135
1136 crc = 0xffffffff; /* initial value */
1137
1138 for (i = 0; i < len; i++) {
1139 for (data = *buf++, bit = 0; bit < 8; bit++, data >>= 1) {
1140 carry = ((crc & 0x80000000) ? 1 : 0) ^ (data & 0x01);
1141 crc <<= 1;
1142 if (carry)
1143 crc = (crc ^ ETHER_CRC_POLY_BE) | carry;
1144 }
1145 }
1146
1147 return (crc);
1148 }
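
/*
 * Illustrative sketch (not compiled): many NIC drivers use the CRC
 * routines above to build a 64-bin multicast hash filter.  The upper
 * 6 bits of the big-endian CRC of the destination address select the
 * bin; "hash" stands in for the device's two 32-bit filter registers.
 */
#if 0
static void
example_hash_multicast(uint32_t hash[2], const uint8_t *enaddr)
{
	uint32_t bin;

	bin = ether_crc32_be(enaddr, ETHER_ADDR_LEN) >> 26;
	hash[bin >> 5] |= 1U << (bin & 0x1f);
}
#endif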
1149
1150 int
1151 ether_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
1152 {
1153 struct ifaddr *ifa = (struct ifaddr *) data;
1154 struct ifreq *ifr = (struct ifreq *) data;
1155 int error = 0;
1156
1157 switch (command) {
1158 case SIOCSIFADDR:
1159 ifp->if_flags |= IFF_UP;
1160
1161 switch (ifa->ifa_addr->sa_family) {
1162 #ifdef INET
1163 case AF_INET:
1164 ifp->if_init(ifp->if_softc); /* before arpwhohas */
1165 arp_ifinit(ifp, ifa);
1166 break;
1167 #endif
1168 default:
1169 ifp->if_init(ifp->if_softc);
1170 break;
1171 }
1172 break;
1173
1174 case SIOCGIFADDR:
1175 bcopy(IF_LLADDR(ifp), &ifr->ifr_addr.sa_data[0],
1176 ETHER_ADDR_LEN);
1177 break;
1178
1179 case SIOCSIFMTU:
1180 /*
1181 * Set the interface MTU.
1182 */
1183 if (ifr->ifr_mtu > ETHERMTU) {
1184 error = EINVAL;
1185 } else {
1186 ifp->if_mtu = ifr->ifr_mtu;
1187 }
1188 break;
1189
1190 case SIOCSLANPCP:
1191 error = priv_check(curthread, PRIV_NET_SETLANPCP);
1192 if (error != 0)
1193 break;
1194 if (ifr->ifr_lan_pcp > 7 &&
1195 ifr->ifr_lan_pcp != IFNET_PCP_NONE) {
1196 error = EINVAL;
1197 } else {
1198 ifp->if_pcp = ifr->ifr_lan_pcp;
1199 /* broadcast event about PCP change */
1200 EVENTHANDLER_INVOKE(ifnet_event, ifp, IFNET_EVENT_PCP);
1201 }
1202 break;
1203
1204 case SIOCGLANPCP:
1205 ifr->ifr_lan_pcp = ifp->if_pcp;
1206 break;
1207
1208 default:
1209 error = EINVAL; /* XXX netbsd has ENOTTY??? */
1210 break;
1211 }
1212 return (error);
1213 }
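
/*
 * Illustrative sketch (not compiled): a driver ioctl handler normally
 * services the requests it cares about itself and falls back to
 * ether_ioctl() above for everything else (exdrv_ioctl is a
 * hypothetical name).
 */
#if 0
static int
exdrv_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	int error = 0;

	switch (command) {
	case SIOCSIFFLAGS:
		/* Reprogram promiscuous mode, bring the port up or down. */
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}
	return (error);
}
#endif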
1214
1215 static int
1216 ether_resolvemulti(struct ifnet *ifp, struct sockaddr **llsa,
1217 struct sockaddr *sa)
1218 {
1219 struct sockaddr_dl *sdl;
1220 #ifdef INET
1221 struct sockaddr_in *sin;
1222 #endif
1223 #ifdef INET6
1224 struct sockaddr_in6 *sin6;
1225 #endif
1226 u_char *e_addr;
1227
1228 switch(sa->sa_family) {
1229 case AF_LINK:
1230 /*
1231 * No mapping needed. Just check that it's a valid MC address.
1232 */
1233 sdl = (struct sockaddr_dl *)sa;
1234 e_addr = LLADDR(sdl);
1235 if (!ETHER_IS_MULTICAST(e_addr))
1236 return EADDRNOTAVAIL;
1237 *llsa = NULL;
1238 return 0;
1239
1240 #ifdef INET
1241 case AF_INET:
1242 sin = (struct sockaddr_in *)sa;
1243 if (!IN_MULTICAST(ntohl(sin->sin_addr.s_addr)))
1244 return EADDRNOTAVAIL;
1245 sdl = link_init_sdl(ifp, *llsa, IFT_ETHER);
1246 sdl->sdl_alen = ETHER_ADDR_LEN;
1247 e_addr = LLADDR(sdl);
1248 ETHER_MAP_IP_MULTICAST(&sin->sin_addr, e_addr);
1249 *llsa = (struct sockaddr *)sdl;
1250 return 0;
1251 #endif
1252 #ifdef INET6
1253 case AF_INET6:
1254 sin6 = (struct sockaddr_in6 *)sa;
1255 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
1256 /*
1257 * An IP6 address of 0 means listen to all
1258 * of the Ethernet multicast address used for IP6.
1259 * (This is used for multicast routers.)
1260 */
1261 ifp->if_flags |= IFF_ALLMULTI;
1262 *llsa = NULL;
1263 return 0;
1264 }
1265 if (!IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr))
1266 return EADDRNOTAVAIL;
1267 sdl = link_init_sdl(ifp, *llsa, IFT_ETHER);
1268 sdl->sdl_alen = ETHER_ADDR_LEN;
1269 e_addr = LLADDR(sdl);
1270 ETHER_MAP_IPV6_MULTICAST(&sin6->sin6_addr, e_addr);
1271 *llsa = (struct sockaddr *)sdl;
1272 return 0;
1273 #endif
1274
1275 default:
1276 /*
1277 * Well, the text isn't quite right, but it's the name
1278 * that counts...
1279 */
1280 return EAFNOSUPPORT;
1281 }
1282 }
1283
1284 static moduledata_t ether_mod = {
1285 .name = "ether",
1286 };
1287
1288 void
1289 ether_vlan_mtap(struct bpf_if *bp, struct mbuf *m, void *data, u_int dlen)
1290 {
1291 struct ether_vlan_header vlan;
1292 struct mbuf mv, mb;
1293
1294 KASSERT((m->m_flags & M_VLANTAG) != 0,
1295 ("%s: vlan information not present", __func__));
1296 KASSERT(m->m_len >= sizeof(struct ether_header),
1297 ("%s: mbuf not large enough for header", __func__));
1298 bcopy(mtod(m, char *), &vlan, sizeof(struct ether_header));
1299 vlan.evl_proto = vlan.evl_encap_proto;
1300 vlan.evl_encap_proto = htons(ETHERTYPE_VLAN);
1301 vlan.evl_tag = htons(m->m_pkthdr.ether_vtag);
1302 m->m_len -= sizeof(struct ether_header);
1303 m->m_data += sizeof(struct ether_header);
1304 /*
1305 * If a data link has been supplied by the caller, then we will need to
1306 * re-create a stack allocated mbuf chain with the following structure:
1307 *
1308 * (1) mbuf #1 will contain the supplied data link
1309 * (2) mbuf #2 will contain the vlan header
1310 * (3) mbuf #3 will contain the original mbuf's packet data
1311 *
1312 * Otherwise, submit the packet and vlan header via bpf_mtap2().
1313 */
1314 if (data != NULL) {
1315 mv.m_next = m;
1316 mv.m_data = (caddr_t)&vlan;
1317 mv.m_len = sizeof(vlan);
1318 mb.m_next = &mv;
1319 mb.m_data = data;
1320 mb.m_len = dlen;
1321 bpf_mtap(bp, &mb);
1322 } else
1323 bpf_mtap2(bp, &vlan, sizeof(vlan), m);
1324 m->m_len += sizeof(struct ether_header);
1325 m->m_data -= sizeof(struct ether_header);
1326 }
1327
1328 struct mbuf *
1329 ether_vlanencap_proto(struct mbuf *m, uint16_t tag, uint16_t proto)
1330 {
1331 struct ether_vlan_header *evl;
1332
1333 M_PREPEND(m, ETHER_VLAN_ENCAP_LEN, M_NOWAIT);
1334 if (m == NULL)
1335 return (NULL);
1336 /* M_PREPEND takes care of m_len, m_pkthdr.len for us */
1337
1338 if (m->m_len < sizeof(*evl)) {
1339 m = m_pullup(m, sizeof(*evl));
1340 if (m == NULL)
1341 return (NULL);
1342 }
1343
1344 /*
1345 * Transform the Ethernet header into an Ethernet header
1346 * with 802.1Q encapsulation.
1347 */
1348 evl = mtod(m, struct ether_vlan_header *);
1349 bcopy((char *)evl + ETHER_VLAN_ENCAP_LEN,
1350 (char *)evl, ETHER_HDR_LEN - ETHER_TYPE_LEN);
1351 evl->evl_encap_proto = htons(proto);
1352 evl->evl_tag = htons(tag);
1353 return (m);
1354 }
1355
1356 void
1357 ether_bpf_mtap_if(struct ifnet *ifp, struct mbuf *m)
1358 {
1359 if (bpf_peers_present(ifp->if_bpf)) {
1360 M_ASSERTVALID(m);
1361 if ((m->m_flags & M_VLANTAG) != 0)
1362 ether_vlan_mtap(ifp->if_bpf, m, NULL, 0);
1363 else
1364 bpf_mtap(ifp->if_bpf, m);
1365 }
1366 }
1367
1368 static SYSCTL_NODE(_net_link, IFT_L2VLAN, vlan, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
1369 "IEEE 802.1Q VLAN");
1370 static SYSCTL_NODE(_net_link_vlan, PF_LINK, link,
1371 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
1372 "for consistency");
1373
1374 VNET_DEFINE_STATIC(int, soft_pad);
1375 #define V_soft_pad VNET(soft_pad)
1376 SYSCTL_INT(_net_link_vlan, OID_AUTO, soft_pad, CTLFLAG_RW | CTLFLAG_VNET,
1377 &VNET_NAME(soft_pad), 0,
1378 "pad short frames before tagging");
1379
1380 /*
1381 * For now, make preserving PCP via an mbuf tag optional, as it increases
1382 * per-packet memory allocations and frees. In the future, it would be
1383 * preferable to reuse ether_vtag for this, or similar.
1384 */
1385 VNET_DEFINE(int, vlan_mtag_pcp) = 0;
1386 #define V_vlan_mtag_pcp VNET(vlan_mtag_pcp)
1387 SYSCTL_INT(_net_link_vlan, OID_AUTO, mtag_pcp, CTLFLAG_RW | CTLFLAG_VNET,
1388 &VNET_NAME(vlan_mtag_pcp), 0,
1389 "Retain VLAN PCP information as packets are passed up the stack");
1390
1391 static inline bool
1392 ether_do_pcp(struct ifnet *ifp, struct mbuf *m)
1393 {
1394 if (ifp->if_type == IFT_L2VLAN)
1395 return (false);
1396 if (ifp->if_pcp != IFNET_PCP_NONE || (m->m_flags & M_VLANTAG) != 0)
1397 return (true);
1398 if (V_vlan_mtag_pcp &&
1399 m_tag_locate(m, MTAG_8021Q, MTAG_8021Q_PCP_OUT, NULL) != NULL)
1400 return (true);
1401 return (false);
1402 }
1403
1404 bool
1405 ether_8021q_frame(struct mbuf **mp, struct ifnet *ife, struct ifnet *p,
1406 const struct ether_8021q_tag *qtag)
1407 {
1408 struct m_tag *mtag;
1409 int n;
1410 uint16_t tag;
1411 uint8_t pcp = qtag->pcp;
1412 static const char pad[8]; /* just zeros */
1413
1414 /*
1415 * Pad the frame to the minimum size allowed if told to.
1416 * This option is in accord with IEEE Std 802.1Q, 2003 Ed.,
1417 * paragraph C.4.4.3.b. It can help to work around buggy
1418 * bridges that violate paragraph C.4.4.3.a from the same
1419 * document, i.e., fail to pad short frames after untagging.
1420 * E.g., a tagged frame 66 bytes long (incl. FCS) is OK, but
1421 * untagging it will produce a 62-byte frame, which is a runt
1422 * and requires padding. There are VLAN-enabled network
1423 * devices that just discard such runts instead or mishandle
1424 * them somehow.
1425 */
1426 if (V_soft_pad && p->if_type == IFT_ETHER) {
1427 for (n = ETHERMIN + ETHER_HDR_LEN - (*mp)->m_pkthdr.len;
1428 n > 0; n -= sizeof(pad)) {
1429 if (!m_append(*mp, min(n, sizeof(pad)), pad))
1430 break;
1431 }
1432 if (n > 0) {
1433 m_freem(*mp);
1434 *mp = NULL;
1435 if_printf(ife, "cannot pad short frame");
1436 return (false);
1437 }
1438 }
1439
1440 /*
1441 * If PCP is set in mbuf, use it
1442 */
1443 if ((*mp)->m_flags & M_VLANTAG) {
1444 pcp = EVL_PRIOFTAG((*mp)->m_pkthdr.ether_vtag);
1445 }
1446
1447 /*
1448 * If underlying interface can do VLAN tag insertion itself,
1449 * just pass the packet along. However, we need some way to
1450 * tell the interface where the packet came from so that it
1451 * knows how to find the VLAN tag to use, so we attach a
1452 * packet tag that holds it.
1453 */
1454 if (V_vlan_mtag_pcp && (mtag = m_tag_locate(*mp, MTAG_8021Q,
1455 MTAG_8021Q_PCP_OUT, NULL)) != NULL)
1456 tag = EVL_MAKETAG(qtag->vid, *(uint8_t *)(mtag + 1), 0);
1457 else
1458 tag = EVL_MAKETAG(qtag->vid, pcp, 0);
1459 if ((p->if_capenable & IFCAP_VLAN_HWTAGGING) &&
1460 (qtag->proto == ETHERTYPE_VLAN)) {
1461 (*mp)->m_pkthdr.ether_vtag = tag;
1462 (*mp)->m_flags |= M_VLANTAG;
1463 } else {
1464 *mp = ether_vlanencap_proto(*mp, tag, qtag->proto);
1465 if (*mp == NULL) {
1466 if_printf(ife, "unable to prepend 802.1Q header");
1467 return (false);
1468 }
1469 (*mp)->m_flags &= ~M_VLANTAG;
1470 }
1471 return (true);
1472 }
1473
1474 /*
1475 * Allocate an address from the FreeBSD Foundation OUI. This uses a
1476 * cryptographic hash function on the containing jail's name, UUID and the
1477 * interface name to attempt to provide a unique but stable address.
1478 * Pseudo-interfaces which require a MAC address should use this function to
1479 * allocate non-locally-administered addresses.
1480 */
1481 void
1482 ether_gen_addr_byname(const char *nameunit, struct ether_addr *hwaddr)
1483 {
1484 SHA1_CTX ctx;
1485 char *buf;
1486 char uuid[HOSTUUIDLEN + 1];
1487 uint64_t addr;
1488 int i, sz;
1489 char digest[SHA1_RESULTLEN];
1490 char jailname[MAXHOSTNAMELEN];
1491
1492 getcredhostuuid(curthread->td_ucred, uuid, sizeof(uuid));
1493 if (strncmp(uuid, DEFAULT_HOSTUUID, sizeof(uuid)) == 0) {
1494 /* Fall back to a random mac address. */
1495 goto rando;
1496 }
1497
1498 /* If each (vnet) jail would also have a unique hostuuid this would not
1499 * be necessary. */
1500 getjailname(curthread->td_ucred, jailname, sizeof(jailname));
1501 sz = asprintf(&buf, M_TEMP, "%s-%s-%s", uuid, nameunit,
1502 jailname);
1503 if (sz < 0) {
1504 /* Fall back to a random mac address. */
1505 goto rando;
1506 }
1507
1508 SHA1Init(&ctx);
1509 SHA1Update(&ctx, buf, sz);
1510 SHA1Final(digest, &ctx);
1511 free(buf, M_TEMP);
1512
1513 addr = ((digest[0] << 16) | (digest[1] << 8) | digest[2]) &
1514 OUI_FREEBSD_GENERATED_MASK;
1515 addr = OUI_FREEBSD(addr);
1516 for (i = 0; i < ETHER_ADDR_LEN; ++i) {
1517 hwaddr->octet[i] = addr >> ((ETHER_ADDR_LEN - i - 1) * 8) &
1518 0xFF;
1519 }
1520
1521 return;
1522 rando:
1523 arc4rand(hwaddr, sizeof(*hwaddr), 0);
1524 /* Unicast */
1525 hwaddr->octet[0] &= 0xFE;
1526 /* Locally administered. */
1527 hwaddr->octet[0] |= 0x02;
1528 }
1529
1530 void
1531 ether_gen_addr(struct ifnet *ifp, struct ether_addr *hwaddr)
1532 {
1533 ether_gen_addr_byname(if_name(ifp), hwaddr);
1534 }
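
/*
 * Illustrative sketch (not compiled): a pseudo-interface clone routine
 * can obtain a stable, non-locally-administered address as described
 * above and hand it straight to ether_ifattach().
 */
#if 0
static void
example_clone_attach(struct ifnet *ifp)
{
	struct ether_addr eaddr;

	ether_gen_addr(ifp, &eaddr);
	ether_ifattach(ifp, eaddr.octet);
}
#endif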
1535
1536 DECLARE_MODULE(ether, ether_mod, SI_SUB_INIT_IF, SI_ORDER_ANY);
1537 MODULE_VERSION(ether, 1);
1538