/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2008 The FreeBSD Foundation
 * Copyright (c) 2009-2021 Bjoern A. Zeeb <bz@FreeBSD.org>
 *
 * This software was developed by CK Software GmbH under sponsorship
 * from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * A pair of virtual back-to-back connected Ethernet-like interfaces
 * (``two interfaces with a virtual cross-over cable'').
 *
 * This is mostly intended to be used to provide connectivity between
 * different virtual network stack instances.
 */
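
/*
 * Typical usage, sketched from userland (the unit number and the jail
 * name "myjail" below are just examples):
 *
 *   ifconfig epair create              creates, e.g., epair0a and epair0b
 *   ifconfig epair0a 192.0.2.1/30 up
 *   ifconfig epair0b vnet myjail       move the b side into a vnet jail
 *
 * Frames transmitted on one side are delivered to the input path of the
 * other side.
 */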

#include <sys/cdefs.h>
#include "opt_rss.h"
#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/hash.h>
#include <sys/interrupt.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/libkern.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/taskqueue.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_clone.h>
#include <net/if_media.h>
#include <net/if_private.h>
#include <net/if_types.h>
#include <net/netisr.h>
#ifdef RSS
#include <net/rss_config.h>
#ifdef INET
#include <netinet/in_rss.h>
#endif
#ifdef INET6
#include <netinet6/in6_rss.h>
#endif
#endif
#include <net/vnet.h>

static const char epairname[] = "epair";
#define RXRSIZE 4096    /* Probably overkill by 4-8x. */

static MALLOC_DEFINE(M_EPAIR, epairname,
    "Pair of virtual cross-over connected Ethernet-like interfaces");

VNET_DEFINE_STATIC(struct if_clone *, epair_cloner);
#define V_epair_cloner  VNET(epair_cloner)

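/*
 * Hint advanced on every MAC address generation to keep the generated
 * addresses distinct; protected by epair_n_index_mtx.
 */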
static unsigned int next_index = 0;
#define EPAIR_LOCK_INIT()       mtx_init(&epair_n_index_mtx, "epairidx", \
                                    NULL, MTX_DEF)
#define EPAIR_LOCK_DESTROY()    mtx_destroy(&epair_n_index_mtx)
#define EPAIR_LOCK()            mtx_lock(&epair_n_index_mtx)
#define EPAIR_UNLOCK()          mtx_unlock(&epair_n_index_mtx)

struct epair_softc;
struct epair_queue {
        struct mtx              mtx;
        struct mbufq            q;
        int                     id;
        enum {
                EPAIR_QUEUE_IDLE,
                EPAIR_QUEUE_WAKING,
                EPAIR_QUEUE_RUNNING,
        } state;
        struct task             tx_task;
        struct epair_softc      *sc;
};

static struct mtx epair_n_index_mtx;
struct epair_softc {
        struct ifnet            *ifp;           /* This ifp. */
        struct ifnet            *oifp;          /* other ifp of pair. */
        int                     num_queues;
        struct epair_queue      *queues;
        struct ifmedia          media;          /* Media config (fake). */
        STAILQ_ENTRY(epair_softc) entry;
};

struct epair_tasks_t {
        int                     tasks;
        struct taskqueue        *tq[MAXCPU];
};

static struct epair_tasks_t epair_tasks;

static void
epair_clear_mbuf(struct mbuf *m)
{
        M_ASSERTPKTHDR(m);

        /* Remove any CSUM_SND_TAG as ether_input will barf. */
        if (m->m_pkthdr.csum_flags & CSUM_SND_TAG) {
                m_snd_tag_rele(m->m_pkthdr.snd_tag);
                m->m_pkthdr.snd_tag = NULL;
                m->m_pkthdr.csum_flags &= ~CSUM_SND_TAG;
        }

        /* Clear vlan information. */
        m->m_flags &= ~M_VLANTAG;
        m->m_pkthdr.ether_vtag = 0;

        m_tag_delete_nonpersistent(m);
}

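/*
 * Deferred transmit task: drain the queue and feed each packet to the
 * receiving interface's input path.
 */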
static void
epair_tx_start_deferred(void *arg, int pending)
{
        struct epair_queue *q = (struct epair_queue *)arg;
        if_t ifp;
        struct mbuf *m, *n;
        bool resched;

        ifp = q->sc->ifp;

        if_ref(ifp);
        CURVNET_SET(ifp->if_vnet);

        mtx_lock(&q->mtx);
        m = mbufq_flush(&q->q);
        q->state = EPAIR_QUEUE_RUNNING;
        mtx_unlock(&q->mtx);

        while (m != NULL) {
                n = STAILQ_NEXT(m, m_stailqpkt);
                m->m_nextpkt = NULL;
                if_input(ifp, m);
                m = n;
        }

        /*
         * Avoid flushing the queue more than once per task.  We can otherwise
         * end up starving ourselves in a multi-epair routing configuration.
         */
        mtx_lock(&q->mtx);
        if (!mbufq_empty(&q->q)) {
                resched = true;
                q->state = EPAIR_QUEUE_WAKING;
        } else {
                resched = false;
                q->state = EPAIR_QUEUE_IDLE;
        }
        mtx_unlock(&q->mtx);

        if (resched)
                taskqueue_enqueue(epair_tasks.tq[q->id], &q->tx_task);

        CURVNET_RESTORE();
        if_rele(ifp);
}

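/*
 * Map a packet to one of the pair's queues; with RSS the flow hash is used
 * so that flows are spread across the per-CPU taskqueues.
 */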
static struct epair_queue *
epair_select_queue(struct epair_softc *sc, struct mbuf *m)
{
        uint32_t bucket;
#ifdef RSS
        struct ether_header *eh;
        int ret;

        ret = rss_m2bucket(m, &bucket);
        if (ret) {
                /* Actually hash the packet. */
                eh = mtod(m, struct ether_header *);

                switch (ntohs(eh->ether_type)) {
#ifdef INET
                case ETHERTYPE_IP:
                        rss_soft_m2cpuid_v4(m, 0, &bucket);
                        break;
#endif
#ifdef INET6
                case ETHERTYPE_IPV6:
                        rss_soft_m2cpuid_v6(m, 0, &bucket);
                        break;
#endif
                default:
                        bucket = 0;
                        break;
                }
        }
        bucket %= sc->num_queues;
#else
        bucket = 0;
#endif
        return (&sc->queues[bucket]);
}

static void
epair_prepare_mbuf(struct mbuf *m, struct ifnet *src_ifp)
{
        M_ASSERTPKTHDR(m);
        epair_clear_mbuf(m);
        if_setrcvif(m, src_ifp);
        M_SETFIB(m, src_ifp->if_fib);

        MPASS(m->m_nextpkt == NULL);
        MPASS((m->m_pkthdr.csum_flags & CSUM_SND_TAG) == 0);
}

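/*
 * Enqueue a packet on the peer's receive queue and, if that queue was idle,
 * schedule its deferred transmit task.
 */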
static void
epair_menq(struct mbuf *m, struct epair_softc *osc)
{
        struct epair_queue *q;
        struct ifnet *ifp, *oifp;
        int error, len;
        bool mcast;

        /*
         * I know this looks weird. We pass the "other sc" as we need that one
         * and can get both ifps from it as well.
         */
        oifp = osc->ifp;
        ifp = osc->oifp;

        epair_prepare_mbuf(m, oifp);

        /* Save values as once the mbuf is queued, it's not ours anymore. */
        len = m->m_pkthdr.len;
        mcast = (m->m_flags & (M_BCAST | M_MCAST)) != 0;

        q = epair_select_queue(osc, m);

        mtx_lock(&q->mtx);
        if (q->state == EPAIR_QUEUE_IDLE) {
                q->state = EPAIR_QUEUE_WAKING;
                taskqueue_enqueue(epair_tasks.tq[q->id], &q->tx_task);
        }
        error = mbufq_enqueue(&q->q, m);
        mtx_unlock(&q->mtx);

        if (error != 0) {
                m_freem(m);
                if_inc_counter(ifp, IFCOUNTER_OQDROPS, 1);
        } else {
                if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
                if_inc_counter(ifp, IFCOUNTER_OBYTES, len);
                if (mcast)
                        if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1);
                if_inc_counter(oifp, IFCOUNTER_IPACKETS, 1);
        }
}

static void
epair_start(struct ifnet *ifp)
{
        struct mbuf *m;
        struct epair_softc *sc;
        struct ifnet *oifp;

        /*
         * We get packets here from ether_output() via if_handoff() and
         * put them into the receive queue (rxq) of the other interface
         * (oifp) of our pair.
         */
        sc = ifp->if_softc;
        oifp = sc->oifp;
        sc = oifp->if_softc;
        for (;;) {
                IFQ_DEQUEUE(&ifp->if_snd, m);
                if (m == NULL)
                        break;
                M_ASSERTPKTHDR(m);
                BPF_MTAP(ifp, m);

                /* In case either interface is not usable, drop the packet. */
                if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
                    (ifp->if_flags & IFF_UP) == 0 ||
                    (oifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
                    (oifp->if_flags & IFF_UP) == 0) {
                        m_freem(m);
                        continue;
                }

                epair_menq(m, sc);
        }
}

static int
epair_transmit(struct ifnet *ifp, struct mbuf *m)
{
        struct epair_softc *sc;
        struct ifnet *oifp;
#ifdef ALTQ
        int len;
        bool mcast;
#endif

        if (m == NULL)
                return (0);
        M_ASSERTPKTHDR(m);

        /*
         * We could just transmit this, but it makes testing easier if we're a
         * little bit more like real hardware.
         * Allow just that little bit extra for ethernet (and vlan) headers.
         */
        if (m->m_pkthdr.len > (ifp->if_mtu + sizeof(struct ether_vlan_header))) {
                m_freem(m);
                if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
                return (E2BIG);
        }

        /*
         * We are not going to use the interface en/dequeue mechanism
         * on the TX side.  We are called from ether_output_frame()
         * and will put the packet into the receive-queue (rxq) of the
         * other interface (oifp) of our pair.
         */
        if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
                m_freem(m);
                if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
                return (ENXIO);
        }
        if ((ifp->if_flags & IFF_UP) == 0) {
                m_freem(m);
                if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
                return (ENETDOWN);
        }

        BPF_MTAP(ifp, m);

        /*
         * In case the outgoing interface is not usable,
         * drop the packet.
         */
        sc = ifp->if_softc;
        oifp = sc->oifp;
        if ((oifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
            (oifp->if_flags & IFF_UP) == 0) {
                if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
                m_freem(m);
                return (0);
        }

#ifdef ALTQ
        len = m->m_pkthdr.len;
        mcast = (m->m_flags & (M_BCAST | M_MCAST)) != 0;
        int error = 0;

        /* Support ALTQ via the classic if_start() path. */
        IF_LOCK(&ifp->if_snd);
        if (ALTQ_IS_ENABLED(&ifp->if_snd)) {
                ALTQ_ENQUEUE(&ifp->if_snd, m, NULL, error);
                if (error)
                        if_inc_counter(ifp, IFCOUNTER_OQDROPS, 1);
                IF_UNLOCK(&ifp->if_snd);
                if (!error) {
                        if_inc_counter(ifp, IFCOUNTER_OBYTES, len);
                        if (mcast)
                                if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1);
                        epair_start(ifp);
                }
                return (error);
        }
        IF_UNLOCK(&ifp->if_snd);
#endif

        epair_menq(m, oifp->if_softc);
        return (0);
}

static void
epair_qflush(struct ifnet *ifp __unused)
{
}

static int
epair_media_change(struct ifnet *ifp __unused)
{

        /* Do nothing. */
        return (0);
}

static void
epair_media_status(struct ifnet *ifp __unused, struct ifmediareq *imr)
{

        imr->ifm_status = IFM_AVALID | IFM_ACTIVE;
        imr->ifm_active = IFM_ETHER | IFM_10G_T | IFM_FDX;
}

static int
epair_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
        struct epair_softc *sc;
        struct ifreq *ifr;
        int error;

        ifr = (struct ifreq *)data;
        switch (cmd) {
        case SIOCSIFFLAGS:
        case SIOCADDMULTI:
        case SIOCDELMULTI:
                error = 0;
                break;

        case SIOCSIFMEDIA:
        case SIOCGIFMEDIA:
                sc = ifp->if_softc;
                error = ifmedia_ioctl(ifp, ifr, &sc->media, cmd);
                break;

        case SIOCSIFMTU:
                /* We basically allow all kinds of MTUs. */
                ifp->if_mtu = ifr->ifr_mtu;
                error = 0;
                break;

        default:
                /* Let the common ethernet handler process this. */
                error = ether_ioctl(ifp, cmd, data);
                break;
        }

        return (error);
}

static void
epair_init(void *dummy __unused)
{
}

/*
 * Interface cloning functions.
 * We use our private ones so that we can create/destroy our secondary
 * device along with the primary one.
 */
static int
epair_clone_match(struct if_clone *ifc, const char *name)
{
        const char *cp;

        /*
         * Our base name is epair.
         * Our interfaces will be named epair<n>[ab].
         * So accept anything of the following list:
         * - epair
         * - epair<n>
         * but not the epair<n>[ab] versions.
         */
        if (strncmp(epairname, name, sizeof(epairname)-1) != 0)
                return (0);

        for (cp = name + sizeof(epairname) - 1; *cp != '\0'; cp++) {
                if (*cp < '0' || *cp > '9')
                        return (0);
        }

        return (1);
}

static void
epair_clone_add(struct if_clone *ifc, struct epair_softc *scb)
{
        struct ifnet *ifp;
        uint8_t eaddr[ETHER_ADDR_LEN];  /* 00:00:00:00:00:00 */

        ifp = scb->ifp;
        /* Copy epairNa etheraddr and change the last byte. */
        memcpy(eaddr, scb->oifp->if_hw_addr, ETHER_ADDR_LEN);
        eaddr[5] = 0x0b;
        ether_ifattach(ifp, eaddr);

        if_clone_addif(ifc, ifp);
}

static struct epair_softc *
epair_alloc_sc(struct if_clone *ifc)
{
        struct epair_softc *sc;

        struct ifnet *ifp = if_alloc(IFT_ETHER);
        sc = malloc(sizeof(struct epair_softc), M_EPAIR, M_WAITOK | M_ZERO);
        sc->ifp = ifp;
        sc->num_queues = epair_tasks.tasks;
        sc->queues = mallocarray(sc->num_queues, sizeof(struct epair_queue),
            M_EPAIR, M_WAITOK);
        for (int i = 0; i < sc->num_queues; i++) {
                struct epair_queue *q = &sc->queues[i];
                q->id = i;
                q->state = EPAIR_QUEUE_IDLE;
                mtx_init(&q->mtx, "epairq", NULL, MTX_DEF | MTX_NEW);
                mbufq_init(&q->q, RXRSIZE);
                q->sc = sc;
                NET_TASK_INIT(&q->tx_task, 0, epair_tx_start_deferred, q);
        }

        /* Initialise pseudo media types. */
        ifmedia_init(&sc->media, 0, epair_media_change, epair_media_status);
        ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_T, 0, NULL);
        ifmedia_set(&sc->media, IFM_ETHER | IFM_10G_T);

        return (sc);
}

static void
epair_setup_ifp(struct epair_softc *sc, char *name, int unit)
{
        struct ifnet *ifp = sc->ifp;

        ifp->if_softc = sc;
        strlcpy(ifp->if_xname, name, IFNAMSIZ);
        ifp->if_dname = epairname;
        ifp->if_dunit = unit;
        ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
        ifp->if_capabilities = IFCAP_VLAN_MTU;
        ifp->if_capenable = IFCAP_VLAN_MTU;
        ifp->if_transmit = epair_transmit;
        ifp->if_qflush = epair_qflush;
        ifp->if_start = epair_start;
        ifp->if_ioctl = epair_ioctl;
        ifp->if_init = epair_init;
        if_setsendqlen(ifp, ifqmaxlen);
        if_setsendqready(ifp);

        ifp->if_baudrate = IF_Gbps(10); /* arbitrary maximum */
}

static void
epair_generate_mac(struct epair_softc *sc, uint8_t *eaddr)
{
        uint32_t key[3];
        uint32_t hash;
        uint64_t hostid;

        EPAIR_LOCK();
#ifdef SMP
        /* Get an approximate distribution. */
        hash = next_index % mp_ncpus;
#else
        hash = 0;
#endif
        EPAIR_UNLOCK();

        /*
         * Calculate the etheraddr by hashing the hostid and the
         * interface index.  The result is hopefully unique.
         * Note that the "a" component of an epair instance may get moved
         * to a different VNET after creation.  In that case its index
         * will be freed and can get reused by a new epair instance.
         * Make sure we do not create the same etheraddr again.
         */
        getcredhostid(curthread->td_ucred, (unsigned long *)&hostid);
        if (hostid == 0)
                arc4rand(&hostid, sizeof(hostid), 0);

        struct ifnet *ifp = sc->ifp;
        EPAIR_LOCK();
        if (ifp->if_index > next_index)
                next_index = ifp->if_index;
        else
                next_index++;

        key[0] = (uint32_t)next_index;
        EPAIR_UNLOCK();
        key[1] = (uint32_t)(hostid & 0xffffffff);
        key[2] = (uint32_t)((hostid >> 32) & 0xffffffff);
        hash = jenkins_hash32(key, 3, 0);

        eaddr[0] = 0x02;
        memcpy(&eaddr[1], &hash, 4);
        eaddr[5] = 0x0a;
}

static void
epair_free_sc(struct epair_softc *sc)
{

        if_free(sc->ifp);
        ifmedia_removeall(&sc->media);
        for (int i = 0; i < sc->num_queues; i++) {
                struct epair_queue *q = &sc->queues[i];
                mtx_destroy(&q->mtx);
        }
        free(sc->queues, M_EPAIR);
        free(sc, M_EPAIR);
}

static void
epair_set_state(struct ifnet *ifp, bool running)
{
        if (running) {
                ifp->if_drv_flags |= IFF_DRV_RUNNING;
                if_link_state_change(ifp, LINK_STATE_UP);
        } else {
                if_link_state_change(ifp, LINK_STATE_DOWN);
                ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
        }
}

static int
epair_handle_unit(struct if_clone *ifc, char *name, size_t len, int *punit)
{
        int error = 0, unit, wildcard;
        char *dp;

        /* Try to see if a special unit was requested. */
        error = ifc_name2unit(name, &unit);
        if (error != 0)
                return (error);
        wildcard = (unit < 0);

        error = ifc_alloc_unit(ifc, &unit);
        if (error != 0)
                return (error);

        /*
         * If no unit had been given, we need to adjust the ifName.
         * Also make sure there is space for our extra [ab] suffix.
         */
        for (dp = name; *dp != '\0'; dp++);
        if (wildcard) {
                int slen = snprintf(dp, len - (dp - name), "%d", unit);
                if (slen > len - (dp - name) - 1) {
                        /* ifName too long. */
                        error = ENOSPC;
                        goto done;
                }
                dp += slen;
        }
        if (len - (dp - name) - 1 < 1) {
                /* No space left for our [ab] suffix. */
                error = ENOSPC;
                goto done;
        }
        *dp = 'b';
        /* Must not change dp so we can replace 'a' by 'b' later. */
        *(dp+1) = '\0';

        /* Check if 'a' and 'b' interfaces already exist. */
        if (ifunit(name) != NULL) {
                error = EEXIST;
                goto done;
        }

        *dp = 'a';
        if (ifunit(name) != NULL) {
                error = EEXIST;
                goto done;
        }
        *punit = unit;
done:
        if (error != 0)
                ifc_free_unit(ifc, unit);

        return (error);
}

static int
epair_clone_create(struct if_clone *ifc, char *name, size_t len,
    struct ifc_data *ifd, struct ifnet **ifpp)
{
        struct epair_softc *sca, *scb;
        struct ifnet *ifp;
        char *dp;
        int error, unit;
        uint8_t eaddr[ETHER_ADDR_LEN];  /* 00:00:00:00:00:00 */

        error = epair_handle_unit(ifc, name, len, &unit);
        if (error != 0)
                return (error);

        /* Allocate memory for both [ab] interfaces. */
        sca = epair_alloc_sc(ifc);
        scb = epair_alloc_sc(ifc);

        /*
         * Cross-reference the interfaces so we will be able to free both.
         */
        sca->oifp = scb->ifp;
        scb->oifp = sca->ifp;

        /* Finish initialization of interface <n>a. */
        ifp = sca->ifp;
        epair_setup_ifp(sca, name, unit);
        epair_generate_mac(sca, eaddr);

        ether_ifattach(ifp, eaddr);

        /* Swap the name and finish initialization of interface <n>b. */
        dp = name + strlen(name) - 1;
        *dp = 'b';

        epair_setup_ifp(scb, name, unit);

        ifp = scb->ifp;
        /* We need to play some tricks here for the second interface. */
        strlcpy(name, epairname, len);
        /* Correctly set the name for the cloner list. */
        strlcpy(name, scb->ifp->if_xname, len);

        epair_clone_add(ifc, scb);

        /*
         * Restore name to <n>a as the ifp for this will go into the
         * cloner list for the initial call.
         */
        strlcpy(name, sca->ifp->if_xname, len);

        /* Tell the world that we are ready to rock. */
        epair_set_state(sca->ifp, true);
        epair_set_state(scb->ifp, true);

        *ifpp = sca->ifp;

        return (0);
}

static void
epair_drain_rings(struct epair_softc *sc)
{
        for (int i = 0; i < sc->num_queues; i++) {
                struct epair_queue *q;
                struct mbuf *m, *n;

                q = &sc->queues[i];
                mtx_lock(&q->mtx);
                m = mbufq_flush(&q->q);
                mtx_unlock(&q->mtx);

                for (; m != NULL; m = n) {
                        n = m->m_nextpkt;
                        m_freem(m);
                }
        }
}

static int
epair_clone_destroy(struct if_clone *ifc, struct ifnet *ifp, uint32_t flags)
{
        struct ifnet *oifp;
        struct epair_softc *sca, *scb;
        int unit, error;

        /*
         * In case we called into if_clone_destroyif() ourselves
         * again to remove the second interface, the softc will be
         * NULL.  In that case do not do anything but return success.
         */
        if (ifp->if_softc == NULL)
                return (0);

        unit = ifp->if_dunit;
        sca = ifp->if_softc;
        oifp = sca->oifp;
        scb = oifp->if_softc;

        /* First get the interfaces down and detached. */
        epair_set_state(ifp, false);
        epair_set_state(oifp, false);

        ether_ifdetach(ifp);
        ether_ifdetach(oifp);

        /* Then free any queued packets and all the resources. */
        CURVNET_SET_QUIET(oifp->if_vnet);
        epair_drain_rings(scb);
        oifp->if_softc = NULL;
        error = if_clone_destroyif(ifc, oifp);
        if (error)
                panic("%s: if_clone_destroyif() for our 2nd iface failed: %d",
                    __func__, error);
        epair_free_sc(scb);
        CURVNET_RESTORE();

        epair_drain_rings(sca);
        epair_free_sc(sca);

        /* Last free the cloner unit. */
        ifc_free_unit(ifc, unit);

        return (0);
}

static void
vnet_epair_init(const void *unused __unused)
{
        struct if_clone_addreq req = {
                .match_f = epair_clone_match,
                .create_f = epair_clone_create,
                .destroy_f = epair_clone_destroy,
        };
        V_epair_cloner = ifc_attach_cloner(epairname, &req);
}
VNET_SYSINIT(vnet_epair_init, SI_SUB_PSEUDO, SI_ORDER_ANY,
    vnet_epair_init, NULL);

static void
vnet_epair_uninit(const void *unused __unused)
{

        ifc_detach_cloner(V_epair_cloner);
}
VNET_SYSUNINIT(vnet_epair_uninit, SI_SUB_INIT_IF, SI_ORDER_ANY,
    vnet_epair_uninit, NULL);

static int
epair_mod_init(void)
{
        char name[32];
        epair_tasks.tasks = 0;

#ifdef RSS
        int cpu;

        CPU_FOREACH(cpu) {
                cpuset_t cpu_mask;

                /* Pin to this CPU so we get appropriate NUMA allocations. */
                thread_lock(curthread);
                sched_bind(curthread, cpu);
                thread_unlock(curthread);

                snprintf(name, sizeof(name), "epair_task_%d", cpu);

                epair_tasks.tq[cpu] = taskqueue_create(name, M_WAITOK,
                    taskqueue_thread_enqueue,
                    &epair_tasks.tq[cpu]);
                CPU_SETOF(cpu, &cpu_mask);
                taskqueue_start_threads_cpuset(&epair_tasks.tq[cpu], 1, PI_NET,
                    &cpu_mask, "%s", name);

                epair_tasks.tasks++;
        }
        thread_lock(curthread);
        sched_unbind(curthread);
        thread_unlock(curthread);
#else
        snprintf(name, sizeof(name), "epair_task");

        epair_tasks.tq[0] = taskqueue_create(name, M_WAITOK,
            taskqueue_thread_enqueue,
            &epair_tasks.tq[0]);
        taskqueue_start_threads(&epair_tasks.tq[0], 1, PI_NET, "%s", name);

        epair_tasks.tasks = 1;
#endif

        return (0);
}

static void
epair_mod_cleanup(void)
{

        for (int i = 0; i < epair_tasks.tasks; i++) {
                taskqueue_drain_all(epair_tasks.tq[i]);
                taskqueue_free(epair_tasks.tq[i]);
        }
}

static int
epair_modevent(module_t mod, int type, void *data)
{
        int ret;

        switch (type) {
        case MOD_LOAD:
                EPAIR_LOCK_INIT();
                ret = epair_mod_init();
                if (ret != 0)
                        return (ret);
                if (bootverbose)
                        printf("%s: %s initialized.\n", __func__, epairname);
                break;
        case MOD_UNLOAD:
                epair_mod_cleanup();
                EPAIR_LOCK_DESTROY();
                if (bootverbose)
                        printf("%s: %s unloaded.\n", __func__, epairname);
                break;
        default:
                return (EOPNOTSUPP);
        }
        return (0);
}

static moduledata_t epair_mod = {
        "if_epair",
        epair_modevent,
        0
};

DECLARE_MODULE(if_epair, epair_mod, SI_SUB_PSEUDO, SI_ORDER_MIDDLE);
MODULE_VERSION(if_epair, 3);