/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2008 The FreeBSD Foundation
 * Copyright (c) 2009-2021 Bjoern A. Zeeb <bz@FreeBSD.org>
 *
 * This software was developed by CK Software GmbH under sponsorship
 * from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * A pair of virtual back-to-back connected Ethernet-like interfaces
 * (``two interfaces with a virtual cross-over cable'').
 *
 * This is mostly intended to be used to provide connectivity between
 * different virtual network stack instances.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_rss.h"
#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/hash.h>
#include <sys/interrupt.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/libkern.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/taskqueue.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_clone.h>
#include <net/if_media.h>
#include <net/if_private.h>
#include <net/if_types.h>
#include <net/netisr.h>
#ifdef RSS
#include <net/rss_config.h>
#ifdef INET
#include <netinet/in_rss.h>
#endif
#ifdef INET6
#include <netinet6/in6_rss.h>
#endif
#endif
#include <net/vnet.h>

static const char epairname[] = "epair";
#define	RXRSIZE	4096	/* Probably overkill by 4-8x. */
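
/*
 * Example usage (an illustrative sketch only, not part of the driver;
 * interface names, addresses, and the jail name "myjail" are placeholders):
 *
 *	# ifconfig epair create			(creates epair0a and epair0b)
 *	# ifconfig epair0a 192.0.2.1/30 up
 *	# ifconfig epair0b vnet myjail		(move the b end into a vnet jail)
 *	# jexec myjail ifconfig epair0b 192.0.2.2/30 up
 *
 * Destroying the a end (ifconfig epair0a destroy) removes both halves;
 * see epair_clone_destroy() below.
 */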

static MALLOC_DEFINE(M_EPAIR, epairname,
    "Pair of virtual cross-over connected Ethernet-like interfaces");

VNET_DEFINE_STATIC(struct if_clone *, epair_cloner);
#define	V_epair_cloner	VNET(epair_cloner)

static unsigned int next_index = 0;
#define	EPAIR_LOCK_INIT()	mtx_init(&epair_n_index_mtx, "epairidx", \
				    NULL, MTX_DEF)
#define	EPAIR_LOCK_DESTROY()	mtx_destroy(&epair_n_index_mtx)
#define	EPAIR_LOCK()		mtx_lock(&epair_n_index_mtx)
#define	EPAIR_UNLOCK()		mtx_unlock(&epair_n_index_mtx)

struct epair_softc;
struct epair_queue {
	struct mtx	mtx;
	struct mbufq	q;
	int		id;
	enum {
		EPAIR_QUEUE_IDLE,
		EPAIR_QUEUE_WAKING,
		EPAIR_QUEUE_RUNNING,
	} state;
	struct task	tx_task;
	struct epair_softc *sc;
};

static struct mtx epair_n_index_mtx;
struct epair_softc {
	struct ifnet	*ifp;		/* This ifp. */
	struct ifnet	*oifp;		/* other ifp of pair. */
	int		num_queues;
	struct epair_queue *queues;
	struct ifmedia	media;		/* Media config (fake). */
	STAILQ_ENTRY(epair_softc) entry;
};

struct epair_tasks_t {
	int		 tasks;
	struct taskqueue *tq[MAXCPU];
};

static struct epair_tasks_t epair_tasks;

static void
epair_clear_mbuf(struct mbuf *m)
{
	/* Remove any CSUM_SND_TAG as ether_input will barf. */
	if (m->m_pkthdr.csum_flags & CSUM_SND_TAG) {
		m_snd_tag_rele(m->m_pkthdr.snd_tag);
		m->m_pkthdr.snd_tag = NULL;
		m->m_pkthdr.csum_flags &= ~CSUM_SND_TAG;
	}

	m_tag_delete_nonpersistent(m);
}

static void
epair_tx_start_deferred(void *arg, int pending)
{
	struct epair_queue *q = (struct epair_queue *)arg;
	if_t ifp;
	struct mbuf *m, *n;
	bool resched;

	ifp = q->sc->ifp;

	if_ref(ifp);
	CURVNET_SET(ifp->if_vnet);

	mtx_lock(&q->mtx);
	m = mbufq_flush(&q->q);
	q->state = EPAIR_QUEUE_RUNNING;
	mtx_unlock(&q->mtx);

	while (m != NULL) {
		n = STAILQ_NEXT(m, m_stailqpkt);
		m->m_nextpkt = NULL;
		if_input(ifp, m);
		m = n;
	}

	/*
	 * Avoid flushing the queue more than once per task.  We can otherwise
	 * end up starving ourselves in a multi-epair routing configuration.
	 */
	mtx_lock(&q->mtx);
	if (mbufq_len(&q->q) > 0) {
		resched = true;
		q->state = EPAIR_QUEUE_WAKING;
	} else {
		resched = false;
		q->state = EPAIR_QUEUE_IDLE;
	}
	mtx_unlock(&q->mtx);

	if (resched)
		taskqueue_enqueue(epair_tasks.tq[q->id], &q->tx_task);

	CURVNET_RESTORE();
	if_rele(ifp);
}

static struct epair_queue *
epair_select_queue(struct epair_softc *sc, struct mbuf *m)
{
	uint32_t bucket;
#ifdef RSS
	struct ether_header *eh;
	int ret;

	ret = rss_m2bucket(m, &bucket);
	if (ret) {
		/* Actually hash the packet. */
		eh = mtod(m, struct ether_header *);

		switch (ntohs(eh->ether_type)) {
#ifdef INET
		case ETHERTYPE_IP:
			rss_soft_m2cpuid_v4(m, 0, &bucket);
			break;
#endif
#ifdef INET6
		case ETHERTYPE_IPV6:
			rss_soft_m2cpuid_v6(m, 0, &bucket);
			break;
#endif
		default:
			bucket = 0;
			break;
		}
	}
	bucket %= sc->num_queues;
#else
	bucket = 0;
#endif
	return (&sc->queues[bucket]);
}

static void
epair_prepare_mbuf(struct mbuf *m, struct ifnet *src_ifp)
{
	M_ASSERTPKTHDR(m);
	epair_clear_mbuf(m);
	if_setrcvif(m, src_ifp);
	M_SETFIB(m, src_ifp->if_fib);

	MPASS(m->m_nextpkt == NULL);
	MPASS((m->m_pkthdr.csum_flags & CSUM_SND_TAG) == 0);
}

static void
epair_menq(struct mbuf *m, struct epair_softc *osc)
{
	struct epair_queue *q;
	struct ifnet *ifp, *oifp;
	int error, len;
	bool mcast;

	/*
	 * I know this looks weird.  We pass the "other sc" as we need that one
	 * and can get both ifps from it as well.
	 */
	oifp = osc->ifp;
	ifp = osc->oifp;

	epair_prepare_mbuf(m, oifp);

	/* Save values as once the mbuf is queued, it's not ours anymore. */
	len = m->m_pkthdr.len;
	mcast = (m->m_flags & (M_BCAST | M_MCAST)) != 0;

	q = epair_select_queue(osc, m);

	mtx_lock(&q->mtx);
	if (q->state == EPAIR_QUEUE_IDLE) {
		q->state = EPAIR_QUEUE_WAKING;
		taskqueue_enqueue(epair_tasks.tq[q->id], &q->tx_task);
	}
	error = mbufq_enqueue(&q->q, m);
	mtx_unlock(&q->mtx);

	if (error != 0) {
		m_freem(m);
		if_inc_counter(ifp, IFCOUNTER_OQDROPS, 1);
	} else {
		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
		if_inc_counter(ifp, IFCOUNTER_OBYTES, len);
		if (mcast)
			if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1);
		if_inc_counter(oifp, IFCOUNTER_IPACKETS, 1);
	}
}

static void
epair_start(struct ifnet *ifp)
{
	struct mbuf *m;
	struct epair_softc *sc;
	struct ifnet *oifp;

	/*
	 * We get packets here from ether_output() via if_handoff()
	 * and put them into the receive queue (rxq) of the other
	 * interface (oifp) of our pair.
	 */
	sc = ifp->if_softc;
	oifp = sc->oifp;
	sc = oifp->if_softc;
	for (;;) {
		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;
		M_ASSERTPKTHDR(m);
		BPF_MTAP(ifp, m);

		/* In case either interface is not usable, drop the packet. */
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
		    (ifp->if_flags & IFF_UP) == 0 ||
		    (oifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
		    (oifp->if_flags & IFF_UP) == 0) {
			m_freem(m);
			continue;
		}

		epair_menq(m, sc);
	}
}

static int
epair_transmit(struct ifnet *ifp, struct mbuf *m)
{
	struct epair_softc *sc;
	struct ifnet *oifp;
#ifdef ALTQ
	int len;
	bool mcast;
#endif

	if (m == NULL)
		return (0);
	M_ASSERTPKTHDR(m);

	/*
	 * We are not going to use the interface en/dequeue mechanism
	 * on the TX side.  We are called from ether_output_frame()
	 * and will put the packet into the receive queue (rxq) of the
	 * other interface (oifp) of our pair.
	 */
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		m_freem(m);
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
		return (ENXIO);
	}
	if ((ifp->if_flags & IFF_UP) == 0) {
		m_freem(m);
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
		return (ENETDOWN);
	}

	BPF_MTAP(ifp, m);

	/*
	 * In case the outgoing interface is not usable,
	 * drop the packet.
	 */
	sc = ifp->if_softc;
	oifp = sc->oifp;
	if ((oifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
	    (oifp->if_flags & IFF_UP) == 0) {
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
		m_freem(m);
		return (0);
	}

#ifdef ALTQ
	len = m->m_pkthdr.len;
	mcast = (m->m_flags & (M_BCAST | M_MCAST)) != 0;
	int error = 0;

	/* Support ALTQ via the classic if_start() path. */
	IF_LOCK(&ifp->if_snd);
	if (ALTQ_IS_ENABLED(&ifp->if_snd)) {
		ALTQ_ENQUEUE(&ifp->if_snd, m, NULL, error);
		if (error)
			if_inc_counter(ifp, IFCOUNTER_OQDROPS, 1);
		IF_UNLOCK(&ifp->if_snd);
		if (!error) {
			if_inc_counter(ifp, IFCOUNTER_OBYTES, len);
			if (mcast)
				if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1);
			epair_start(ifp);
		}
		return (error);
	}
	IF_UNLOCK(&ifp->if_snd);
#endif

	epair_menq(m, oifp->if_softc);
	return (0);
}

static void
epair_qflush(struct ifnet *ifp __unused)
{
}

static int
epair_media_change(struct ifnet *ifp __unused)
{

	/* Do nothing. */
	return (0);
}

static void
epair_media_status(struct ifnet *ifp __unused, struct ifmediareq *imr)
{

	imr->ifm_status = IFM_AVALID | IFM_ACTIVE;
	imr->ifm_active = IFM_ETHER | IFM_10G_T | IFM_FDX;
}

static int
epair_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct epair_softc *sc;
	struct ifreq *ifr;
	int error;

	ifr = (struct ifreq *)data;
	switch (cmd) {
	case SIOCSIFFLAGS:
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = 0;
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		sc = ifp->if_softc;
		error = ifmedia_ioctl(ifp, ifr, &sc->media, cmd);
		break;

	case SIOCSIFMTU:
		/* We basically allow all kinds of MTUs. */
		ifp->if_mtu = ifr->ifr_mtu;
		error = 0;
		break;

	default:
		/* Let the common ethernet handler process this. */
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}

static void
epair_init(void *dummy __unused)
{
}

/*
 * Interface cloning functions.
 * We use our private ones so that we can create/destroy our secondary
 * device along with the primary one.
 */
static int
epair_clone_match(struct if_clone *ifc, const char *name)
{
	const char *cp;

	/*
	 * Our base name is epair.
	 * Our interfaces will be named epair<n>[ab].
	 * So accept anything of the following list:
	 * - epair
	 * - epair<n>
	 * but not the epair<n>[ab] versions.
	 */
	if (strncmp(epairname, name, sizeof(epairname)-1) != 0)
		return (0);

	for (cp = name + sizeof(epairname) - 1; *cp != '\0'; cp++) {
		if (*cp < '0' || *cp > '9')
			return (0);
	}

	return (1);
}

static void
epair_clone_add(struct if_clone *ifc, struct epair_softc *scb)
{
	struct ifnet *ifp;
	uint8_t eaddr[ETHER_ADDR_LEN];	/* 00:00:00:00:00:00 */

	ifp = scb->ifp;
	/* Copy epairNa etheraddr and change the last byte. */
	memcpy(eaddr, scb->oifp->if_hw_addr, ETHER_ADDR_LEN);
	eaddr[5] = 0x0b;
	ether_ifattach(ifp, eaddr);

	if_clone_addif(ifc, ifp);
}

static struct epair_softc *
epair_alloc_sc(struct if_clone *ifc)
{
	struct epair_softc *sc;

	struct ifnet *ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL)
		return (NULL);

	sc = malloc(sizeof(struct epair_softc), M_EPAIR, M_WAITOK | M_ZERO);
	sc->ifp = ifp;
	sc->num_queues = epair_tasks.tasks;
	sc->queues = mallocarray(sc->num_queues, sizeof(struct epair_queue),
	    M_EPAIR, M_WAITOK);
	for (int i = 0; i < sc->num_queues; i++) {
		struct epair_queue *q = &sc->queues[i];
		q->id = i;
		q->state = EPAIR_QUEUE_IDLE;
		mtx_init(&q->mtx, "epairq", NULL, MTX_DEF | MTX_NEW);
		mbufq_init(&q->q, RXRSIZE);
		q->sc = sc;
		NET_TASK_INIT(&q->tx_task, 0, epair_tx_start_deferred, q);
	}

	/* Initialise pseudo media types. */
	ifmedia_init(&sc->media, 0, epair_media_change, epair_media_status);
	ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_T, 0, NULL);
	ifmedia_set(&sc->media, IFM_ETHER | IFM_10G_T);

	return (sc);
}

static void
epair_setup_ifp(struct epair_softc *sc, char *name, int unit)
{
	struct ifnet *ifp = sc->ifp;

	ifp->if_softc = sc;
	strlcpy(ifp->if_xname, name, IFNAMSIZ);
	ifp->if_dname = epairname;
	ifp->if_dunit = unit;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_flags |= IFF_KNOWSEPOCH;
	ifp->if_capabilities = IFCAP_VLAN_MTU;
	ifp->if_capenable = IFCAP_VLAN_MTU;
	ifp->if_transmit = epair_transmit;
	ifp->if_qflush = epair_qflush;
	ifp->if_start = epair_start;
	ifp->if_ioctl = epair_ioctl;
	ifp->if_init = epair_init;
	if_setsendqlen(ifp, ifqmaxlen);
	if_setsendqready(ifp);

	ifp->if_baudrate = IF_Gbps(10);	/* arbitrary maximum */
}

static void
epair_generate_mac(struct epair_softc *sc, uint8_t *eaddr)
{
	uint32_t key[3];
	uint32_t hash;
	uint64_t hostid;

	EPAIR_LOCK();
#ifdef SMP
	/* Get an approximate distribution. */
	hash = next_index % mp_ncpus;
#else
	hash = 0;
#endif
	EPAIR_UNLOCK();

	/*
	 * Calculate the etheraddr by hashing the hostid and the
	 * interface index.  The result should hopefully be unique.
	 * Note that the "a" component of an epair instance may get moved
	 * to a different VNET after creation.  In that case its index
	 * will be freed and can get reused by a new epair instance.
	 * Make sure we do not create the same etheraddr again.
	 */
577 */ 578 getcredhostid(curthread->td_ucred, (unsigned long *)&hostid); 579 if (hostid == 0) 580 arc4rand(&hostid, sizeof(hostid), 0); 581 582 struct ifnet *ifp = sc->ifp; 583 EPAIR_LOCK(); 584 if (ifp->if_index > next_index) 585 next_index = ifp->if_index; 586 else 587 next_index++; 588 589 key[0] = (uint32_t)next_index; 590 EPAIR_UNLOCK(); 591 key[1] = (uint32_t)(hostid & 0xffffffff); 592 key[2] = (uint32_t)((hostid >> 32) & 0xfffffffff); 593 hash = jenkins_hash32(key, 3, 0); 594 595 eaddr[0] = 0x02; 596 memcpy(&eaddr[1], &hash, 4); 597 eaddr[5] = 0x0a; 598 } 599 600 static void 601 epair_free_sc(struct epair_softc *sc) 602 { 603 if (sc == NULL) 604 return; 605 606 if_free(sc->ifp); 607 ifmedia_removeall(&sc->media); 608 for (int i = 0; i < sc->num_queues; i++) { 609 struct epair_queue *q = &sc->queues[i]; 610 mtx_destroy(&q->mtx); 611 } 612 free(sc->queues, M_EPAIR); 613 free(sc, M_EPAIR); 614 } 615 616 static void 617 epair_set_state(struct ifnet *ifp, bool running) 618 { 619 if (running) { 620 ifp->if_drv_flags |= IFF_DRV_RUNNING; 621 if_link_state_change(ifp, LINK_STATE_UP); 622 } else { 623 if_link_state_change(ifp, LINK_STATE_DOWN); 624 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 625 } 626 } 627 628 static int 629 epair_handle_unit(struct if_clone *ifc, char *name, size_t len, int *punit) 630 { 631 int error = 0, unit, wildcard; 632 char *dp; 633 634 /* Try to see if a special unit was requested. */ 635 error = ifc_name2unit(name, &unit); 636 if (error != 0) 637 return (error); 638 wildcard = (unit < 0); 639 640 error = ifc_alloc_unit(ifc, &unit); 641 if (error != 0) 642 return (error); 643 644 /* 645 * If no unit had been given, we need to adjust the ifName. 646 * Also make sure there is space for our extra [ab] suffix. 647 */ 648 for (dp = name; *dp != '\0'; dp++); 649 if (wildcard) { 650 int slen = snprintf(dp, len - (dp - name), "%d", unit); 651 if (slen > len - (dp - name) - 1) { 652 /* ifName too long. */ 653 error = ENOSPC; 654 goto done; 655 } 656 dp += slen; 657 } 658 if (len - (dp - name) - 1 < 1) { 659 /* No space left for our [ab] suffix. */ 660 error = ENOSPC; 661 goto done; 662 } 663 *dp = 'b'; 664 /* Must not change dp so we can replace 'a' by 'b' later. */ 665 *(dp+1) = '\0'; 666 667 /* Check if 'a' and 'b' interfaces already exist. */ 668 if (ifunit(name) != NULL) { 669 error = EEXIST; 670 goto done; 671 } 672 673 *dp = 'a'; 674 if (ifunit(name) != NULL) { 675 error = EEXIST; 676 goto done; 677 } 678 *punit = unit; 679 done: 680 if (error != 0) 681 ifc_free_unit(ifc, unit); 682 683 return (error); 684 } 685 686 static int 687 epair_clone_create(struct if_clone *ifc, char *name, size_t len, 688 struct ifc_data *ifd, struct ifnet **ifpp) 689 { 690 struct epair_softc *sca, *scb; 691 struct ifnet *ifp; 692 char *dp; 693 int error, unit; 694 uint8_t eaddr[ETHER_ADDR_LEN]; /* 00:00:00:00:00:00 */ 695 696 error = epair_handle_unit(ifc, name, len, &unit); 697 if (error != 0) 698 return (error); 699 700 /* Allocate memory for both [ab] interfaces */ 701 sca = epair_alloc_sc(ifc); 702 scb = epair_alloc_sc(ifc); 703 if (sca == NULL || scb == NULL) { 704 epair_free_sc(sca); 705 epair_free_sc(scb); 706 ifc_free_unit(ifc, unit); 707 return (ENOSPC); 708 } 709 710 /* 711 * Cross-reference the interfaces so we will be able to free both. 712 */ 713 sca->oifp = scb->ifp; 714 scb->oifp = sca->ifp; 715 716 /* Finish initialization of interface <n>a. 
	ifp = sca->ifp;
	epair_setup_ifp(sca, name, unit);
	epair_generate_mac(sca, eaddr);

	ether_ifattach(ifp, eaddr);

	/* Swap the name and finish initialization of interface <n>b. */
	dp = name + strlen(name) - 1;
	*dp = 'b';

	epair_setup_ifp(scb, name, unit);

	ifp = scb->ifp;
	/* We need to play some tricks here for the second interface. */
	strlcpy(name, epairname, len);
	/* Correctly set the name for the cloner list. */
	strlcpy(name, scb->ifp->if_xname, len);

	epair_clone_add(ifc, scb);

	/*
	 * Restore name to <n>a as the ifp for this will go into the
	 * cloner list for the initial call.
	 */
	strlcpy(name, sca->ifp->if_xname, len);

	/* Tell the world that we are ready to rock. */
	epair_set_state(sca->ifp, true);
	epair_set_state(scb->ifp, true);

	*ifpp = sca->ifp;

	return (0);
}

static void
epair_drain_rings(struct epair_softc *sc)
{
	for (int i = 0; i < sc->num_queues; i++) {
		struct epair_queue *q;
		struct mbuf *m, *n;

		q = &sc->queues[i];
		mtx_lock(&q->mtx);
		m = mbufq_flush(&q->q);
		mtx_unlock(&q->mtx);

		for (; m != NULL; m = n) {
			n = m->m_nextpkt;
			m_freem(m);
		}
	}
}

static int
epair_clone_destroy(struct if_clone *ifc, struct ifnet *ifp, uint32_t flags)
{
	struct ifnet *oifp;
	struct epair_softc *sca, *scb;
	int unit, error;

	/*
	 * In case we called into if_clone_destroyif() ourselves
	 * again to remove the second interface, the softc will be
	 * NULL.  In that case do not do anything but return success.
	 */
	if (ifp->if_softc == NULL)
		return (0);

	unit = ifp->if_dunit;
	sca = ifp->if_softc;
	oifp = sca->oifp;
	scb = oifp->if_softc;

	/* First get the interfaces down and detached. */
	epair_set_state(ifp, false);
	epair_set_state(oifp, false);

	ether_ifdetach(ifp);
	ether_ifdetach(oifp);

	/* Then free any queued packets and all the resources. */
	CURVNET_SET_QUIET(oifp->if_vnet);
	epair_drain_rings(scb);
	oifp->if_softc = NULL;
	error = if_clone_destroyif(ifc, oifp);
	if (error)
		panic("%s: if_clone_destroyif() for our 2nd iface failed: %d",
		    __func__, error);
	epair_free_sc(scb);
	CURVNET_RESTORE();

	epair_drain_rings(sca);
	epair_free_sc(sca);

	/* Last free the cloner unit. */
	ifc_free_unit(ifc, unit);

	return (0);
}

static void
vnet_epair_init(const void *unused __unused)
{
	struct if_clone_addreq req = {
		.match_f = epair_clone_match,
		.create_f = epair_clone_create,
		.destroy_f = epair_clone_destroy,
	};
	V_epair_cloner = ifc_attach_cloner(epairname, &req);
}
VNET_SYSINIT(vnet_epair_init, SI_SUB_PSEUDO, SI_ORDER_ANY,
    vnet_epair_init, NULL);

static void
vnet_epair_uninit(const void *unused __unused)
{

	ifc_detach_cloner(V_epair_cloner);
}
VNET_SYSUNINIT(vnet_epair_uninit, SI_SUB_INIT_IF, SI_ORDER_ANY,
    vnet_epair_uninit, NULL);

static int
epair_mod_init(void)
{
	char name[32];
	epair_tasks.tasks = 0;

#ifdef RSS
	int cpu;

	CPU_FOREACH(cpu) {
		cpuset_t cpu_mask;

		/* Pin to this CPU so we get appropriate NUMA allocations. */
		thread_lock(curthread);
		sched_bind(curthread, cpu);
		thread_unlock(curthread);

		snprintf(name, sizeof(name), "epair_task_%d", cpu);

		epair_tasks.tq[cpu] = taskqueue_create(name, M_WAITOK,
		    taskqueue_thread_enqueue,
		    &epair_tasks.tq[cpu]);
		CPU_SETOF(cpu, &cpu_mask);
		taskqueue_start_threads_cpuset(&epair_tasks.tq[cpu], 1, PI_NET,
		    &cpu_mask, "%s", name);

		epair_tasks.tasks++;
	}
	thread_lock(curthread);
	sched_unbind(curthread);
	thread_unlock(curthread);
#else
	snprintf(name, sizeof(name), "epair_task");

	epair_tasks.tq[0] = taskqueue_create(name, M_WAITOK,
	    taskqueue_thread_enqueue,
	    &epair_tasks.tq[0]);
	taskqueue_start_threads(&epair_tasks.tq[0], 1, PI_NET, "%s", name);

	epair_tasks.tasks = 1;
#endif

	return (0);
}

static void
epair_mod_cleanup(void)
{

	for (int i = 0; i < epair_tasks.tasks; i++) {
		taskqueue_drain_all(epair_tasks.tq[i]);
		taskqueue_free(epair_tasks.tq[i]);
	}
}

static int
epair_modevent(module_t mod, int type, void *data)
{
	int ret;

	switch (type) {
	case MOD_LOAD:
		EPAIR_LOCK_INIT();
		ret = epair_mod_init();
		if (ret != 0)
			return (ret);
		if (bootverbose)
			printf("%s: %s initialized.\n", __func__, epairname);
		break;
	case MOD_UNLOAD:
		epair_mod_cleanup();
		EPAIR_LOCK_DESTROY();
		if (bootverbose)
			printf("%s: %s unloaded.\n", __func__, epairname);
		break;
	default:
		return (EOPNOTSUPP);
	}
	return (0);
}

static moduledata_t epair_mod = {
	"if_epair",
	epair_modevent,
	0
};

DECLARE_MODULE(if_epair, epair_mod, SI_SUB_PSEUDO, SI_ORDER_MIDDLE);
MODULE_VERSION(if_epair, 3);