1 /* 2 * Copyright (c) 2013 Chris Torek <torek @ torek net> 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 25 */ 26 /* 27 * This file and its contents are supplied under the terms of the 28 * Common Development and Distribution License ("CDDL"), version 1.0. 29 * You may only use this file in accordance with the terms of version 30 * 1.0 of the CDDL. 31 * 32 * A full copy of the text of the CDDL should have accompanied this 33 * source. A copy of the CDDL is also available via the Internet at 34 * http://www.illumos.org/license/CDDL. 35 * 36 * Copyright 2015 Pluribus Networks Inc. 37 * Copyright 2019 Joyent, Inc. 38 * Copyright 2022 Oxide Computer Company 39 */ 40 41 #include <sys/types.h> 42 #include <sys/strsubr.h> 43 44 #include <sys/dlpi.h> 45 #include <sys/pattr.h> 46 #include <sys/vlan.h> 47 48 #include "viona_impl.h" 49 50 51 52 #define VTNET_MAXSEGS 32 53 54 /* Min. octets in an ethernet frame minus FCS */ 55 #define MIN_BUF_SIZE 60 56 #define NEED_VLAN_PAD_SIZE (MIN_BUF_SIZE - VLAN_TAGSZ) 57 58 static mblk_t *viona_vlan_pad_mp; 59 60 void 61 viona_rx_init(void) 62 { 63 mblk_t *mp; 64 65 ASSERT(viona_vlan_pad_mp == NULL); 66 67 /* Create mblk for padding when VLAN tags are stripped */ 68 mp = allocb_wait(VLAN_TAGSZ, BPRI_HI, STR_NOSIG, NULL); 69 bzero(mp->b_rptr, VLAN_TAGSZ); 70 mp->b_wptr += VLAN_TAGSZ; 71 viona_vlan_pad_mp = mp; 72 } 73 74 void 75 viona_rx_fini(void) 76 { 77 mblk_t *mp; 78 79 /* Clean up the VLAN padding mblk */ 80 mp = viona_vlan_pad_mp; 81 viona_vlan_pad_mp = NULL; 82 VERIFY(mp != NULL && mp->b_cont == NULL); 83 freemsg(mp); 84 } 85 86 void 87 viona_worker_rx(viona_vring_t *ring, viona_link_t *link) 88 { 89 (void) thread_vsetname(curthread, "viona_rx_%p", ring); 90 91 ASSERT(MUTEX_HELD(&ring->vr_lock)); 92 ASSERT3U(ring->vr_state, ==, VRS_RUN); 93 94 viona_ring_disable_notify(ring); 95 96 do { 97 if (vmm_drv_lease_expired(ring->vr_lease)) { 98 /* 99 * Set the renewal flag, causing incoming traffic to be 100 * dropped, and issue an RX barrier to ensure any 101 * threads in the RX callbacks will have finished. 102 * The vr_lock cannot be held across the barrier as it 103 * poses a deadlock risk. 104 */ 105 ring->vr_state_flags |= VRSF_RENEW; 106 mutex_exit(&ring->vr_lock); 107 mac_rx_barrier(link->l_mch); 108 mutex_enter(&ring->vr_lock); 109 110 if (!viona_ring_lease_renew(ring)) { 111 break; 112 } 113 ring->vr_state_flags &= ~VRSF_RENEW; 114 } 115 116 /* 117 * For now, there is little to do in the RX worker as inbound 118 * data is delivered by MAC via the RX callbacks. If tap-like 119 * functionality is added later, this would be a convenient 120 * place to inject frames into the guest. 121 */ 122 (void) cv_wait_sig(&ring->vr_cv, &ring->vr_lock); 123 } while (!vring_need_bail(ring)); 124 125 ring->vr_state = VRS_STOP; 126 127 /* 128 * The RX ring is stopping, before we start tearing it down it 129 * is imperative that we perform an RX barrier so that 130 * incoming packets are dropped at viona_rx_classified(). 131 */ 132 mutex_exit(&ring->vr_lock); 133 mac_rx_barrier(link->l_mch); 134 mutex_enter(&ring->vr_lock); 135 136 viona_ring_enable_notify(ring); 137 } 138 139 static size_t 140 viona_copy_mblk(const mblk_t *mp, size_t seek, caddr_t buf, size_t len, 141 boolean_t *end) 142 { 143 size_t copied = 0; 144 size_t off = 0; 145 146 /* Seek past already-consumed data */ 147 while (seek > 0 && mp != NULL) { 148 const size_t chunk = MBLKL(mp); 149 150 if (chunk > seek) { 151 off = seek; 152 break; 153 } 154 mp = mp->b_cont; 155 seek -= chunk; 156 } 157 158 while (mp != NULL) { 159 const size_t chunk = MBLKL(mp) - off; 160 const size_t to_copy = MIN(chunk, len); 161 162 bcopy(mp->b_rptr + off, buf, to_copy); 163 copied += to_copy; 164 buf += to_copy; 165 len -= to_copy; 166 167 /* 168 * If all the remaining data in the mblk_t was copied, move on 169 * to the next one in the chain. Any seek offset applied to 170 * the first mblk copy is zeroed out for subsequent operations. 171 */ 172 if (chunk == to_copy) { 173 mp = mp->b_cont; 174 off = 0; 175 } 176 #ifdef DEBUG 177 else { 178 /* 179 * The only valid reason for the copy to consume less 180 * than the entire contents of the mblk_t is because 181 * the output buffer has been filled. 182 */ 183 ASSERT0(len); 184 } 185 #endif 186 187 /* Go no further if the buffer has been filled */ 188 if (len == 0) { 189 break; 190 } 191 192 } 193 *end = (mp == NULL); 194 return (copied); 195 } 196 197 static int 198 viona_recv_plain(viona_vring_t *ring, const mblk_t *mp, size_t msz) 199 { 200 struct iovec iov[VTNET_MAXSEGS]; 201 uint16_t cookie; 202 int n; 203 const size_t hdr_sz = sizeof (struct virtio_net_hdr); 204 struct virtio_net_hdr *hdr; 205 size_t len, copied = 0; 206 caddr_t buf = NULL; 207 boolean_t end = B_FALSE; 208 const uint32_t features = ring->vr_link->l_features; 209 vmm_page_t *pages = NULL; 210 211 ASSERT(msz >= MIN_BUF_SIZE); 212 213 n = vq_popchain(ring, iov, VTNET_MAXSEGS, &cookie, &pages); 214 if (n <= 0) { 215 /* Without available buffers, the frame must be dropped. */ 216 return (ENOSPC); 217 } 218 if (iov[0].iov_len < hdr_sz) { 219 /* 220 * There is little to do if there is not even space available 221 * for the sole header. Zero the buffer and bail out as a last 222 * act of desperation. 223 */ 224 bzero(iov[0].iov_base, iov[0].iov_len); 225 goto bad_frame; 226 } 227 228 /* Grab the address of the header before anything else */ 229 hdr = (struct virtio_net_hdr *)iov[0].iov_base; 230 231 /* 232 * If there is any space remaining in the first buffer after writing 233 * the header, fill it with frame data. 234 */ 235 if (iov[0].iov_len > hdr_sz) { 236 buf = (caddr_t)iov[0].iov_base + hdr_sz; 237 len = iov[0].iov_len - hdr_sz; 238 239 copied += viona_copy_mblk(mp, copied, buf, len, &end); 240 } 241 242 /* Copy any remaining data into subsequent buffers, if present */ 243 for (int i = 1; i < n && !end; i++) { 244 buf = (caddr_t)iov[i].iov_base; 245 len = iov[i].iov_len; 246 247 copied += viona_copy_mblk(mp, copied, buf, len, &end); 248 } 249 250 /* Was the expected amount of data copied? */ 251 if (copied != msz) { 252 VIONA_PROBE5(too_short, viona_vring_t *, ring, 253 uint16_t, cookie, mblk_t *, mp, size_t, copied, 254 size_t, msz); 255 VIONA_RING_STAT_INCR(ring, too_short); 256 goto bad_frame; 257 } 258 259 /* Populate (read: zero) the header and account for it in the size */ 260 bzero(hdr, hdr_sz); 261 copied += hdr_sz; 262 263 /* Add chksum bits, if needed */ 264 if ((features & VIRTIO_NET_F_GUEST_CSUM) != 0) { 265 uint32_t cksum_flags; 266 267 if (((features & VIRTIO_NET_F_GUEST_TSO4) != 0) && 268 ((DB_CKSUMFLAGS(mp) & HW_LSO) != 0)) { 269 hdr->vrh_gso_type |= VIRTIO_NET_HDR_GSO_TCPV4; 270 hdr->vrh_gso_size = DB_LSOMSS(mp); 271 } 272 273 mac_hcksum_get((mblk_t *)mp, NULL, NULL, NULL, NULL, 274 &cksum_flags); 275 if ((cksum_flags & HCK_FULLCKSUM_OK) != 0) { 276 hdr->vrh_flags |= VIRTIO_NET_HDR_F_DATA_VALID; 277 } 278 } 279 280 /* Release this chain */ 281 vmm_drv_page_release_chain(pages); 282 vq_pushchain(ring, copied, cookie); 283 return (0); 284 285 bad_frame: 286 VIONA_PROBE3(bad_rx_frame, viona_vring_t *, ring, uint16_t, cookie, 287 mblk_t *, mp); 288 VIONA_RING_STAT_INCR(ring, bad_rx_frame); 289 290 vmm_drv_page_release_chain(pages); 291 vq_pushchain(ring, MAX(copied, MIN_BUF_SIZE + hdr_sz), cookie); 292 return (EINVAL); 293 } 294 295 static int 296 viona_recv_merged(viona_vring_t *ring, const mblk_t *mp, size_t msz) 297 { 298 struct iovec iov[VTNET_MAXSEGS]; 299 used_elem_t uelem[VTNET_MAXSEGS]; 300 vmm_page_t *pages = NULL, *hdr_pages = NULL; 301 int n, i = 0, buf_idx = 0, err = 0; 302 uint16_t cookie; 303 caddr_t buf; 304 size_t len, copied = 0, chunk = 0; 305 struct virtio_net_mrgrxhdr *hdr = NULL; 306 const size_t hdr_sz = sizeof (struct virtio_net_mrgrxhdr); 307 boolean_t end = B_FALSE; 308 const uint32_t features = ring->vr_link->l_features; 309 310 ASSERT(msz >= MIN_BUF_SIZE); 311 312 n = vq_popchain(ring, iov, VTNET_MAXSEGS, &cookie, &hdr_pages); 313 if (n <= 0) { 314 /* Without available buffers, the frame must be dropped. */ 315 VIONA_PROBE2(no_space, viona_vring_t *, ring, mblk_t *, mp); 316 VIONA_RING_STAT_INCR(ring, no_space); 317 return (ENOSPC); 318 } 319 if (iov[0].iov_len < hdr_sz) { 320 /* 321 * There is little to do if there is not even space available 322 * for the sole header. Zero the buffer and bail out as a last 323 * act of desperation. 324 */ 325 bzero(iov[0].iov_base, iov[0].iov_len); 326 uelem[0].id = cookie; 327 uelem[0].len = iov[0].iov_len; 328 err = EINVAL; 329 goto done; 330 } 331 332 /* Grab the address of the header and do initial population */ 333 hdr = (struct virtio_net_mrgrxhdr *)iov[0].iov_base; 334 bzero(hdr, hdr_sz); 335 hdr->vrh_bufs = 1; 336 337 /* 338 * If there is any space remaining in the first buffer after writing 339 * the header, fill it with frame data. The size of the header itself 340 * is accounted for later. 341 */ 342 if (iov[0].iov_len > hdr_sz) { 343 buf = iov[0].iov_base + hdr_sz; 344 len = iov[0].iov_len - hdr_sz; 345 346 size_t copy_len; 347 copy_len = viona_copy_mblk(mp, copied, buf, len, &end); 348 chunk += copy_len; 349 copied += copy_len; 350 } 351 i = 1; 352 353 do { 354 while (i < n && !end) { 355 buf = iov[i].iov_base; 356 len = iov[i].iov_len; 357 358 size_t copy_len; 359 copy_len = viona_copy_mblk(mp, copied, buf, len, &end); 360 chunk += copy_len; 361 copied += copy_len; 362 i++; 363 } 364 365 uelem[buf_idx].id = cookie; 366 uelem[buf_idx].len = chunk; 367 368 /* 369 * Try to grab another buffer from the ring if the mblk has not 370 * yet been entirely copied out. 371 */ 372 if (!end) { 373 if (buf_idx == (VTNET_MAXSEGS - 1)) { 374 /* 375 * Our arbitrary limit on the number of buffers 376 * to offer for merge has already been reached. 377 */ 378 err = EOVERFLOW; 379 break; 380 } 381 if (pages != NULL) { 382 vmm_drv_page_release_chain(pages); 383 pages = NULL; 384 } 385 n = vq_popchain(ring, iov, VTNET_MAXSEGS, &cookie, 386 &pages); 387 if (n <= 0) { 388 /* 389 * Without more immediate space to perform the 390 * copying, there is little choice left but to 391 * drop the packet. 392 */ 393 err = EMSGSIZE; 394 break; 395 } 396 chunk = 0; 397 i = 0; 398 buf_idx++; 399 /* 400 * Keep the header up-to-date with the number of 401 * buffers, but never reference its value since the 402 * guest could meddle with it. 403 */ 404 hdr->vrh_bufs++; 405 } 406 } while (!end && copied < msz); 407 408 /* Account for the header size in the first buffer */ 409 uelem[0].len += hdr_sz; 410 411 /* 412 * If no other errors were encounted during the copy, was the expected 413 * amount of data transfered? 414 */ 415 if (err == 0 && copied != msz) { 416 VIONA_PROBE5(too_short, viona_vring_t *, ring, 417 uint16_t, cookie, mblk_t *, mp, size_t, copied, 418 size_t, msz); 419 VIONA_RING_STAT_INCR(ring, too_short); 420 err = EINVAL; 421 } 422 423 /* Add chksum bits, if needed */ 424 if ((features & VIRTIO_NET_F_GUEST_CSUM) != 0) { 425 uint32_t cksum_flags; 426 427 if (((features & VIRTIO_NET_F_GUEST_TSO4) != 0) && 428 ((DB_CKSUMFLAGS(mp) & HW_LSO) != 0)) { 429 hdr->vrh_gso_type |= VIRTIO_NET_HDR_GSO_TCPV4; 430 hdr->vrh_gso_size = DB_LSOMSS(mp); 431 } 432 433 mac_hcksum_get((mblk_t *)mp, NULL, NULL, NULL, NULL, 434 &cksum_flags); 435 if ((cksum_flags & HCK_FULLCKSUM_OK) != 0) { 436 hdr->vrh_flags |= VIRTIO_NET_HDR_F_DATA_VALID; 437 } 438 } 439 440 done: 441 switch (err) { 442 case 0: 443 /* Success can fall right through to ring delivery */ 444 break; 445 446 case EMSGSIZE: 447 VIONA_PROBE3(rx_merge_underrun, viona_vring_t *, ring, 448 uint16_t, cookie, mblk_t *, mp); 449 VIONA_RING_STAT_INCR(ring, rx_merge_underrun); 450 break; 451 452 case EOVERFLOW: 453 VIONA_PROBE3(rx_merge_overrun, viona_vring_t *, ring, 454 uint16_t, cookie, mblk_t *, mp); 455 VIONA_RING_STAT_INCR(ring, rx_merge_overrun); 456 break; 457 458 default: 459 VIONA_PROBE3(bad_rx_frame, viona_vring_t *, ring, 460 uint16_t, cookie, mblk_t *, mp); 461 VIONA_RING_STAT_INCR(ring, bad_rx_frame); 462 } 463 464 if (hdr_pages != NULL) { 465 vmm_drv_page_release_chain(hdr_pages); 466 } 467 if (pages != NULL) { 468 vmm_drv_page_release_chain(pages); 469 } 470 vq_pushchain_many(ring, buf_idx + 1, uelem); 471 return (err); 472 } 473 474 static void 475 viona_rx_common(viona_vring_t *ring, mblk_t *mp, boolean_t is_loopback) 476 { 477 viona_link_t *link = ring->vr_link; 478 mblk_t *mprx = NULL, **mprx_prevp = &mprx; 479 mblk_t *mpdrop = NULL, **mpdrop_prevp = &mpdrop; 480 const boolean_t do_merge = 481 ((link->l_features & VIRTIO_NET_F_MRG_RXBUF) != 0); 482 483 size_t nrx = 0, ndrop = 0; 484 485 while (mp != NULL) { 486 mblk_t *next = mp->b_next; 487 mblk_t *pad = NULL; 488 size_t size = msgsize(mp); 489 int err = 0; 490 491 mp->b_next = NULL; 492 493 /* 494 * We treat both a 'drop' response and errors the same here 495 * and put the packet on the drop chain. As packets may be 496 * subject to different actions in ipf (which do not all 497 * return the same set of error values), an error processing 498 * one packet doesn't mean the next packet will also generate 499 * an error. 500 */ 501 if (VNETHOOK_INTERESTED_IN(link->l_neti) && 502 viona_hook(link, ring, &mp, B_FALSE) != 0) { 503 if (mp != NULL) { 504 *mpdrop_prevp = mp; 505 mpdrop_prevp = &mp->b_next; 506 } else { 507 /* 508 * If the hook consumer (e.g. ipf) already 509 * freed the mblk_t, update the drop count now. 510 */ 511 ndrop++; 512 } 513 mp = next; 514 continue; 515 } 516 517 /* 518 * Ethernet frames are expected to be padded out in order to 519 * meet the minimum size. 520 * 521 * A special case is made for frames which are short by 522 * VLAN_TAGSZ, having been stripped of their VLAN tag while 523 * traversing MAC. A preallocated (and recycled) mblk is used 524 * for that specific condition. 525 * 526 * All other frames that fall short on length will have custom 527 * zero-padding allocated appended to them. 528 */ 529 if (size == NEED_VLAN_PAD_SIZE) { 530 ASSERT(MBLKL(viona_vlan_pad_mp) == VLAN_TAGSZ); 531 ASSERT(viona_vlan_pad_mp->b_cont == NULL); 532 533 for (pad = mp; pad->b_cont != NULL; pad = pad->b_cont) 534 ; 535 536 pad->b_cont = viona_vlan_pad_mp; 537 size += VLAN_TAGSZ; 538 } else if (size < MIN_BUF_SIZE) { 539 const size_t pad_size = MIN_BUF_SIZE - size; 540 mblk_t *zero_mp; 541 542 zero_mp = allocb(pad_size, BPRI_MED); 543 if (zero_mp == NULL) { 544 err = ENOMEM; 545 goto pad_drop; 546 } 547 548 VIONA_PROBE3(rx_pad_short, viona_vring_t *, ring, 549 mblk_t *, mp, size_t, pad_size); 550 VIONA_RING_STAT_INCR(ring, rx_pad_short); 551 zero_mp->b_wptr += pad_size; 552 bzero(zero_mp->b_rptr, pad_size); 553 linkb(mp, zero_mp); 554 size += pad_size; 555 } 556 557 if (do_merge) { 558 err = viona_recv_merged(ring, mp, size); 559 } else { 560 err = viona_recv_plain(ring, mp, size); 561 } 562 563 /* 564 * The VLAN padding mblk is meant for continual reuse, so 565 * remove it from the chain to prevent it from being freed. 566 * 567 * Custom allocated padding does not require this treatment and 568 * is freed normally. 569 */ 570 if (pad != NULL) { 571 pad->b_cont = NULL; 572 } 573 574 pad_drop: 575 /* 576 * While an error during rx processing 577 * (viona_recv_{merged,plain}) does not free mp on error, 578 * hook processing might or might not free mp. Handle either 579 * scenario -- if mp is not yet free, it is queued up and 580 * freed after the guest has been notified. If mp is 581 * already NULL, just proceed on. 582 */ 583 if (err != 0) { 584 *mpdrop_prevp = mp; 585 mpdrop_prevp = &mp->b_next; 586 587 /* 588 * If the available ring is empty, do not bother 589 * attempting to deliver any more frames. Count the 590 * rest as dropped too. 591 */ 592 if (err == ENOSPC) { 593 mp->b_next = next; 594 break; 595 } 596 } else { 597 /* Chain successful mblks to be freed later */ 598 *mprx_prevp = mp; 599 mprx_prevp = &mp->b_next; 600 nrx++; 601 } 602 mp = next; 603 } 604 605 membar_enter(); 606 viona_intr_ring(ring, B_FALSE); 607 608 /* Free successfully received frames */ 609 if (mprx != NULL) { 610 freemsgchain(mprx); 611 } 612 613 /* Free dropped frames, also tallying them */ 614 mp = mpdrop; 615 while (mp != NULL) { 616 mblk_t *next = mp->b_next; 617 618 mp->b_next = NULL; 619 freemsg(mp); 620 mp = next; 621 ndrop++; 622 } 623 VIONA_PROBE3(rx, viona_link_t *, link, size_t, nrx, size_t, ndrop); 624 } 625 626 static void 627 viona_rx_classified(void *arg, mac_resource_handle_t mrh, mblk_t *mp, 628 boolean_t is_loopback) 629 { 630 viona_vring_t *ring = (viona_vring_t *)arg; 631 632 /* Drop traffic if ring is inactive or renewing its lease */ 633 if (ring->vr_state != VRS_RUN || 634 (ring->vr_state_flags & VRSF_RENEW) != 0) { 635 freemsgchain(mp); 636 return; 637 } 638 639 viona_rx_common(ring, mp, is_loopback); 640 } 641 642 static void 643 viona_rx_mcast(void *arg, mac_resource_handle_t mrh, mblk_t *mp, 644 boolean_t is_loopback) 645 { 646 viona_vring_t *ring = (viona_vring_t *)arg; 647 mac_handle_t mh = ring->vr_link->l_mh; 648 mblk_t *mp_mcast_only = NULL; 649 mblk_t **mpp = &mp_mcast_only; 650 651 /* Drop traffic if ring is inactive or renewing its lease */ 652 if (ring->vr_state != VRS_RUN || 653 (ring->vr_state_flags & VRSF_RENEW) != 0) { 654 freemsgchain(mp); 655 return; 656 } 657 658 /* 659 * In addition to multicast traffic, broadcast packets will also arrive 660 * via the MAC_CLIENT_PROMISC_MULTI handler. The mac_rx_set() callback 661 * for fully-classified traffic has already delivered that broadcast 662 * traffic, so it should be suppressed here, rather than duplicating it 663 * to the guest. 664 */ 665 while (mp != NULL) { 666 mblk_t *mp_next; 667 mac_header_info_t mhi; 668 int err; 669 670 mp_next = mp->b_next; 671 mp->b_next = NULL; 672 673 /* Determine the packet type */ 674 err = mac_vlan_header_info(mh, mp, &mhi); 675 if (err != 0) { 676 mblk_t *pull; 677 678 /* 679 * It is possible that gathering of the header 680 * information was impeded by a leading mblk_t which 681 * was of inadequate length to reference the needed 682 * fields. Try again, in case that could be solved 683 * with a pull-up. 684 */ 685 pull = msgpullup(mp, sizeof (struct ether_vlan_header)); 686 if (pull == NULL) { 687 err = ENOMEM; 688 } else { 689 err = mac_vlan_header_info(mh, pull, &mhi); 690 freemsg(pull); 691 } 692 693 if (err != 0) { 694 VIONA_RING_STAT_INCR(ring, rx_mcast_check); 695 } 696 } 697 698 /* Chain up matching packets while discarding others */ 699 if (err == 0 && mhi.mhi_dsttype == MAC_ADDRTYPE_MULTICAST) { 700 *mpp = mp; 701 mpp = &mp->b_next; 702 } else { 703 freemsg(mp); 704 } 705 706 mp = mp_next; 707 } 708 709 if (mp_mcast_only != NULL) { 710 viona_rx_common(ring, mp_mcast_only, is_loopback); 711 } 712 } 713 714 int 715 viona_rx_set(viona_link_t *link) 716 { 717 viona_vring_t *ring = &link->l_vrings[VIONA_VQ_RX]; 718 int err; 719 720 mac_rx_set(link->l_mch, viona_rx_classified, ring); 721 err = mac_promisc_add(link->l_mch, MAC_CLIENT_PROMISC_MULTI, 722 viona_rx_mcast, ring, &link->l_mph, 723 MAC_PROMISC_FLAGS_NO_TX_LOOP | MAC_PROMISC_FLAGS_VLAN_TAG_STRIP); 724 if (err != 0) { 725 mac_rx_clear(link->l_mch); 726 } 727 728 return (err); 729 } 730 731 void 732 viona_rx_clear(viona_link_t *link) 733 { 734 mac_promisc_remove(link->l_mph); 735 mac_rx_clear(link->l_mch); 736 } 737