Lines matching full:m0 — identifier-search results over the FreeBSD cxgbe driver (sys/dev/cxgbe/t4_sge.c). Each entry shows the source line number, the matched line, and the enclosing function; declaration sites are tagged local or argument.

1580 struct mbuf *m0; in service_iq_fl() local
1614 m0 = NULL; in service_iq_fl()
1637 m0 = get_fl_payload(sc, fl, lq); in service_iq_fl()
1638 if (__predict_false(m0 == NULL)) in service_iq_fl()
1646 t4_cpl_handler[d->rss.opcode](iq, &d->rss, m0); in service_iq_fl()
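
The four lines above are the ingress-queue dispatch pattern: service_iq_fl() pulls the payload for a freelist-backed message with get_fl_payload(), backs off if the freelist is starved (m0 == NULL), and hands the mbuf to the handler registered for the message's CPL opcode. A minimal sketch of that shape, with the queue/header types and the handler table reduced to hypothetical stand-ins:

    #include <sys/param.h>
    #include <sys/errno.h>
    #include <sys/mbuf.h>

    struct iq;                              /* hypothetical ingress queue */
    struct rss_hdr { uint8_t opcode; };     /* hypothetical CPL prefix */
    typedef int (*cpl_handler_t)(struct iq *, const struct rss_hdr *,
        struct mbuf *);
    static cpl_handler_t cpl_handler[256];  /* indexed by CPL opcode */

    static int
    dispatch_cpl(struct iq *iq, const struct rss_hdr *rss, struct mbuf *m0)
    {
            /* NULL means the freelist ran dry; caller replenishes, retries. */
            if (__predict_false(m0 == NULL))
                    return (ENOBUFS);
            return (cpl_handler[rss->opcode](iq, rss, m0));
    }
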
1826 struct mbuf *m0, *m, **pnext; in get_fl_payload() local
1830 M_ASSERTPKTHDR(fl->m0); in get_fl_payload()
1831 MPASS(fl->m0->m_pkthdr.len == plen); in get_fl_payload()
1834 m0 = fl->m0; in get_fl_payload()
1846 m0 = get_scatter_segment(sc, fl, 0, plen); in get_fl_payload()
1847 if (m0 == NULL) in get_fl_payload()
1849 remaining = plen - m0->m_len; in get_fl_payload()
1850 pnext = &m0->m_next; in get_fl_payload()
1856 fl->m0 = m0; in get_fl_payload()
1868 M_ASSERTPKTHDR(m0); in get_fl_payload()
1869 return (m0); in get_fl_payload()
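
The idiom worth noting in get_fl_payload() is the tail pointer: pnext always addresses the m_next slot where the next scatter segment will be hung, so the chain is assembled in a single pass, and a partially built frame can be parked in fl->m0 (line 1856) and resumed on the next visit. A self-contained sketch with get_scatter_segment() reduced to a hypothetical callback and without the driver's park-and-resume state:

    #include <sys/param.h>
    #include <sys/mbuf.h>

    /* get_seg() must return a pkthdr mbuf on the first call. */
    static struct mbuf *
    assemble_frame(struct mbuf *(*get_seg)(int), int plen)
    {
            struct mbuf *m0, *m, **pnext;
            int remaining;

            m0 = get_seg(plen);
            if (m0 == NULL)
                    return (NULL);
            m0->m_pkthdr.len = plen;
            remaining = plen - m0->m_len;
            pnext = &m0->m_next;
            while (remaining > 0) {
                    m = get_seg(remaining);
                    if (m == NULL) {        /* the real code parks fl->m0 here */
                            m_freem(m0);
                            return (NULL);
                    }
                    remaining -= m->m_len;
                    *pnext = m;             /* hang segment, advance the tail */
                    pnext = &m->m_next;
            }
            *pnext = NULL;
            M_ASSERTPKTHDR(m0);
            return (m0);
    }
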
1936 struct mbuf *m0; in eth_rx() local
1991 rc = pfil_mem_in(vi->pfil, frame, slen, ifp, &m0); in eth_rx()
2003 m0 = get_fl_payload(sc, fl, plen); in eth_rx()
2004 if (__predict_false(m0 == NULL)) in eth_rx()
2007 m0->m_pkthdr.len -= sc->params.sge.fl_pktshift; in eth_rx()
2008 m0->m_len -= sc->params.sge.fl_pktshift; in eth_rx()
2009 m0->m_data += sc->params.sge.fl_pktshift; in eth_rx()
2012 m0->m_pkthdr.rcvif = ifp; in eth_rx()
2013 M_HASHTYPE_SET(m0, sw_hashtype[d->rss.hash_type][d->rss.ipv6]); in eth_rx()
2014 m0->m_pkthdr.flowid = be32toh(d->rss.hash_val); in eth_rx()
2035 m0->m_pkthdr.csum_data = be16toh(cpl->csum); in eth_rx()
2038 m0->m_pkthdr.csum_flags = CSUM_L3_CALC | in eth_rx()
2042 m0->m_pkthdr.csum_flags = CSUM_L4_CALC | in eth_rx()
2049 M_HASHTYPE_SETINNER(m0); in eth_rx()
2064 m0->m_pkthdr.csum_data = 0xffff; in eth_rx()
2066 m0->m_pkthdr.csum_flags = CSUM_L4_CALC | in eth_rx()
2069 m0->m_pkthdr.csum_flags = CSUM_L3_CALC | in eth_rx()
2076 MPASS(m0->m_pkthdr.csum_data == 0xffff); in eth_rx()
2081 m0->m_pkthdr.csum_flags = in eth_rx()
2096 m0->m_pkthdr.ether_vtag = be16toh(cpl->vlan); in eth_rx()
2097 m0->m_flags |= M_VLANTAG; in eth_rx()
2107 m0->m_pkthdr.rcv_tstmp = t4_tstmp_to_ns(sc, in eth_rx()
2109 if (m0->m_pkthdr.rcv_tstmp != 0) in eth_rx()
2110 m0->m_flags |= M_TSTMP; in eth_rx()
2114 m0->m_pkthdr.numa_domain = if_getnumadomain(ifp); in eth_rx()
2118 (M_HASHTYPE_GET(m0) == M_HASHTYPE_RSS_TCP_IPV4 || in eth_rx()
2119 M_HASHTYPE_GET(m0) == M_HASHTYPE_RSS_TCP_IPV6)) { in eth_rx()
2121 tcp_lro_queue_mbuf(lro, m0); in eth_rx()
2124 if (tcp_lro_rx(lro, m0, 0) == 0) in eth_rx()
2128 if_input(ifp, m0); in eth_rx()
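
In eth_rx() the chain from get_fl_payload() becomes a stack-ready packet: the pad the chip prepends (fl_pktshift) is trimmed from the first segment, then rcvif, the RSS flowid and hash type, checksum verdicts, the VLAN tag, and optionally a hardware receive timestamp are written into the pkthdr before the packet goes to LRO or if_input(). The CSUM_L3_CALC/CSUM_L4_CALC names are driver shorthands for the stack's usual checksum bits; a hedged sketch of the core fixups using the generic flags:

    #include <sys/param.h>
    #include <sys/mbuf.h>
    #include <net/if.h>
    #include <net/if_var.h>

    static void
    rx_fixup(if_t ifp, struct mbuf *m0, int pktshift, uint32_t hashval)
    {
            /* The pad always sits inside the first segment. */
            m0->m_pkthdr.len -= pktshift;
            m0->m_len -= pktshift;
            m0->m_data += pktshift;

            m0->m_pkthdr.rcvif = ifp;
            m0->m_pkthdr.flowid = hashval;
            M_HASHTYPE_SET(m0, M_HASHTYPE_OPAQUE_HASH);

            /* Hardware verified both L3 and L4 checksums. */
            m0->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID |
                CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
            m0->m_pkthdr.csum_data = 0xffff;
    }
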
2655 struct mbuf *m0 = *mp, *m; in parse_pkt() local
2672 M_ASSERTPKTHDR(m0); in parse_pkt()
2673 if (__predict_false(m0->m_pkthdr.len < ETHER_HDR_LEN)) { in parse_pkt()
2676 m_freem(m0); in parse_pkt()
2685 M_ASSERTPKTHDR(m0); in parse_pkt()
2686 MPASS(m0->m_pkthdr.len > 0); in parse_pkt()
2687 nsegs = count_mbuf_nsegs(m0, 0, &cflags); in parse_pkt()
2689 if (m0->m_pkthdr.csum_flags & CSUM_SND_TAG) in parse_pkt()
2690 mst = m0->m_pkthdr.snd_tag; in parse_pkt()
2697 set_mbuf_cflags(m0, cflags); in parse_pkt()
2698 rc = t6_ktls_parse_pkt(m0); in parse_pkt()
2704 if (nsegs > max_nsegs_allowed(m0, vm_wr)) { in parse_pkt()
2710 if ((m = m_defrag(m0, M_NOWAIT)) == NULL) { in parse_pkt()
2714 *mp = m0 = m; /* update caller's copy after defrag */ in parse_pkt()
2718 if (__predict_false(nsegs > 2 && m0->m_pkthdr.len <= MHLEN && in parse_pkt()
2721 m0 = m_pullup(m0, m0->m_pkthdr.len); in parse_pkt()
2722 if (m0 == NULL) { in parse_pkt()
2727 *mp = m0; /* update caller's copy after pullup */ in parse_pkt()
2730 set_mbuf_nsegs(m0, nsegs); in parse_pkt()
2731 set_mbuf_cflags(m0, cflags); in parse_pkt()
2732 calculate_mbuf_len16(m0, vm_wr); in parse_pkt()
2740 if (__predict_false(needs_eo(mst) && !needs_outer_l4_csum(m0))) { in parse_pkt()
2741 m_snd_tag_rele(m0->m_pkthdr.snd_tag); in parse_pkt()
2742 m0->m_pkthdr.snd_tag = NULL; in parse_pkt()
2743 m0->m_pkthdr.csum_flags &= ~CSUM_SND_TAG; in parse_pkt()
2748 if (!needs_hwcsum(m0) in parse_pkt()
2755 m = m0; in parse_pkt()
2762 m0->m_pkthdr.l2hlen = sizeof(*evh); in parse_pkt()
2764 m0->m_pkthdr.l2hlen = sizeof(*eh); in parse_pkt()
2769 l3hdr = m_advance(&m, &offset, m0->m_pkthdr.l2hlen); in parse_pkt()
2771 m_advance(&m, &offset, m0->m_pkthdr.l2hlen); in parse_pkt()
2778 m0->m_pkthdr.l3hlen = sizeof(struct ip6_hdr); in parse_pkt()
2786 if (needs_vxlan_csum(m0)) { in parse_pkt()
2789 if (needs_vxlan_tso(m0)) { in parse_pkt()
2798 m0->m_pkthdr.l3hlen = ip->ip_hl << 2; in parse_pkt()
2814 if (needs_vxlan_csum(m0)) { in parse_pkt()
2815 m0->m_pkthdr.l4hlen = sizeof(struct udphdr); in parse_pkt()
2816 m0->m_pkthdr.l5hlen = sizeof(struct vxlan_header); in parse_pkt()
2819 eh = m_advance(&m, &offset, m0->m_pkthdr.l3hlen + in parse_pkt()
2826 m0->m_pkthdr.inner_l2hlen = sizeof(*evh); in parse_pkt()
2828 m0->m_pkthdr.inner_l2hlen = sizeof(*eh); in parse_pkt()
2830 l3hdr = m_advance(&m, &offset, m0->m_pkthdr.inner_l2hlen); in parse_pkt()
2832 m_advance(&m, &offset, m0->m_pkthdr.inner_l2hlen); in parse_pkt()
2838 m0->m_pkthdr.inner_l3hlen = sizeof(struct ip6_hdr); in parse_pkt()
2846 m0->m_pkthdr.inner_l3hlen = ip->ip_hl << 2; in parse_pkt()
2861 if (needs_inner_tcp_csum(m0)) { in parse_pkt()
2862 tcp = m_advance(&m, &offset, m0->m_pkthdr.inner_l3hlen); in parse_pkt()
2863 m0->m_pkthdr.inner_l4hlen = tcp->th_off * 4; in parse_pkt()
2865 MPASS((m0->m_pkthdr.csum_flags & CSUM_SND_TAG) == 0); in parse_pkt()
2866 m0->m_pkthdr.csum_flags &= CSUM_INNER_IP6_UDP | in parse_pkt()
2872 if (needs_outer_tcp_csum(m0)) { in parse_pkt()
2873 tcp = m_advance(&m, &offset, m0->m_pkthdr.l3hlen); in parse_pkt()
2874 m0->m_pkthdr.l4hlen = tcp->th_off * 4; in parse_pkt()
2877 set_mbuf_eo_tsclk_tsoff(m0, in parse_pkt()
2881 set_mbuf_eo_tsclk_tsoff(m0, 0); in parse_pkt()
2882 } else if (needs_outer_udp_csum(m0)) { in parse_pkt()
2883 m0->m_pkthdr.l4hlen = sizeof(struct udphdr); in parse_pkt()
2891 immhdrs = m0->m_pkthdr.l2hlen + m0->m_pkthdr.l3hlen + in parse_pkt()
2892 m0->m_pkthdr.l4hlen; in parse_pkt()
2894 nsegs = count_mbuf_nsegs(m0, immhdrs, &cflags); in parse_pkt()
2895 MPASS(cflags == mbuf_cflags(m0)); in parse_pkt()
2896 set_mbuf_eo_nsegs(m0, nsegs); in parse_pkt()
2897 set_mbuf_eo_len16(m0, in parse_pkt()
2898 txpkt_eo_len16(nsegs, immhdrs, needs_tso(m0))); in parse_pkt()
2899 rc = ethofld_transmit(mst->ifp, m0); in parse_pkt()
2906 MPASS(m0 == *mp); in parse_pkt()
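
parse_pkt() normalizes an outbound packet before it is queued: it counts DMA segments (defragmenting, or pulling a tiny packet up into one mbuf, when the chain is too scattered for a single work request), then walks the headers with the driver-local m_advance() helper and records l2hlen/l3hlen/l4hlen, plus the inner_* lengths for VXLAN, in the pkthdr, where the WR writers further down read them back. A hedged sketch of that bookkeeping for the simple case where the headers are contiguous in the first mbuf and the payload is TCP (assumptions the real chain-walking code does not make):

    #include <sys/param.h>
    #include <sys/mbuf.h>
    #include <net/ethernet.h>
    #include <netinet/in.h>
    #include <netinet/ip.h>
    #include <netinet/ip6.h>
    #include <netinet/tcp.h>

    static void
    record_hdr_lens(struct mbuf *m0)
    {
            struct ether_header *eh;
            struct ether_vlan_header *evh;
            struct ip *ip;
            struct tcphdr *tcp;
            uint16_t etype;
            char *l3;

            eh = mtod(m0, struct ether_header *);
            if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
                    evh = (struct ether_vlan_header *)eh;
                    etype = ntohs(evh->evl_proto);
                    m0->m_pkthdr.l2hlen = sizeof(*evh);
            } else {
                    etype = ntohs(eh->ether_type);
                    m0->m_pkthdr.l2hlen = sizeof(*eh);
            }
            l3 = (char *)eh + m0->m_pkthdr.l2hlen;

            if (etype == ETHERTYPE_IPV6) {
                    /* assumes no IPv6 extension headers */
                    m0->m_pkthdr.l3hlen = sizeof(struct ip6_hdr);
            } else {
                    ip = (struct ip *)l3;
                    m0->m_pkthdr.l3hlen = ip->ip_hl << 2;  /* IHL, 32-bit words */
            }

            tcp = (struct tcphdr *)(l3 + m0->m_pkthdr.l3hlen);
            m0->m_pkthdr.l4hlen = tcp->th_off * 4;  /* data offset, ditto */
    }
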
3144 struct mbuf *m0; in eth_tx() local
3157 m0 = r->items[cidx]; in eth_tx()
3158 m_freem(m0); in eth_tx()
3183 m0 = r->items[cidx]; in eth_tx()
3184 M_ASSERTPKTHDR(m0); in eth_tx()
3185 MPASS(m0->m_nextpkt == NULL); in eth_tx()
3192 if (cannot_use_txpkts(m0)) in eth_tx()
3203 rc = add_to_txpkts_vf(sc, txq, m0, avail, &snd); in eth_tx()
3205 rc = add_to_txpkts_pf(sc, txq, m0, avail, &snd); in eth_tx()
3238 /* m0 was coalesced into txq->txpkts. */ in eth_tx()
3243 * m0 is suitable for tx coalescing but could not be in eth_tx()
3245 * been transmitted. Start a new txpkts with m0. in eth_tx()
3255 n = tx_len16_to_desc(mbuf_len16(m0)); in eth_tx()
3263 if (mbuf_cflags(m0) & MC_RAW_WR) { in eth_tx()
3264 n = write_raw_wr(txq, wr, m0, avail); in eth_tx()
3266 } else if (mbuf_cflags(m0) & MC_TLS) { in eth_tx()
3267 ETHER_BPF_MTAP(ifp, m0); in eth_tx()
3268 n = t6_ktls_write_wr(txq, wr, m0, avail); in eth_tx()
3271 ETHER_BPF_MTAP(ifp, m0); in eth_tx()
3273 n = write_txpkt_vm_wr(sc, txq, m0); in eth_tx()
3275 n = write_txpkt_wr(sc, txq, m0, avail); in eth_tx()
3278 if (!(mbuf_cflags(m0) & MC_TLS)) in eth_tx()
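
eth_tx() is the ring-drain loop: each slot of the tx ring holds exactly one packet (no m_nextpkt chains), and each packet is either coalesced into a multi-packet txpkts work request or written as its own WR (raw, TLS, VM, or plain txpkt), with a BPF tap on the way out and m_freem() once the descriptors are built. A minimal sketch of the consumer side, with the per-packet work left to a caller-supplied hypothetical callback:

    #include <sys/param.h>
    #include <sys/systm.h>
    #include <sys/mbuf.h>

    static u_int
    drain_ring(struct mbuf **items, u_int size, u_int cidx, u_int pidx,
        void (*xmit)(struct mbuf *))
    {
            struct mbuf *m0;
            u_int total = 0;

            while (cidx != pidx) {
                    m0 = items[cidx];
                    M_ASSERTPKTHDR(m0);
                    KASSERT(m0->m_nextpkt == NULL,
                        ("%s: packet chain in tx ring", __func__));
                    xmit(m0);               /* coalesce-or-single decision */
                    if (++cidx == size)     /* consumer index wraps */
                            cidx = 0;
                    total++;
            }
            return (total);
    }
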
5140 m_freem(fl->m0); in free_fl_buffers()
5347 write_lso_cpl(void *cpl, struct mbuf *m0) in write_lso_cpl() argument
5352 KASSERT(m0->m_pkthdr.l2hlen > 0 && m0->m_pkthdr.l3hlen > 0 && in write_lso_cpl()
5353 m0->m_pkthdr.l4hlen > 0, in write_lso_cpl()
5355 __func__, m0)); in write_lso_cpl()
5359 V_LSO_ETHHDR_LEN((m0->m_pkthdr.l2hlen - ETHER_HDR_LEN) >> 2) | in write_lso_cpl()
5360 V_LSO_IPHDR_LEN(m0->m_pkthdr.l3hlen >> 2) | in write_lso_cpl()
5361 V_LSO_TCPHDR_LEN(m0->m_pkthdr.l4hlen >> 2); in write_lso_cpl()
5362 if (m0->m_pkthdr.l3hlen == sizeof(struct ip6_hdr)) in write_lso_cpl()
5368 lso->mss = htobe16(m0->m_pkthdr.tso_segsz); in write_lso_cpl()
5370 lso->len = htobe32(m0->m_pkthdr.len); in write_lso_cpl()
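
write_lso_cpl() packs the header lengths recorded by parse_pkt() into the LSO control words. The units are easy to misread: every length field is in 32-bit words, and the Ethernet field counts only bytes beyond the base 14-byte header (i.e. VLAN tag bytes), hence the subtraction at line 5359. A sketch of just the conversions; the bit positions below are illustrative, not the real t4_msg.h layout:

    #include <sys/param.h>
    #include <sys/mbuf.h>
    #include <net/ethernet.h>

    static uint32_t
    pack_lso_lens(const struct mbuf *m0)
    {
            uint32_t eth_w, ip_w, tcp_w;

            eth_w = (m0->m_pkthdr.l2hlen - ETHER_HDR_LEN) >> 2; /* VLAN words */
            ip_w = m0->m_pkthdr.l3hlen >> 2;    /* IP header, 32-bit words */
            tcp_w = m0->m_pkthdr.l4hlen >> 2;   /* TCP header, 32-bit words */

            /* Illustrative packing; the V_LSO_* macros define the shifts. */
            return ((eth_w << 14) | (ip_w << 8) | tcp_w);
    }
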
5376 write_tnl_lso_cpl(void *cpl, struct mbuf *m0) in write_tnl_lso_cpl() argument
5381 KASSERT(m0->m_pkthdr.inner_l2hlen > 0 && in write_tnl_lso_cpl()
5382 m0->m_pkthdr.inner_l3hlen > 0 && m0->m_pkthdr.inner_l4hlen > 0 && in write_tnl_lso_cpl()
5383 m0->m_pkthdr.inner_l5hlen > 0, in write_tnl_lso_cpl()
5385 __func__, m0)); in write_tnl_lso_cpl()
5386 KASSERT(m0->m_pkthdr.l2hlen > 0 && m0->m_pkthdr.l3hlen > 0 && in write_tnl_lso_cpl()
5387 m0->m_pkthdr.l4hlen > 0 && m0->m_pkthdr.l5hlen > 0, in write_tnl_lso_cpl()
5389 __func__, m0)); in write_tnl_lso_cpl()
5395 (m0->m_pkthdr.l2hlen - ETHER_HDR_LEN) >> 2) | in write_tnl_lso_cpl()
5396 V_CPL_TX_TNL_LSO_IPHDRLENOUT(m0->m_pkthdr.l3hlen >> 2) | in write_tnl_lso_cpl()
5398 if (m0->m_pkthdr.l3hlen == sizeof(struct ip6_hdr)) in write_tnl_lso_cpl()
5409 V_CPL_TX_TNL_LSO_TNLHDRLEN(m0->m_pkthdr.l2hlen + in write_tnl_lso_cpl()
5410 m0->m_pkthdr.l3hlen + m0->m_pkthdr.l4hlen + in write_tnl_lso_cpl()
5411 m0->m_pkthdr.l5hlen) | in write_tnl_lso_cpl()
5417 (m0->m_pkthdr.inner_l2hlen - ETHER_HDR_LEN) >> 2) | in write_tnl_lso_cpl()
5418 V_CPL_TX_TNL_LSO_IPHDRLEN(m0->m_pkthdr.inner_l3hlen >> 2) | in write_tnl_lso_cpl()
5419 V_CPL_TX_TNL_LSO_TCPHDRLEN(m0->m_pkthdr.inner_l4hlen >> 2); in write_tnl_lso_cpl()
5420 if (m0->m_pkthdr.inner_l3hlen == sizeof(struct ip6_hdr)) in write_tnl_lso_cpl()
5425 htobe16(V_CPL_TX_TNL_LSO_MSS(m0->m_pkthdr.tso_segsz)); in write_tnl_lso_cpl()
5428 htobe32(V_CPL_TX_TNL_LSO_SIZE(m0->m_pkthdr.len)); in write_tnl_lso_cpl()
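
The tunnel variant carries two sets of lengths: the outer headers go into the *OUT fields, the inner (post-encapsulation) headers into the plain fields, and TNLHDRLEN is the full encapsulation overhead in bytes rather than words, outer L2 + outer IP + UDP + VXLAN, the sum built at lines 5409-5411:

    #include <sys/param.h>
    #include <sys/mbuf.h>

    /* Total VXLAN encapsulation overhead in bytes; l5hlen is the
     * VXLAN header itself, as recorded by parse_pkt(). */
    static u_int
    tnl_hdr_len(const struct mbuf *m0)
    {
            return (m0->m_pkthdr.l2hlen + m0->m_pkthdr.l3hlen +
                m0->m_pkthdr.l4hlen + m0->m_pkthdr.l5hlen);
    }
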
5443 write_txpkt_vm_wr(struct adapter *sc, struct sge_txq *txq, struct mbuf *m0) in write_txpkt_vm_wr() argument
5455 M_ASSERTPKTHDR(m0); in write_txpkt_vm_wr()
5457 len16 = mbuf_len16(m0); in write_txpkt_vm_wr()
5458 pktlen = m0->m_pkthdr.len; in write_txpkt_vm_wr()
5460 if (needs_tso(m0)) in write_txpkt_vm_wr()
5482 m_copydata(m0, 0, VM_TX_L2HDR_LEN, wr->ethmacdst); in write_txpkt_vm_wr()
5484 if (needs_tso(m0)) { in write_txpkt_vm_wr()
5485 cpl = write_lso_cpl(wr + 1, m0); in write_txpkt_vm_wr()
5491 ctrl1 = csum_to_ctrl(sc, m0); in write_txpkt_vm_wr()
5496 if (needs_vlan_insertion(m0)) { in write_txpkt_vm_wr()
5498 V_TXPKT_VLAN(m0->m_pkthdr.ether_vtag); in write_txpkt_vm_wr()
5521 write_gl_to_txd(txq, m0, &dst, 0); in write_txpkt_vm_wr()
5523 write_gl_to_txd(txq, m0, &dst, eq->sidx - ndesc < eq->pidx); in write_txpkt_vm_wr()
5528 txsd->m = m0; in write_txpkt_vm_wr()
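
write_txpkt_vm_wr() is the VF flavor of the single-packet WR: the firmware wants the leading Ethernet bytes inline in the request (ethmacdst, line 5482) so it can police the VF's source MAC, and m_copydata() is the right tool because it walks the chain even when those first bytes straddle mbufs. A sketch, with the copy length as a hypothetical stand-in for VM_TX_L2HDR_LEN:

    #include <sys/param.h>
    #include <sys/mbuf.h>

    #define INLINE_L2_LEN 16   /* hypothetical stand-in for VM_TX_L2HDR_LEN */

    static void
    copy_inline_l2(const struct mbuf *m0, char *dst)
    {
            /* m_copydata() handles a first segment shorter than the copy. */
            m_copydata(m0, 0, INLINE_L2_LEN, dst);
    }
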
5542 write_raw_wr(struct sge_txq *txq, void *wr, struct mbuf *m0, u_int available) in write_raw_wr() argument
5550 len16 = mbuf_len16(m0); in write_raw_wr()
5555 for (m = m0; m != NULL; m = m->m_next) in write_raw_wr()
5561 txsd->m = m0; in write_raw_wr()
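
A raw work request (MC_RAW_WR) arrives already formatted in the mbuf chain, so write_raw_wr() only has to copy the chain into descriptor space; the loop at line 5555 visits every segment. A sketch of the copy; the driver's copy helper additionally handles descriptor-ring wrap-around, which a flat destination buffer does not need:

    #include <sys/param.h>
    #include <sys/systm.h>
    #include <sys/mbuf.h>

    static void
    copy_chain(const struct mbuf *m0, char *dst)
    {
            const struct mbuf *m;

            for (m = m0; m != NULL; m = m->m_next) {
                    memcpy(dst, mtod(m, const void *), m->m_len);
                    dst += m->m_len;
            }
    }
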
5575 write_txpkt_wr(struct adapter *sc, struct sge_txq *txq, struct mbuf *m0, in write_txpkt_wr() argument
5588 M_ASSERTPKTHDR(m0); in write_txpkt_wr()
5590 len16 = mbuf_len16(m0); in write_txpkt_wr()
5591 nsegs = mbuf_nsegs(m0); in write_txpkt_wr()
5592 pktlen = m0->m_pkthdr.len; in write_txpkt_wr()
5594 if (needs_tso(m0)) { in write_txpkt_wr()
5595 if (needs_vxlan_tso(m0)) in write_txpkt_wr()
5599 } else if (!(mbuf_cflags(m0) & MC_NOMAP) && pktlen <= imm_payload(2) && in write_txpkt_wr()
5620 if (needs_tso(m0)) { in write_txpkt_wr()
5621 if (needs_vxlan_tso(m0)) { in write_txpkt_wr()
5622 cpl = write_tnl_lso_cpl(wr + 1, m0); in write_txpkt_wr()
5625 cpl = write_lso_cpl(wr + 1, m0); in write_txpkt_wr()
5632 ctrl1 = csum_to_ctrl(sc, m0); in write_txpkt_wr()
5635 if (needs_vxlan_csum(m0)) in write_txpkt_wr()
5642 if (needs_vlan_insertion(m0)) { in write_txpkt_wr()
5644 V_TXPKT_VLAN(m0->m_pkthdr.ether_vtag); in write_txpkt_wr()
5660 write_gl_to_txd(txq, m0, &dst, eq->sidx - ndesc < eq->pidx); in write_txpkt_wr()
5665 for (m = m0; m != NULL; m = m->m_next) { in write_txpkt_wr()
5680 txsd->m = m0; in write_txpkt_wr()
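
write_txpkt_wr() makes the immediate-vs-gather choice at line 5599: a short packet whose buffers are mapped (no MC_NOMAP) is copied bodily into the work request, anything else is described by a scatter/gather list via write_gl_to_txd(). The decision, with a hypothetical imm_limit standing in for imm_payload(2):

    #include <sys/param.h>
    #include <sys/mbuf.h>

    static bool
    use_immediate_data(const struct mbuf *m0, u_int imm_limit, bool unmapped)
    {
            /* Unmapped (M_EXTPG-backed) chains cannot be copied cheaply,
             * and larger packets would not fit in the WR anyway. */
            return (!unmapped && (u_int)m0->m_pkthdr.len <= imm_limit);
    }
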
6630 struct mbuf *m0, int compl) in write_ethofld_wr() argument
6642 M_ASSERTPKTHDR(m0); in write_ethofld_wr()
6643 KASSERT(m0->m_pkthdr.l2hlen > 0 && m0->m_pkthdr.l3hlen > 0 && in write_ethofld_wr()
6644 m0->m_pkthdr.l4hlen > 0, in write_ethofld_wr()
6645 ("%s: ethofld mbuf %p is missing header lengths", __func__, m0)); in write_ethofld_wr()
6647 len16 = mbuf_eo_len16(m0); in write_ethofld_wr()
6648 nsegs = mbuf_eo_nsegs(m0); in write_ethofld_wr()
6649 pktlen = m0->m_pkthdr.len; in write_ethofld_wr()
6651 if (needs_tso(m0)) in write_ethofld_wr()
6653 immhdrs = m0->m_pkthdr.l2hlen + m0->m_pkthdr.l3hlen + m0->m_pkthdr.l4hlen; in write_ethofld_wr()
6661 if (needs_outer_udp_csum(m0)) { in write_ethofld_wr()
6663 wr->u.udpseg.ethlen = m0->m_pkthdr.l2hlen; in write_ethofld_wr()
6664 wr->u.udpseg.iplen = htobe16(m0->m_pkthdr.l3hlen); in write_ethofld_wr()
6665 wr->u.udpseg.udplen = m0->m_pkthdr.l4hlen; in write_ethofld_wr()
6673 MPASS(needs_outer_tcp_csum(m0)); in write_ethofld_wr()
6675 wr->u.tcpseg.ethlen = m0->m_pkthdr.l2hlen; in write_ethofld_wr()
6676 wr->u.tcpseg.iplen = htobe16(m0->m_pkthdr.l3hlen); in write_ethofld_wr()
6677 wr->u.tcpseg.tcplen = m0->m_pkthdr.l4hlen; in write_ethofld_wr()
6678 wr->u.tcpseg.tsclk_tsoff = mbuf_eo_tsclk_tsoff(m0); in write_ethofld_wr()
6683 if (needs_tso(m0)) { in write_ethofld_wr()
6686 wr->u.tcpseg.mss = htobe16(m0->m_pkthdr.tso_segsz); in write_ethofld_wr()
6690 V_LSO_ETHHDR_LEN((m0->m_pkthdr.l2hlen - in write_ethofld_wr()
6692 V_LSO_IPHDR_LEN(m0->m_pkthdr.l3hlen >> 2) | in write_ethofld_wr()
6693 V_LSO_TCPHDR_LEN(m0->m_pkthdr.l4hlen >> 2); in write_ethofld_wr()
6694 if (m0->m_pkthdr.l3hlen == sizeof(struct ip6_hdr)) in write_ethofld_wr()
6698 lso->mss = htobe16(m0->m_pkthdr.tso_segsz); in write_ethofld_wr()
6710 MPASS(needs_outer_l4_csum(m0)); in write_ethofld_wr()
6711 ctrl1 = csum_to_ctrl(cst->adapter, m0); in write_ethofld_wr()
6714 if (needs_vlan_insertion(m0)) { in write_ethofld_wr()
6716 V_TXPKT_VLAN(m0->m_pkthdr.ether_vtag); in write_ethofld_wr()
6727 m_copydata(m0, 0, immhdrs, (void *)p); in write_ethofld_wr()
6743 for (; m0 != NULL; m0 = m0->m_next) { in write_ethofld_wr()
6744 if (__predict_false(m0->m_len == 0)) in write_ethofld_wr()
6746 if (immhdrs >= m0->m_len) { in write_ethofld_wr()
6747 immhdrs -= m0->m_len; in write_ethofld_wr()
6750 if (m0->m_flags & M_EXTPG) in write_ethofld_wr()
6751 sglist_append_mbuf_epg(&sg, m0, in write_ethofld_wr()
6752 mtod(m0, vm_offset_t), m0->m_len); in write_ethofld_wr()
6754 sglist_append(&sg, mtod(m0, char *) + immhdrs, in write_ethofld_wr()
6755 m0->m_len - immhdrs); in write_ethofld_wr()
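
In write_ethofld_wr() the first immhdrs bytes (L2+L3+L4 headers) were already copied into the work request with m_copydata() at line 6727, so the scatter/gather list must describe only what follows: the loop skips whole leading mbufs, then appends the remainder of the first partially consumed one (M_EXTPG mbufs go through sglist_append_mbuf_epg() instead). A sketch of the skip-then-append walk for mapped mbufs:

    #include <sys/param.h>
    #include <sys/mbuf.h>
    #include <sys/sglist.h>

    static int
    append_payload(struct sglist *sg, struct mbuf *m0, u_int immhdrs)
    {
            int rc;

            for (; m0 != NULL; m0 = m0->m_next) {
                    if (m0->m_len == 0)
                            continue;
                    if (immhdrs >= (u_int)m0->m_len) {
                            immhdrs -= m0->m_len;  /* already inline in the WR */
                            continue;
                    }
                    rc = sglist_append(sg, mtod(m0, char *) + immhdrs,
                        m0->m_len - immhdrs);
                    if (rc != 0)
                            return (rc);
                    immhdrs = 0;    /* only the first data mbuf is partial */
            }
            return (0);
    }
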
6841 ethofld_transmit(if_t ifp, struct mbuf *m0) in ethofld_transmit() argument
6846 MPASS(m0->m_nextpkt == NULL); in ethofld_transmit()
6847 MPASS(m0->m_pkthdr.csum_flags & CSUM_SND_TAG); in ethofld_transmit()
6848 MPASS(m0->m_pkthdr.snd_tag != NULL); in ethofld_transmit()
6849 cst = mst_to_crt(m0->m_pkthdr.snd_tag); in ethofld_transmit()
6862 if (M_HASHTYPE_ISHASH(m0)) in ethofld_transmit()
6863 rss_hash = m0->m_pkthdr.flowid; in ethofld_transmit()
6874 if (__predict_false(cst->plen + m0->m_pkthdr.len > eo_max_backlog)) { in ethofld_transmit()
6879 mbufq_enqueue(&cst->pending_tx, m0); in ethofld_transmit()
6880 cst->plen += m0->m_pkthdr.len; in ethofld_transmit()
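
ethofld_transmit() admits a packet to a send tag's backlog only if the tag's queued byte count stays under a cap, so one offloaded flow cannot monopolize tx buffering; on success the packet is queued and the count advances. A sketch with the tag state pared down to the two fields involved:

    #include <sys/param.h>
    #include <sys/errno.h>
    #include <sys/mbuf.h>

    struct eo_state {                  /* pared-down slice of the real cst */
            struct mbufq pending_tx;
            int plen;                  /* bytes currently backlogged */
    };

    static int
    eo_enqueue(struct eo_state *cst, struct mbuf *m0, int max_backlog)
    {
            int error;

            if (__predict_false(cst->plen + m0->m_pkthdr.len > max_backlog))
                    return (ENOBUFS);  /* caller drops the packet */
            error = mbufq_enqueue(&cst->pending_tx, m0);
            if (error == 0)
                    cst->plen += m0->m_pkthdr.len;
            return (error);
    }
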
6901 ethofld_fw4_ack(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m0) in ethofld_fw4_ack() argument