Lines matching refs:mb — references to the mbuf pointer "mb" in the mlx5en(4) transmit path (queue selection, header parsing and inlining, DMA mapping, and completion).
90 mlx5e_select_queue_by_send_tag(if_t ifp, struct mbuf *mb)
95 mb_tag = mb->m_pkthdr.snd_tag;
137 mlx5e_select_queue(if_t ifp, struct mbuf *mb)
145 if (mb->m_flags & M_VLANTAG) {
146 tc = (mb->m_pkthdr.ether_vtag >> 13);
156 if (M_HASHTYPE_GET(mb) != M_HASHTYPE_NONE) {
160 if (rss_hash2bucket(mb->m_pkthdr.flowid,
161 M_HASHTYPE_GET(mb), &temp) == 0)
165 ch = (mb->m_pkthdr.flowid % 128) % ch;
168 MBUF_HASHFLAG_L4, mb, mlx5e_hash_value) % ch;
179 mlx5e_get_l2_header_size(struct mlx5e_sq *sq, struct mbuf *mb)
185 eh = mtod(mb, struct ether_vlan_header *);
186 if (unlikely(mb->m_len < ETHER_HDR_LEN)) {
189 if (unlikely(mb->m_len < (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN)))
216 if (mb->m_pkthdr.len < min_inline)
221 return (MIN(mb->m_pkthdr.len, sq->max_inline));
237 mlx5e_get_full_header_size(const struct mbuf *mb, const struct tcphdr **ppth)
247 eh = mtod(mb, const struct ether_vlan_header *);
248 if (unlikely(mb->m_len < ETHER_HDR_LEN))
251 if (unlikely(mb->m_len < ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN))
262 ip = (const struct ip *)(mb->m_data + eth_hdr_len);
263 if (unlikely(mb->m_len < eth_hdr_len + sizeof(*ip)))
280 ip6 = (const struct ip6_hdr *)(mb->m_data + eth_hdr_len);
281 if (unlikely(mb->m_len < eth_hdr_len + sizeof(*ip6)))
299 if (unlikely(mb->m_len < eth_hdr_len + sizeof(*th))) {
300 const struct mbuf *m_th = mb->m_next;
301 if (unlikely(mb->m_len != eth_hdr_len ||
306 th = (const struct tcphdr *)(mb->m_data + eth_hdr_len);
316 if (unlikely(mb->m_pkthdr.len < eth_hdr_len))
331 mlx5e_parse_mbuf_chain(const struct mbuf **mb, int *poffset, int eth_hdr_len,
334 if (unlikely(mb[0]->m_len == eth_hdr_len)) {
336 if (unlikely((mb[0] = mb[0]->m_next) == NULL))
339 if (unlikely(mb[0]->m_len < eth_hdr_len - poffset[0] + min_len))
341 return (mb[0]->m_data + eth_hdr_len - poffset[0]);
353 mlx5e_get_vxlan_header_size(const struct mbuf *mb, struct mlx5e_tx_wqe *wqe,
370 pkt_hdr_len = mb->m_pkthdr.len;
371 has_outer_vlan_tag = (mb->m_flags & M_VLANTAG) != 0;
374 eh = mtod(mb, const struct ether_vlan_header *);
375 if (unlikely(mb->m_len < ETHER_HDR_LEN))
379 if (unlikely(mb->m_len < ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN))
390 ip4 = mlx5e_parse_mbuf_chain(&mb, &offset, eth_hdr_len,
401 udp = mlx5e_parse_mbuf_chain(&mb, &offset, eth_hdr_len,
410 ip6 = mlx5e_parse_mbuf_chain(&mb, &offset, eth_hdr_len,
420 udp = mlx5e_parse_mbuf_chain(&mb, &offset, eth_hdr_len,
439 if (unlikely(mlx5e_parse_mbuf_chain(&mb, &offset, eth_hdr_len,
445 eh = mlx5e_parse_mbuf_chain(&mb, &offset, eth_hdr_len, ETHER_HDR_LEN);
449 if (unlikely(mb->m_len < eth_hdr_len - offset + ETHER_HDR_LEN +
462 ip4 = mlx5e_parse_mbuf_chain(&mb, &offset, eth_hdr_len,
473 ip6 = mlx5e_parse_mbuf_chain(&mb, &offset, eth_hdr_len,
495 udp = mlx5e_parse_mbuf_chain(&mb, &offset, eth_hdr_len,
505 th = mlx5e_parse_mbuf_chain(&mb, &offset, eth_hdr_len,
559 struct mbuf *mb;
565 mb = *mbp;
570 sq->mbuf[pi].num_bytes = mb->m_pkthdr.len;
575 mb, segs, &nsegs, BUS_DMA_NOWAIT);
580 mb = m_defrag(*mbp, M_NOWAIT);
581 if (mb == NULL) {
582 mb = *mbp;
587 mb, segs, &nsegs, BUS_DMA_NOWAIT);
605 m_freem(mb);
614 m_freem(mb);
667 sq->mbuf[pi].mbuf = mb;
680 m_freem(mb);
695 struct mbuf *mb;
727 mb = *mbp;
746 mb = *mbp;
748 mlx5e_accel_ipsec_handle_tx(mb, wqe);
752 ETHER_BPF_MTAP(ifp, mb);
754 if (mb->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TSO)) {
757 if (mb->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP | CSUM_UDP_IPV6 | CSUM_TCP_IPV6 | CSUM_TSO)) {
763 if (mb->m_pkthdr.csum_flags & CSUM_TSO) {
765 u32 mss = mb->m_pkthdr.tso_segsz;
771 args.ihs = mlx5e_get_full_header_size(mb, NULL);
776 payload_len = mb->m_pkthdr.len - args.ihs;
786 } else if (mb->m_pkthdr.csum_flags & CSUM_ENCAP_VXLAN) {
788 if (mb->m_pkthdr.csum_flags & (CSUM_INNER_IP_TSO |
791 u32 mss = mb->m_pkthdr.tso_segsz;
798 args.ihs = mlx5e_get_vxlan_header_size(mb, wqe,
810 payload_len = mb->m_pkthdr.len - args.ihs;
826 if (mb->m_pkthdr.csum_flags &
834 } else if (mb->m_pkthdr.csum_flags & CSUM_INNER_IP) {
844 args.ihs = mlx5e_get_vxlan_header_size(mb, wqe,
853 mb->m_pkthdr.len, ETHER_MIN_LEN - ETHER_CRC_LEN);
862 args.ihs = mlx5e_get_full_header_size(mb, NULL);
864 args.ihs = mlx5e_get_l2_header_size(sq, mb);
867 args.ihs = mlx5e_get_l2_header_size(sq, mb);
872 if ((mb->m_flags & M_VLANTAG) != 0 &&
876 wqe->eth.vlan_hdr = htons(mb->m_pkthdr.ether_vtag);
878 } else if ((mb->m_flags & M_VLANTAG) == 0 &&
884 args.ihs = mlx5e_get_l2_header_size(sq, mb);
890 mb->m_pkthdr.len, ETHER_MIN_LEN - ETHER_CRC_LEN);
895 } else if ((mb->m_flags & M_VLANTAG) != 0) {
901 if (mb->m_pkthdr.csum_flags & (CSUM_TSO | CSUM_ENCAP_VXLAN)) {
910 m_copydata(mb, 0, ETHER_HDR_LEN, (caddr_t)eh);
911 m_adj(mb, ETHER_HDR_LEN);
915 eh->evl_tag = htons(mb->m_pkthdr.ether_vtag);
917 m_copydata(mb, 0, args.ihs - ETHER_HDR_LEN, (caddr_t)(eh + 1));
918 m_adj(mb, args.ihs - ETHER_HDR_LEN);
925 if (unlikely(mb->m_pkthdr.csum_flags & (CSUM_TSO |
932 m_copydata(mb, 0, args.ihs, wqe->eth.inline_hdr_start);
933 m_adj(mb, args.ihs);
945 mb, segs, &nsegs, BUS_DMA_NOWAIT);
950 mb = m_defrag(*mbp, M_NOWAIT);
951 if (mb == NULL) {
952 mb = *mbp;
957 mb, segs, &nsegs, BUS_DMA_NOWAIT);
970 m_freem(mb);
971 mb = NULL;
998 sq->mbuf[pi].mbuf = mb;
1017 m_freem(mb);
1035 struct mbuf *mb;
1072 mb = sq->mbuf[ci].mbuf;
1077 if (unlikely(mb == NULL)) {
1086 m_freem(mb);
1105 mlx5e_xmit_locked(if_t ifp, struct mlx5e_sq *sq, struct mbuf *mb)
1111 m_freem(mb);
1116 if (mlx5e_sq_xmit(sq, &mb) != 0) {
1118 m_freem(mb);
1141 mlx5e_xmit(if_t ifp, struct mbuf *mb)
1146 if (mb->m_pkthdr.csum_flags & CSUM_SND_TAG) {
1147 MPASS(mb->m_pkthdr.snd_tag->ifp == ifp);
1148 sq = mlx5e_select_queue_by_send_tag(ifp, mb);
1154 sq = mlx5e_select_queue(ifp, mb);
1157 m_freem(mb);
1165 ret = mlx5e_xmit_locked(ifp, sq, mb);
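
The fragments around mlx5e_sq_xmit() above show a common FreeBSD transmit idiom: load the mbuf chain for DMA with bus_dmamap_load_mbuf_sg(), fall back to m_defrag() if the chain has too many segments, and m_freem() the chain if neither attempt succeeds. The following is a minimal sketch of that idiom only, with hypothetical structure and constant names (struct my_sq, struct my_sq_mbuf, MY_MAX_TX_SEGS, sketch_load_tx_mbuf); it is not the driver's actual code, and it assumes the DMA tag and per-slot maps were created elsewhere with bus_dma_tag_create()/bus_dmamap_create().

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/mbuf.h>
#include <machine/bus.h>

#define	MY_MAX_TX_SEGS	32		/* hypothetical segment limit */

struct my_sq_mbuf {
	bus_dmamap_t	dma_map;	/* per-slot DMA map */
	struct mbuf	*mbuf;		/* mbuf kept until TX completion */
};

struct my_sq {
	bus_dma_tag_t		dma_tag;
	struct my_sq_mbuf	*mbuf;	/* array indexed by producer index */
};

/*
 * Map an mbuf chain for transmission.  On EFBIG (too many segments),
 * linearize the chain with m_defrag() and retry once; on any other
 * failure, free the chain and report the error to the caller.
 */
static int
sketch_load_tx_mbuf(struct my_sq *sq, struct mbuf **mbp, u_int pi)
{
	bus_dma_segment_t segs[MY_MAX_TX_SEGS];
	struct mbuf *mb = *mbp;
	int err, nsegs;

	err = bus_dmamap_load_mbuf_sg(sq->dma_tag, sq->mbuf[pi].dma_map,
	    mb, segs, &nsegs, BUS_DMA_NOWAIT);
	if (err == EFBIG) {
		mb = m_defrag(*mbp, M_NOWAIT);
		if (mb == NULL) {
			/* Could not linearize; free the original chain. */
			mb = *mbp;
			err = ENOMEM;
		} else {
			*mbp = mb;
			err = bus_dmamap_load_mbuf_sg(sq->dma_tag,
			    sq->mbuf[pi].dma_map, mb, segs, &nsegs,
			    BUS_DMA_NOWAIT);
		}
	}
	if (err != 0) {
		m_freem(mb);
		*mbp = NULL;
		return (err);
	}
	/*
	 * In the real driver, segs[0..nsegs-1] would now be written into the
	 * work queue entry; the mbuf pointer is stored so the completion path
	 * (sq->mbuf[ci].mbuf ... m_freem() near the end of the listing) can
	 * release it once the hardware is done with the buffers.
	 */
	sq->mbuf[pi].mbuf = mb;
	return (0);
}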