Lines matching +full:1 +full:mb (each entry lists the source line number, the matched code, and, where applicable, the enclosing function)

7  * 1. Redistributions of source code must retain the above copyright
39 struct mbuf *mb; in mlx5e_alloc_rx_wqe() local
48 mb_head = mb = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rq->wqe_sz); in mlx5e_alloc_rx_wqe()
49 if (unlikely(mb == NULL)) in mlx5e_alloc_rx_wqe()
52 mb->m_len = rq->wqe_sz; in mlx5e_alloc_rx_wqe()
53 mb->m_pkthdr.len = rq->wqe_sz; in mlx5e_alloc_rx_wqe()
55 for (i = 1; i < rq->nsegs; i++) { in mlx5e_alloc_rx_wqe()
56 mb = mb->m_next = m_getjcl(M_NOWAIT, MT_DATA, 0, rq->wqe_sz); in mlx5e_alloc_rx_wqe()
57 if (unlikely(mb == NULL)) { in mlx5e_alloc_rx_wqe()
61 mb->m_len = rq->wqe_sz; in mlx5e_alloc_rx_wqe()
65 mb = mb_head; in mlx5e_alloc_rx_wqe()
68 m_adj(mb, MLX5E_NET_IP_ALIGN); in mlx5e_alloc_rx_wqe()
74 mb, segs, &nsegs, BUS_DMA_NOWAIT); in mlx5e_alloc_rx_wqe()
85 for (i = 1; i != nsegs; i++) { in mlx5e_alloc_rx_wqe()
94 rq->mbuf[ix].mbuf = mb; in mlx5e_alloc_rx_wqe()
95 rq->mbuf[ix].data = mb->m_data; in mlx5e_alloc_rx_wqe()
102 m_freem(mb); in mlx5e_alloc_rx_wqe()
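
The mlx5e_alloc_rx_wqe() fragments above build a receive buffer chain: one packet-header mbuf followed by rq->nsegs - 1 additional mbufs, each rq->wqe_sz bytes, with m_freem() releasing the chain on the error path. Below is a minimal userspace sketch of that allocate-or-unwind pattern; struct buf, buf_alloc() and buf_chain_free() are illustrative stand-ins, not the kernel's mbuf API.

#include <stdlib.h>

/* Simplified stand-in for a chained receive buffer (not struct mbuf). */
struct buf {
	struct buf *next;
	size_t len;
	void *data;
};

static struct buf *
buf_alloc(size_t size)
{
	struct buf *b = calloc(1, sizeof(*b));

	if (b == NULL)
		return (NULL);
	b->data = malloc(size);
	if (b->data == NULL) {
		free(b);
		return (NULL);
	}
	b->len = size;
	return (b);
}

static void
buf_chain_free(struct buf *b)
{
	while (b != NULL) {
		struct buf *next = b->next;

		free(b->data);
		free(b);
		b = next;
	}
}

/* Allocate a chain of nsegs buffers of wqe_sz bytes each, or NULL on failure. */
static struct buf *
alloc_rx_chain(unsigned nsegs, size_t wqe_sz)
{
	struct buf *head, *b;
	unsigned i;

	head = b = buf_alloc(wqe_sz);
	if (head == NULL)
		return (NULL);
	for (i = 1; i < nsegs; i++) {
		b->next = buf_alloc(wqe_sz);
		if (b->next == NULL) {
			buf_chain_free(head);	/* unwind everything allocated so far */
			return (NULL);
		}
		b = b->next;
	}
	return (head);
}
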
116 callout_reset_curcpu(&rq->watchdog, 1, (void *)&mlx5e_post_rx_wqes, rq); in mlx5e_post_rx_wqes()
150 mlx5e_lro_update_hdr(struct mbuf *mb, struct mlx5_cqe64 *cqe) in mlx5e_lro_update_hdr() argument
164 eh = mtod(mb, struct ether_header *); in mlx5e_lro_update_hdr()
176 ip4 = (struct ip *)(eh + 1); in mlx5e_lro_update_hdr()
177 th = (struct tcphdr *)(ip4 + 1); in mlx5e_lro_update_hdr()
180 ip6 = (struct ip6_hdr *)(eh + 1); in mlx5e_lro_update_hdr()
181 th = (struct tcphdr *)(ip6 + 1); in mlx5e_lro_update_hdr()
187 ts_ptr = (uint32_t *)(th + 1); in mlx5e_lro_update_hdr()
217 ts_ptr[2] = *((uint32_t *)&cqe->timestamp + 1); in mlx5e_lro_update_hdr()
226 ip4->ip_sum = in_cksum_skip(mb, (ip4->ip_hl << 2) + in mlx5e_lro_update_hdr()
251 tcp_csum = ~in6_cksum_partial_l2(mb, IPPROTO_TCP, in mlx5e_lro_update_hdr()
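
mlx5e_lro_update_hdr() rebuilds the headers of an LRO-merged packet: the Ethernet, IP and TCP headers are located by pointer arithmetic ((eh + 1) is the byte just after the Ethernet header, (ip4 + 1) or (ip6 + 1) the start of TCP, and (th + 1) the start of the TCP options, where the timestamp option sits), after which the IP and TCP checksums are recomputed. The following self-contained sketch shows only that header walk; eth_hdr, ip4_hdr, tcp_hdr and tcp_options() are local illustrations, not the <netinet/*> structures used by the driver.

#include <stddef.h>
#include <stdint.h>

/* Minimal header layouts, for illustration only. */
struct eth_hdr { uint8_t dst[6], src[6]; uint16_t ether_type; } __attribute__((packed));
struct ip4_hdr { uint8_t ver_ihl, tos; uint16_t len, id, off;
		 uint8_t ttl, proto; uint16_t csum; uint32_t src, dst; } __attribute__((packed));
struct tcp_hdr { uint16_t sport, dport; uint32_t seq, ack;
		 uint8_t off_x2, flags; uint16_t win, csum, urp; } __attribute__((packed));

/*
 * Return a pointer to the first 32-bit word of the TCP options (where a
 * timestamp option would live) of an Ethernet/IPv4/TCP frame, or NULL if
 * the frame is too short.  Like the matched lines above, this assumes a
 * 20-byte IPv4 header (no IP options).
 */
static uint32_t *
tcp_options(void *frame, size_t frame_len)
{
	struct eth_hdr *eh = frame;
	struct ip4_hdr *ip = (struct ip4_hdr *)(eh + 1);	/* byte after Ethernet */
	struct tcp_hdr *th = (struct tcp_hdr *)(ip + 1);	/* byte after IPv4 */

	if (frame_len < sizeof(*eh) + sizeof(*ip) + sizeof(*th))
		return (NULL);
	return ((uint32_t *)(th + 1));				/* start of TCP options */
}
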
326 struct mbuf *mb, struct mlx5e_rq_mbuf *mr, u32 cqe_bcnt) in mlx5e_build_rx_mbuf() argument
335 if (lro_num_seg > 1) { in mlx5e_build_rx_mbuf()
336 mlx5e_lro_update_hdr(mb, cqe); in mlx5e_build_rx_mbuf()
341 mb->m_pkthdr.len = cqe_bcnt; in mlx5e_build_rx_mbuf()
342 for (mb_head = mb; mb != NULL; mb = mb->m_next) { in mlx5e_build_rx_mbuf()
343 if (mb->m_len > cqe_bcnt) in mlx5e_build_rx_mbuf()
344 mb->m_len = cqe_bcnt; in mlx5e_build_rx_mbuf()
345 cqe_bcnt -= mb->m_len; in mlx5e_build_rx_mbuf()
347 if (likely(mb->m_next != NULL)) { in mlx5e_build_rx_mbuf()
349 m_freem(mb->m_next); in mlx5e_build_rx_mbuf()
350 mb->m_next = NULL; in mlx5e_build_rx_mbuf()
356 mb = mb_head; in mlx5e_build_rx_mbuf()
360 mb->m_pkthdr.flowid = be32_to_cpu(cqe->rss_hash_result); in mlx5e_build_rx_mbuf()
367 M_HASHTYPE_SET(mb, M_HASHTYPE_RSS_TCP_IPV4); in mlx5e_build_rx_mbuf()
370 M_HASHTYPE_SET(mb, M_HASHTYPE_RSS_UDP_IPV4); in mlx5e_build_rx_mbuf()
373 M_HASHTYPE_SET(mb, M_HASHTYPE_RSS_IPV4); in mlx5e_build_rx_mbuf()
377 M_HASHTYPE_SET(mb, M_HASHTYPE_RSS_TCP_IPV6); in mlx5e_build_rx_mbuf()
380 M_HASHTYPE_SET(mb, M_HASHTYPE_RSS_UDP_IPV6); in mlx5e_build_rx_mbuf()
383 M_HASHTYPE_SET(mb, M_HASHTYPE_RSS_IPV6); in mlx5e_build_rx_mbuf()
386 M_HASHTYPE_SET(mb, M_HASHTYPE_OPAQUE_HASH); in mlx5e_build_rx_mbuf()
390 M_HASHTYPE_SET(mb, M_HASHTYPE_OPAQUE_HASH); in mlx5e_build_rx_mbuf()
394 M_HASHTYPE_SETINNER(mb); in mlx5e_build_rx_mbuf()
397 mb->m_pkthdr.flowid = rq->ix; in mlx5e_build_rx_mbuf()
398 M_HASHTYPE_SET(mb, M_HASHTYPE_OPAQUE); in mlx5e_build_rx_mbuf()
400 mb->m_pkthdr.rcvif = ifp; in mlx5e_build_rx_mbuf()
401 mb->m_pkthdr.leaf_rcvif = ifp; in mlx5e_build_rx_mbuf()
411 mb->m_pkthdr.csum_flags |= in mlx5e_build_rx_mbuf()
415 mb->m_pkthdr.csum_data = htons(0xffff); in mlx5e_build_rx_mbuf()
418 mb->m_pkthdr.csum_flags |= in mlx5e_build_rx_mbuf()
428 mb->m_pkthdr.csum_flags = in mlx5e_build_rx_mbuf()
431 mb->m_pkthdr.csum_data = htons(0xffff); in mlx5e_build_rx_mbuf()
437 mb->m_pkthdr.ether_vtag = be16_to_cpu(cqe->vlan_info); in mlx5e_build_rx_mbuf()
438 mb->m_flags |= M_VLANTAG; in mlx5e_build_rx_mbuf()
450 mb->m_flags |= M_TSTMP_HPREC; in mlx5e_build_rx_mbuf()
453 mb->m_pkthdr.rcv_tstmp = tstmp; in mlx5e_build_rx_mbuf()
454 mb->m_flags |= M_TSTMP; in mlx5e_build_rx_mbuf()
460 mb->m_pkthdr.csum_flags |= CSUM_TLS_DECRYPTED; in mlx5e_build_rx_mbuf()
470 mlx5e_accel_ipsec_handle_rx(ifp, mb, cqe, mr); in mlx5e_build_rx_mbuf()
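
mlx5e_build_rx_mbuf() first trims the received chain to the completed byte count cqe_bcnt: each segment's m_len is clamped to the bytes still outstanding and, once the count is exhausted, the unused tail of the chain is freed. The remaining lines then fill in the RSS hash type, checksum flags, VLAN tag and timestamps from the CQE. A minimal sketch of just the trimming step, on a simplified buffer chain rather than struct mbuf (struct buf, trim_chain and buf_chain_free are illustrative names):

#include <stdlib.h>

struct buf {
	struct buf *next;
	size_t len;
};

static void
buf_chain_free(struct buf *b)
{
	while (b != NULL) {
		struct buf *next = b->next;

		free(b);
		b = next;
	}
}

/* Clamp the chain to 'total' bytes and free any segments past that point. */
static void
trim_chain(struct buf *head, size_t total)
{
	struct buf *b;

	for (b = head; b != NULL; b = b->next) {
		if (b->len > total)
			b->len = total;
		total -= b->len;
		if (total == 0) {
			/* Drop the now-unused remainder of the chain. */
			buf_chain_free(b->next);
			b->next = NULL;
			break;
		}
	}
}
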
506 (((cq->wq.cc + i) >> cq->wq.log_sz) & 1); in mlx5e_decompress_cqe()
531 mlx5e_read_cqe_slot(cq, cq->wq.cc + 1, mini_array); in mlx5e_decompress_cqes()
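
The expression (((cq->wq.cc + i) >> cq->wq.log_sz) & 1) in mlx5e_decompress_cqe() derives the expected ownership (phase) bit of a completion-queue entry: for a ring of 2^log_sz entries, bit log_sz of the consumer counter flips every time the counter wraps around the ring. A tiny standalone illustration of that calculation (ring_phase is a made-up helper name, not driver API):

#include <stdint.h>
#include <stdio.h>

/*
 * Expected ownership/phase bit for entry (cc + i) in a ring of 2^log_sz
 * slots: it toggles each time the consumer counter wraps the ring.
 */
static int
ring_phase(uint32_t cc, uint32_t i, unsigned log_sz)
{
	return (((cc + i) >> log_sz) & 1);
}

int
main(void)
{
	unsigned log_sz = 3;	/* ring of 8 entries */
	uint32_t cc;

	/* Prints phase 0 for cc 0..7, 1 for 8..15, 0 for 16..23, ... */
	for (cc = 0; cc < 24; cc++)
		printf("cc=%2u phase=%d\n", cc, ring_phase(cc, 0, log_sz));
	return (0);
}
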
557 struct mbuf *mb; in mlx5e_poll_rx_cq() local
588 rq->mbuf[wqe_counter].data, seglen, rq->ifp, &mb); in mlx5e_poll_rx_cq()
619 (mb = m_gethdr(M_NOWAIT, MT_DATA)) != NULL) { in mlx5e_poll_rx_cq()
621 mb->m_len = MHLEN - MLX5E_NET_IP_ALIGN; in mlx5e_poll_rx_cq()
623 mb->m_data += MLX5E_NET_IP_ALIGN; in mlx5e_poll_rx_cq()
625 bcopy(rq->mbuf[wqe_counter].data, mtod(mb, caddr_t), in mlx5e_poll_rx_cq()
628 mb = rq->mbuf[wqe_counter].mbuf; in mlx5e_poll_rx_cq()
635 mlx5e_build_rx_mbuf(cqe, rq, mb, &rq->mbuf[wqe_counter], in mlx5e_poll_rx_cq()
640 mb->m_pkthdr.numa_domain = if_getnumadomain(rq->ifp); in mlx5e_poll_rx_cq()
644 tcp_lro_queue_mbuf(&rq->lro, mb); in mlx5e_poll_rx_cq()
646 if (mb->m_pkthdr.csum_flags == 0 || in mlx5e_poll_rx_cq()
649 tcp_lro_rx(&rq->lro, mb, 0) != 0) { in mlx5e_poll_rx_cq()
650 if_input(rq->ifp, mb); in mlx5e_poll_rx_cq()
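
The matched lines from mlx5e_poll_rx_cq() show a copy-break path: when the completed frame fits in a plain header mbuf (MHLEN minus the MLX5E_NET_IP_ALIGN pad), the payload is bcopy()'d into a freshly allocated small mbuf and the large receive buffer stays with the queue; otherwise the stored cluster mbuf itself is passed to LRO or if_input(). A schematic sketch of that decision, with struct pkt, receive_frame() and copy_threshold as illustrative placeholders rather than driver code:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct pkt {
	uint8_t *data;
	size_t len;
	int owns_buffer;	/* 1 if this pkt carries the original RX buffer */
};

/*
 * Copy-break: small frames are copied into a freshly allocated packet so
 * the original receive buffer can stay posted; large frames take over the
 * buffer itself.
 */
static struct pkt *
receive_frame(uint8_t *rx_buf, size_t frame_len, size_t copy_threshold)
{
	struct pkt *p = calloc(1, sizeof(*p));

	if (p == NULL)
		return (NULL);
	if (frame_len <= copy_threshold) {
		p->data = malloc(frame_len);
		if (p->data == NULL) {
			free(p);
			return (NULL);
		}
		memcpy(p->data, rx_buf, frame_len);	/* like the bcopy() above */
		p->owns_buffer = 0;			/* rx_buf remains with the queue */
	} else {
		p->data = rx_buf;			/* hand the buffer up the stack */
		p->owns_buffer = 1;
	}
	p->len = frame_len;
	return (p);
}

The usual motivation for a copy-break threshold is to avoid giving away a large, DMA-mapped cluster for a frame of only a few bytes, so the cluster can be reposted to the hardware right away.
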
677 struct mbuf *mb = m_gethdr(M_NOWAIT, MT_DATA); in mlx5e_rx_cq_comp() local
679 if (mb != NULL) { in mlx5e_rx_cq_comp()
681 mb->m_pkthdr.len = mb->m_len = 15; in mlx5e_rx_cq_comp()
682 memset(mb->m_data, 255, 14); in mlx5e_rx_cq_comp()
683 mb->m_data[14] = rq->ix; in mlx5e_rx_cq_comp()
684 mb->m_pkthdr.rcvif = rq->ifp; in mlx5e_rx_cq_comp()
685 mb->m_pkthdr.leaf_rcvif = rq->ifp; in mlx5e_rx_cq_comp()
686 if_input(rq->ifp, mb); in mlx5e_rx_cq_comp()
708 while (1) { in mlx5e_rx_cq_comp()