Lines matching +full:1 +full:mb: identifier search hits in the FreeBSD mlx4_en transmit path (mlx4_en_tx.c). Each hit shows the source line number, the matched line, and the enclosing function; the "argument" and "local" tags mark how the matched identifier is used on that line.

77 	    1,					/* any alignment */  in mlx4_en_create_tx_ring()
91 ring->size_mask = size - 1; in mlx4_en_create_tx_ring()
145 err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &ring->qpn, in mlx4_en_create_tx_ring()
173 mlx4_qp_release_range(mdev->dev, ring->qpn, 1); in mlx4_en_create_tx_ring()
202 mlx4_qp_release_range(priv->mdev->dev, ring->qpn, 1); in mlx4_en_destroy_tx_ring()
225 ring->last_nr_txbb = 1; in mlx4_en_activate_tx_ring()
233 mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn, in mlx4_en_activate_tx_ring()
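The size_mask assignment at line 91 above relies on the TX ring size being a power of two: producer and consumer indices run freely and are reduced to ring slots with a single AND, and occupancy is the plain difference prod - cons (line 676 uses the same trick). A minimal sketch of that idiom, with illustrative names (tx_ring_slot and friends are not driver functions):

#include <assert.h>
#include <stdint.h>

struct tx_ring_idx {
	uint32_t size;		/* must be a power of two */
	uint32_t size_mask;	/* size - 1 */
	uint32_t prod, cons;	/* free-running indices */
};

static void
tx_ring_idx_init(struct tx_ring_idx *r, uint32_t size)
{
	assert(size != 0 && (size & (size - 1)) == 0);
	r->size = size;
	r->size_mask = size - 1;	/* same trick as line 91 */
	r->prod = r->cons = 0;
}

static uint32_t
tx_ring_slot(const struct tx_ring_idx *r, uint32_t index)
{
	return (index & r->size_mask);	/* wraps without a division */
}

static uint32_t
tx_ring_used(const struct tx_ring_idx *r)
{
	return (r->prod - r->cons);	/* correct even across wrap-around */
}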
254 struct mbuf *mb, int len, __be32 owner_bit) in mlx4_en_store_inline_lso_data() argument
259 m_copydata(mb, 0, len, inl + 4); in mlx4_en_store_inline_lso_data()
294 struct mbuf *mb; in mlx4_en_free_tx_desc() local
297 mb = tx_info->mb; in mlx4_en_free_tx_desc()
299 if (mb == NULL) in mlx4_en_free_tx_desc()
306 m_freem(mb); in mlx4_en_free_tx_desc()
432 mod_timer(&cq->timer, jiffies + 1); in mlx4_en_tx_irq()
477 if ((++ring->poll_cnt & (MLX4_EN_TX_POLL_MODER - 1)) == 0) in mlx4_en_xmit_poll()
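The poll_cnt test at line 477 is the same power-of-two masking applied to rate limiting: completion processing runs only on every MLX4_EN_TX_POLL_MODER-th call, and the check costs one AND. A hedged sketch of that pattern (the constant's value here is illustrative, not the driver's):

#include <stdint.h>

#define TX_POLL_MODER	16u	/* illustrative; must be a power of two */

struct tx_poller {
	uint32_t poll_cnt;
};

/* Returns nonzero on every TX_POLL_MODER-th invocation. */
static int
tx_should_poll(struct tx_poller *p)
{
	return ((++p->poll_cnt & (TX_POLL_MODER - 1)) == 0);
}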
485 mlx4_en_get_inline_hdr_size(struct mlx4_en_tx_ring *ring, struct mbuf *mb) in mlx4_en_get_inline_hdr_size() argument
490 retval = MIN(ring->inline_thold, mb->m_len); in mlx4_en_get_inline_hdr_size()
494 retval = MIN(ring->inline_thold, mb->m_pkthdr.len); in mlx4_en_get_inline_hdr_size()
499 mlx4_en_get_header_size(struct mbuf *mb) in mlx4_en_get_header_size() argument
509 eh = mtod(mb, struct ether_vlan_header *); in mlx4_en_get_header_size()
510 if (mb->m_len < ETHER_HDR_LEN) in mlx4_en_get_header_size()
519 if (mb->m_len < eth_hdr_len) in mlx4_en_get_header_size()
523 ip = (struct ip *)(mb->m_data + eth_hdr_len); in mlx4_en_get_header_size()
524 if (mb->m_len < eth_hdr_len + sizeof(*ip)) in mlx4_en_get_header_size()
532 ip6 = (struct ip6_hdr *)(mb->m_data + eth_hdr_len); in mlx4_en_get_header_size()
533 if (mb->m_len < eth_hdr_len + sizeof(*ip6)) in mlx4_en_get_header_size()
542 if (mb->m_len < eth_hdr_len + sizeof(*th)) in mlx4_en_get_header_size()
544 th = (struct tcphdr *)(mb->m_data + eth_hdr_len); in mlx4_en_get_header_size()
547 if (mb->m_len < eth_hdr_len) in mlx4_en_get_header_size()
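mlx4_en_get_header_size() above computes how many bytes of Ethernet, optional 802.1Q, IP, and TCP headers precede the TSO payload, returning 0 when the first mbuf is too short or the packet is not TCP. A self-contained sketch of the same walk over a contiguous buffer; the offsets are standard protocol layouts, not driver definitions:

#include <stddef.h>
#include <stdint.h>

#define ETHERTYPE_IP	0x0800
#define ETHERTYPE_IPV6	0x86dd
#define ETHERTYPE_VLAN	0x8100

static int
tso_header_len(const uint8_t *buf, size_t len)
{
	size_t off;
	uint16_t etype;
	uint8_t ihl_words, tcp_words;

	if (len < 14)			/* Ethernet header */
		return (0);
	etype = (uint16_t)(buf[12] << 8 | buf[13]);
	off = 14;
	if (etype == ETHERTYPE_VLAN) {	/* 802.1Q tag present */
		if (len < 18)
			return (0);
		etype = (uint16_t)(buf[16] << 8 | buf[17]);
		off = 18;
	}
	if (etype == ETHERTYPE_IP) {
		if (len < off + 20)
			return (0);
		ihl_words = buf[off] & 0x0f;	/* IHL, 32-bit words */
		if (ihl_words < 5 || buf[off + 9] != 6)	/* not TCP */
			return (0);
		off += (size_t)ihl_words * 4;
	} else if (etype == ETHERTYPE_IPV6) {
		if (len < off + 40)
			return (0);
		if (buf[off + 6] != 6)		/* next header not TCP */
			return (0);
		off += 40;
	} else {
		return (0);
	}
	if (len < off + 20)			/* fixed TCP header */
		return (0);
	tcp_words = buf[off + 12] >> 4;		/* TCP data offset */
	return ((int)(off + (size_t)tcp_words * 4));
}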
554 struct mbuf *mb, int len, __be32 owner_bit) in mlx4_en_store_inline_data() argument
560 m_copydata(mb, 0, len, inl + 4); in mlx4_en_store_inline_data()
564 m_copydata(mb, 0, len, inl + 4); in mlx4_en_store_inline_data()
567 m_copydata(mb, 0, spc, inl + 4); in mlx4_en_store_inline_data()
568 m_copydata(mb, spc, len - spc, inl + 8 + spc); in mlx4_en_store_inline_data()
583 SET_BYTE_COUNT((1U << 31) | MIN_PKT_LEN); in mlx4_en_store_inline_header()
586 SET_BYTE_COUNT((1U << 31) | len); in mlx4_en_store_inline_header()
589 SET_BYTE_COUNT((1U << 31) | (len - spc)); in mlx4_en_store_inline_header()
592 SET_BYTE_COUNT((1U << 31) | spc); in mlx4_en_store_inline_header()
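The (1U << 31) in SET_BYTE_COUNT is the inline-segment flag: bit 31 of a descriptor's byte_count tells the hardware that the payload follows inline rather than being pointed to. When the inline data would cross the descriptor's alignment boundary it is split at spc bytes into two inline segments (lines 567-568), and lines 589-592 suggest the trailing segment's count is stored before the leading one, so with a barrier in between the hardware never sees a half-built descriptor. A hedged sketch of the layout; put_be32 and store_inline are illustrative helpers, not driver code:

#include <stdint.h>
#include <string.h>

#define INLINE_SEG	(1U << 31)	/* bit 31: segment data is inline */

/* Illustrative big-endian store; the driver uses cpu_to_be32(). */
static void
put_be32(uint8_t *p, uint32_t v)
{
	p[0] = (uint8_t)(v >> 24);
	p[1] = (uint8_t)(v >> 16);
	p[2] = (uint8_t)(v >> 8);
	p[3] = (uint8_t)v;
}

/*
 * Lay out "len" bytes inline after a 4-byte count, splitting at "spc"
 * when the data spans the alignment boundary; the offsets inl + 4 and
 * inl + 8 + spc mirror lines 567-568 above.
 */
static void
store_inline(uint8_t *inl, const uint8_t *data, uint32_t len, uint32_t spc)
{
	if (len <= spc) {
		memcpy(inl + 4, data, len);
		put_be32(inl, INLINE_SEG | len);
	} else {
		memcpy(inl + 4, data, spc);
		memcpy(inl + 8 + spc, data + spc, len - spc);
		/* Trailing count first; the driver separates these two
		 * stores with a barrier so the leading count lands last. */
		put_be32(inl + 4 + spc, INLINE_SEG | (len - spc));
		put_be32(inl, INLINE_SEG | spc);
	}
}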
607 u16 mlx4_en_select_queue(if_t dev, struct mbuf *mb) in mlx4_en_select_queue() argument
614 #if (MLX4_EN_NUM_UP > 1) in mlx4_en_select_queue()
616 if (mb->m_flags & M_VLANTAG) { in mlx4_en_select_queue()
617 u32 vlan_tag = mb->m_pkthdr.ether_vtag; in mlx4_en_select_queue()
621 queue_index = m_ether_tcpip_hash(MBUF_HASHFLAG_L3 | MBUF_HASHFLAG_L4, mb, hashrandom); in mlx4_en_select_queue()
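mlx4_en_select_queue() steers VLAN-tagged frames by their 802.1p priority (the top three bits of the tag) and everything else by an L3/L4 flow hash, so packets of one flow always land on one ring. A hedged sketch of that choice; rings_per_up and the final modulo are illustrative stand-ins for the driver's arithmetic over MLX4_EN_NUM_UP:

#include <stdint.h>

static uint16_t
select_tx_queue(int has_vlan, uint16_t vlan_tag, uint32_t flow_hash,
    uint16_t rings_per_up, uint16_t num_rings)
{
	uint32_t q;

	if (has_vlan)
		q = (uint32_t)(vlan_tag >> 13) * rings_per_up; /* 802.1p PCP */
	else
		q = flow_hash;	/* e.g. from m_ether_tcpip_hash() */
	return ((uint16_t)(q % num_rings));
}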
645 struct mbuf *mb = *mbp; in mlx4_en_xmit() local
672 (MLX4_EN_TX_WQE_MAX_WQEBBS - 1), ("Wrapping around TX ring")); in mlx4_en_xmit()
676 (u32) (ring->prod - ring->cons - 1)); in mlx4_en_xmit()
679 AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, mb->m_pkthdr.len); in mlx4_en_xmit()
692 ETHER_BPF_MTAP(ifp, mb); in mlx4_en_xmit()
697 if (mb->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TSO)) in mlx4_en_xmit()
700 if (mb->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP | in mlx4_en_xmit()
711 if (mb->m_flags & M_VLANTAG) { in mlx4_en_xmit()
712 tx_desc->ctrl.vlan_tag = cpu_to_be16(mb->m_pkthdr.ether_vtag); in mlx4_en_xmit()
725 m_copydata(mb, 0, 2, __DEVOLATILE(void *, &tx_desc->ctrl.srcrb_flags16[0])); in mlx4_en_xmit()
726 m_copydata(mb, 2, 4, __DEVOLATILE(void *, &tx_desc->ctrl.imm)); in mlx4_en_xmit()
733 if (mb->m_pkthdr.csum_flags & CSUM_TSO) { in mlx4_en_xmit()
735 u32 mss = mb->m_pkthdr.tso_segsz; in mlx4_en_xmit()
740 ihs = mlx4_en_get_header_size(mb); in mlx4_en_xmit()
747 payload_len = mb->m_pkthdr.len - ihs; in mlx4_en_xmit()
749 num_pkts = 1; in mlx4_en_xmit()
759 mb, ihs, owner_bit); in mlx4_en_xmit()
763 ihs = mlx4_en_get_inline_hdr_size(ring, mb); in mlx4_en_xmit()
765 mb->m_pkthdr.len, ETHER_MIN_LEN - ETHER_CRC_LEN); in mlx4_en_xmit()
771 mb, ihs, owner_bit); in mlx4_en_xmit()
773 m_adj(mb, ihs); in mlx4_en_xmit()
776 mb, segs, &nr_segs, BUS_DMA_NOWAIT); in mlx4_en_xmit()
780 m = m_defrag(mb, M_NOWAIT); in mlx4_en_xmit()
785 mb = m; in mlx4_en_xmit()
788 mb, segs, &nr_segs, BUS_DMA_NOWAIT); in mlx4_en_xmit()
803 m_freem(mb); in mlx4_en_xmit()
804 mb = NULL; in mlx4_en_xmit()
817 if (unlikely(pad < (MLX4_EN_TX_WQE_MAX_WQEBBS - 1))) { in mlx4_en_xmit()
822 pad = (DS_FACT * (pad + 1)); in mlx4_en_xmit()
837 * ((MLX4_EN_TX_WQE_MAX_WQEBBS - 1) * DS_FACT) + in mlx4_en_xmit()
845 tx_info->mb = mb; in mlx4_en_xmit()
859 dseg->byte_count = SET_BYTE_COUNT((1U << 31)|0); in mlx4_en_xmit()
869 dseg->byte_count = SET_BYTE_COUNT((1U << 31)|0); in mlx4_en_xmit()
925 m_freem(mb); in mlx4_en_xmit()
930 mlx4_en_transmit_locked(if_t ifp, int tx_ind, struct mbuf *mb) in mlx4_en_transmit_locked() argument
938 m_freem(mb); in mlx4_en_transmit_locked()
942 if (mlx4_en_xmit(priv, tx_ind, &mb) != 0) { in mlx4_en_transmit_locked()
944 m_freem(mb); in mlx4_en_transmit_locked()
985 if_inc_counter(dev, IFCOUNTER_IQDROPS, 1); in mlx4_en_transmit()
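The final fragments show the mbuf-ownership convention of the transmit path: every failure branch consumes the packet with m_freem(), and the drop is accounted (IFCOUNTER_IQDROPS at line 985, in the caller). A self-contained sketch of that convention with stand-in types; none of these names are kernel interfaces:

#include <errno.h>
#include <stdlib.h>

struct pkt { char *data; };			/* stand-in for struct mbuf */
struct txq { int active; unsigned long drops; };

static void
pkt_free(struct pkt *p)
{
	free(p);				/* stand-in for m_freem() */
}

/* Stand-in enqueue: on success it consumes the packet. */
static int
txq_enqueue(struct txq *q, struct pkt **pp)
{
	(void)q;
	pkt_free(*pp);
	*pp = NULL;
	return (0);
}

static int
txq_transmit_locked(struct txq *q, struct pkt *p)
{
	if (!q->active) {
		pkt_free(p);			/* consume even when down */
		return (ENETDOWN);
	}
	if (txq_enqueue(q, &p) != 0) {
		if (p != NULL)
			pkt_free(p);		/* failed enqueue: consume */
		q->drops++;	/* the driver counts this in the caller */
		return (ENOBUFS);
	}
	return (0);
}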