Lines matching refs:rq (mlx5en receive path, grouped by function)

mlx5e_alloc_rx_wqe():

    if (rq->mbuf[ix].mbuf != NULL)
        ...
    mb_head = mb = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rq->wqe_sz);
    ...
    mb->m_len = rq->wqe_sz;
    mb->m_pkthdr.len = rq->wqe_sz;
    ...
    for (i = 1; i < rq->nsegs; i++) {
        mb = mb->m_next = m_getjcl(M_NOWAIT, MT_DATA, 0, rq->wqe_sz);
        ...
        mb->m_len = rq->wqe_sz;
        mb_head->m_pkthdr.len += rq->wqe_sz;
    }
    ...
    err = mlx5_accel_ipsec_rx_tag_add(rq->ifp, &rq->mbuf[ix]);
    ...
    err = -bus_dmamap_load_mbuf_sg(rq->dma_tag, rq->mbuf[ix].dma_map, ...
    ...
    bus_dmamap_unload(rq->dma_tag, rq->mbuf[ix].dma_map);
    ...
    for (; i < rq->nsegs; i++) {
        ...
    }
    rq->mbuf[ix].mbuf = mb;
    rq->mbuf[ix].data = mb->m_data;
    ...
    bus_dmamap_sync(rq->dma_tag, rq->mbuf[ix].dma_map, ...
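
The fragments above show the per-WQE buffer setup: a chain of rq->nsegs jumbo
clusters is built (only the head carries a packet header), the chain is loaded
into the DMA map, an IPsec receive tag may be attached, and the map is synced
for the device before posting. A minimal sketch of the chain-building pattern
using the mbuf(9) API; the helper name build_rx_chain and its standalone
parameters are hypothetical, not part of the driver:

    /*
     * Hypothetical helper modeled on the fragments above: allocate a
     * chain of nsegs clusters of wqe_sz bytes each.  wqe_sz must be one
     * of the standard cluster sizes accepted by m_getjcl(9).
     */
    static struct mbuf *
    build_rx_chain(int nsegs, int wqe_sz)
    {
        struct mbuf *mb_head, *mb;
        int i;

        /* The head mbuf carries the packet header. */
        mb_head = mb = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, wqe_sz);
        if (mb == NULL)
            return (NULL);
        mb->m_len = wqe_sz;
        mb->m_pkthdr.len = wqe_sz;

        /* Trailing segments are plain data mbufs. */
        for (i = 1; i < nsegs; i++) {
            mb = mb->m_next = m_getjcl(M_NOWAIT, MT_DATA, 0, wqe_sz);
            if (mb == NULL) {
                m_freem(mb_head);    /* frees the whole chain */
                return (NULL);
            }
            mb->m_len = wqe_sz;
            mb_head->m_pkthdr.len += wqe_sz;
        }
        return (mb_head);
    }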

mlx5e_post_rx_wqes():

    if (unlikely(rq->enabled == 0))
        ...
    while (!mlx5_wq_ll_is_full(&rq->wq)) {
        struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, rq->wq.head);
        ...
        if (unlikely(mlx5e_alloc_rx_wqe(rq, wqe, rq->wq.head))) {
            callout_reset_curcpu(&rq->watchdog, 1,
                (void *)&mlx5e_post_rx_wqes, rq);
            ...
        }
        mlx5_wq_ll_push(&rq->wq, be16_to_cpu(wqe->next.next_wqe_index));
    }
    ...
    mlx5_wq_ll_update_db_record(&rq->wq);
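
Read together, the fragments form a simple producer loop: post new receive
WQEs until the work queue is full, and on allocation failure arm a one-tick
watchdog callout to retry rather than spinning. A hedged reconstruction of
the whole function from those fragments; the early return, the break after
the callout reset, and the barrier placement before the doorbell update are
assumptions filling the elided lines:

    static void
    mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
    {

        if (unlikely(rq->enabled == 0))
            return;                     /* assumed early return */

        while (!mlx5_wq_ll_is_full(&rq->wq)) {
            struct mlx5e_rx_wqe *wqe =
                mlx5_wq_ll_get_wqe(&rq->wq, rq->wq.head);

            if (unlikely(mlx5e_alloc_rx_wqe(rq, wqe, rq->wq.head))) {
                /* No buffers right now; retry on the next tick. */
                callout_reset_curcpu(&rq->watchdog, 1,
                    (void *)&mlx5e_post_rx_wqes, rq);
                break;                  /* assumed */
            }
            mlx5_wq_ll_push(&rq->wq,
                be16_to_cpu(wqe->next.next_wqe_index));
        }

        wmb();          /* assumed: WQE writes visible before doorbell */
        mlx5_wq_ll_update_db_record(&rq->wq);
    }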

mlx5e_build_rx_mbuf():

    if_t ifp = rq->ifp;
    ...
    rq->stats.lro_packets++;
    rq->stats.lro_bytes += cqe_bcnt;
    ...
    mb->m_pkthdr.flowid = rq->ix;
    ...
    rq->stats.csum_none++;
    ...
    rq->stats.csum_none++;
    ...
    c = container_of(rq, struct mlx5e_channel, rq);
    ...
    rq->stats.decrypted_ok_packets++;
    ...
    rq->stats.decrypted_error_packets++;
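
These fragments show mlx5e_build_rx_mbuf() translating CQE metadata into mbuf
packet-header state: a per-ring flow id for RSS dispatch, LRO and IPsec
decryption counters, and hardware checksum results. A condensed sketch of the
flow-id and checksum handling; the l3_ok/l4_ok flags stand in for the CQE
checksum-status tests and are assumptions, as is the exact hash type:

    /* RSS: tag the packet with the ring it arrived on. */
    mb->m_pkthdr.flowid = rq->ix;
    M_HASHTYPE_SET(mb, M_HASHTYPE_OPAQUE);   /* hash type assumed */

    /* Hardware checksum offload results, taken from the CQE. */
    if (l3_ok && l4_ok) {                    /* hypothetical flags */
        mb->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID |
            CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
        mb->m_pkthdr.csum_data = htons(0xffff);
    } else {
        /* Leave verification to the stack and count the miss. */
        rq->stats.csum_none++;
    }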

mlx5e_poll_rx_cq():

    CURVNET_SET_QUIET(if_getvnet(rq->ifp));
    pfil = rq->channel->priv->pfil;
    ...
    cqe = mlx5e_get_cqe(&rq->cq);
    ...
    mlx5e_decompress_cqes(&rq->cq);
    ...
    mlx5_cqwq_pop(&rq->cq.wq);
    ...
    wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter);
    ...
    bus_dmamap_sync(rq->dma_tag,
        rq->mbuf[wqe_counter].dma_map, ...
    ...
    mlx5e_dump_err_cqe(&rq->cq, rq->rqn, (const void *)cqe);
    rq->stats.wqe_err++;
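
This first half is the usual CQ consumption pattern: fetch the next CQE,
expand it if the hardware delivered it compressed, pop it, use its WQE counter
to locate the receive buffer, and sync that buffer's DMA map before the CPU
touches the data; error CQEs are dumped and counted instead of delivered. A
skeleton reconstructed from the fragments, with the loop framing and the
cqe_is_compressed/cqe_has_error predicates as assumptions (the delivery half
continues in the listing below):

    for (i = 0; i < budget; i++) {
        cqe = mlx5e_get_cqe(&rq->cq);
        if (cqe == NULL)
            break;                          /* CQ drained */

        if (cqe_is_compressed(cqe))         /* hypothetical predicate */
            mlx5e_decompress_cqes(&rq->cq); /* expand the block */
        mlx5_cqwq_pop(&rq->cq.wq);

        wqe_counter = be16_to_cpu(cqe->wqe_counter);
        wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter);

        /* The CPU is about to read DMA'd packet data. */
        bus_dmamap_sync(rq->dma_tag, rq->mbuf[wqe_counter].dma_map,
            BUS_DMASYNC_POSTREAD);

        if (unlikely(cqe_has_error(cqe))) { /* hypothetical predicate */
            mlx5e_dump_err_cqe(&rq->cq, rq->rqn, (const void *)cqe);
            rq->stats.wqe_err++;
            goto wq_ll_pop;                 /* recycle the buffer */
        }
        ...
    }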

mlx5e_poll_rx_cq(), continued:

    rv = pfil_mem_in(rq->channel->priv->pfil,
        rq->mbuf[wqe_counter].data, seglen, rq->ifp, &mb);
    ...
    rq->stats.packets++;
    ...
    bcopy(rq->mbuf[wqe_counter].data, mtod(mb, caddr_t), ...
    ...
    mb = rq->mbuf[wqe_counter].mbuf;
    rq->mbuf[wqe_counter].mbuf = NULL;   /* safety clear */
    ...
    bus_dmamap_unload(rq->dma_tag,
        rq->mbuf[wqe_counter].dma_map);
    ...
    mlx5e_build_rx_mbuf(cqe, rq, mb, &rq->mbuf[wqe_counter], ...
    rq->stats.bytes += byte_cnt;
    rq->stats.packets++;
    ...
    mb->m_pkthdr.numa_domain = if_getnumadomain(rq->ifp);
    ...
    tcp_lro_queue_mbuf(&rq->lro, mb);
    ...
    if (... ||
        (if_getcapenable(rq->ifp) & IFCAP_LRO) == 0 ||
        rq->lro.lro_cnt == 0 ||
        tcp_lro_rx(&rq->lro, mb, 0) != 0) {
        if_input(rq->ifp, mb);
    }
    ...
    mlx5_wq_ll_pop(&rq->wq, wqe_counter_be, ...
    ...
    mlx5_cqwq_update_db_record(&rq->cq.wq);
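
Two details in the delivery half are worth calling out. Small frames are
copied into a fresh mbuf so the large multi-segment ring buffer can be
reposted immediately, while large frames hand the ring mbuf itself to the
stack (zero copy) and unload its DMA map; completed mbufs then go through LRO
when it is enabled, or straight to if_input(). A sketch of that decision with
a hypothetical small-frame threshold; the fresh-mbuf allocation and the
trailing byte_cnt argument to mlx5e_build_rx_mbuf() are assumptions:

    if (byte_cnt <= MLX5E_SMALL_FRAME_MAX) {    /* threshold name assumed */
        /* Copy out so the ring buffer stays posted. */
        mb = m_gethdr(M_NOWAIT, MT_DATA);       /* allocation assumed */
        if (mb == NULL)
            goto wq_ll_pop;                     /* drop; buffer is reused */
        bcopy(rq->mbuf[wqe_counter].data, mtod(mb, caddr_t), byte_cnt);
    } else {
        /* Hand the ring mbuf to the stack; ownership moves. */
        mb = rq->mbuf[wqe_counter].mbuf;
        rq->mbuf[wqe_counter].mbuf = NULL;      /* safety clear */
        bus_dmamap_unload(rq->dma_tag, rq->mbuf[wqe_counter].dma_map);
    }

    mlx5e_build_rx_mbuf(cqe, rq, mb, &rq->mbuf[wqe_counter], byte_cnt);
    rq->stats.bytes += byte_cnt;
    rq->stats.packets++;

    /* Prefer LRO aggregation; fall back to direct input. */
    if ((if_getcapenable(rq->ifp) & IFCAP_LRO) == 0 ||
        rq->lro.lro_cnt == 0 || tcp_lro_rx(&rq->lro, mb, 0) != 0)
        if_input(rq->ifp, mb);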

mlx5e_rx_cq_comp():

    struct mlx5e_channel *c = container_of(mcq, struct mlx5e_channel, rq.cq.mcq);
    struct mlx5e_rq *rq = container_of(mcq, struct mlx5e_rq, cq.mcq);
    ...
    mb->m_data[14] = rq->ix;
    mb->m_pkthdr.rcvif = rq->ifp;
    mb->m_pkthdr.leaf_rcvif = rq->ifp;
    if_input(rq->ifp, mb);
    ...
    mtx_lock(&rq->mtx);
    if (rq->enabled == 0)
        ...
    rq->processing++;
    ...
    if (mlx5e_poll_rx_cq(rq, MLX5E_RX_BUDGET_MAX) != ...
    ...
    mlx5e_post_rx_wqes(rq);
    ...
    mlx5e_post_rx_wqes(rq);
    ...
    if (rq->dim.mode != NET_DIM_CQ_PERIOD_MODE_DISABLED)
        net_dim(&rq->dim, rq->stats.packets, rq->stats.bytes);
    mlx5e_cq_arm(&rq->cq, MLX5_GET_DOORBELL_LOCK(&rq->channel->priv->doorbell_lock));
    tcp_lro_flush_all(&rq->lro);
    rq->processing--;
    ...
    mtx_unlock(&rq->mtx);
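
The completion handler serializes everything on rq->mtx: it bails out if the
queue was disabled, polls the CQ in MLX5E_RX_BUDGET_MAX batches while
reposting WQEs between batches so the hardware never starves, feeds the
dynamic interrupt moderation (DIM) engine, re-arms the CQ for the next
interrupt, and flushes pending LRO aggregates. A hedged reconstruction of the
locked section; the loop framing and the done label are assumptions:

    mtx_lock(&rq->mtx);
    if (rq->enabled == 0)
        goto done;                      /* label assumed */
    rq->processing++;
    for (;;) {
        /* Stop once a batch comes back short of the budget. */
        if (mlx5e_poll_rx_cq(rq, MLX5E_RX_BUDGET_MAX) !=
            MLX5E_RX_BUDGET_MAX)
            break;
        mlx5e_post_rx_wqes(rq);         /* refill between batches */
    }
    mlx5e_post_rx_wqes(rq);             /* final refill */
    if (rq->dim.mode != NET_DIM_CQ_PERIOD_MODE_DISABLED)
        net_dim(&rq->dim, rq->stats.packets, rq->stats.bytes);
    /* Re-arm only after draining, so the next CQE raises an interrupt. */
    mlx5e_cq_arm(&rq->cq,
        MLX5_GET_DOORBELL_LOCK(&rq->channel->priv->doorbell_lock));
    tcp_lro_flush_all(&rq->lro);
    rq->processing--;
done:
    mtx_unlock(&rq->mtx);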