Lines matching refs:rq — cross-reference hits for the receive queue (struct oce_rq *rq) in what appears to be the FreeBSD oce(4) NIC driver's main source file; the leading number on each hit is that line's position in the source file.

165 static void oce_rx(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe);
166 static void oce_check_rx_bufs(POCE_SOFTC sc, uint32_t num_cqes, struct oce_rq *rq);
169 static void oce_rx_lro(struct oce_rq *rq, struct nic_hwlro_singleton_cqe *cqe, struct nic_hwlro_cqe_part2 *cqe2);
170 static void oce_rx_mbuf_chain(struct oce_rq *rq, struct oce_common_cqe_info *cqe_info, struct mbuf **m);
1509 oce_rx_mbuf_chain(struct oce_rq *rq, struct oce_common_cqe_info *cqe_info, struct mbuf **m)
1511 POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1518 if (rq->ring->cidx == rq->ring->pidx) {
1523 pd = &rq->pckts[rq->ring->cidx];
1525 bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
1526 bus_dmamap_unload(rq->tag, pd->map);
1527 RING_GET(rq->ring, 1);
1528 rq->pending--;
1530 frag_len = (len > rq->cfg.frag_size) ? rq->cfg.frag_size : len;
1537 if(rq->islro)
1543 if(rq->islro)
1552 if(rq->islro) {
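The hits above show oce_rx_mbuf_chain() walking the receive ring: each fragment's packet descriptor is looked up at the consumer index, its DMA map synced and unloaded, the index advanced with RING_GET(), and the fragment length clamped to rq->cfg.frag_size so that only the final fragment carries the packet's residue. A minimal userspace model of that loop (stand-in types and sizes, not the driver's):

    /* Sketch of the fragment-consumption loop, assuming a simple
     * modulo ring; in the driver the slots are DMA-mapped mbufs. */
    #include <stdio.h>
    #include <stdint.h>

    #define RING_SIZE 8

    struct ring {
        uint32_t cidx;              /* consumer index */
        uint32_t pidx;              /* producer index */
    };

    /* mirrors RING_GET(rq->ring, 1) */
    static void ring_get(struct ring *r)
    {
        r->cidx = (r->cidx + 1) % RING_SIZE;
    }

    int main(void)
    {
        struct ring r = { .cidx = 0, .pidx = 5 };
        uint32_t frag_size = 2048;      /* rq->cfg.frag_size */
        uint32_t len = 5000;            /* example packet: 3 fragments */
        int pending = 5;                /* posted, not yet consumed */

        while (len) {
            if (r.cidx == r.pidx) {     /* ring unexpectedly empty */
                fprintf(stderr, "out of completions\n");
                return 1;
            }
            /* only the last fragment is shorter than frag_size */
            uint32_t frag_len = (len > frag_size) ? frag_size : len;
            printf("slot %u: %u bytes\n", r.cidx, frag_len);
            ring_get(&r);
            pending--;
            len -= frag_len;
        }
        printf("pending left: %d\n", pending);
        return 0;
    }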
1574 oce_rx_lro(struct oce_rq *rq, struct nic_hwlro_singleton_cqe *cqe, struct nic_hwlro_cqe_part2 *cqe2)
1576 POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1603 cq_info.num_frags = cq_info.pkt_size / rq->cfg.frag_size;
1604 if(cq_info.pkt_size % rq->cfg.frag_size)
1607 oce_rx_mbuf_chain(rq, &cq_info, &m);
1618 if (rq->queue_index)
1619 m->m_pkthdr.flowid = (rq->queue_index - 1);
1621 m->m_pkthdr.flowid = rq->queue_index;
1646 rq->rx_stats.rx_pkts++;
1647 rq->rx_stats.rx_bytes += cq_info.pkt_size;
1648 rq->rx_stats.rx_frags += cq_info.num_frags;
1649 rq->rx_stats.rx_ucast_pkts++;
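Lines 1603-1604 compute the fragment count as a ceiling division, and lines 1618-1621 map the RQ index to an mbuf flowid (queue 0 keeps flowid 0; higher queues map to queue_index - 1, presumably because the RSS queues are numbered from 1 while RSS buckets start at 0). A quick check of the ceiling-division arithmetic, with made-up sizes:

    /* ceil(pkt_size / frag_size), exactly as lines 1603-1604 do it */
    #include <stdio.h>
    #include <stdint.h>

    static uint32_t num_frags(uint32_t pkt_size, uint32_t frag_size)
    {
        uint32_t n = pkt_size / frag_size;

        if (pkt_size % frag_size)   /* partial last fragment */
            n++;
        return n;
    }

    int main(void)
    {
        /* 9000-byte LRO aggregate over 2048-byte fragments -> 5 */
        printf("%u\n", num_frags(9000, 2048));
        /* an exact multiple needs no extra fragment -> 4 */
        printf("%u\n", num_frags(8192, 2048));
        return 0;
    }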
1655 oce_rx(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
1657 POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1670 oce_discard_rx_comp(rq, cqe->u0.s.num_fragments);
1675 oce_discard_rx_comp(rq, cqe->u0.s.num_fragments);
1691 oce_rx_mbuf_chain(rq, &cq_info, &m);
1695 if (rq->queue_index)
1696 m->m_pkthdr.flowid = (rq->queue_index - 1);
1698 m->m_pkthdr.flowid = rq->queue_index;
1726 (rq->lro.lro_cnt != 0)) {
1727 if (tcp_lro_rx(&rq->lro, m, 0) == 0) {
1728 rq->lro_pkts_queued ++;
1740 rq->rx_stats.rx_pkts++;
1741 rq->rx_stats.rx_bytes += cqe->u0.s.pkt_size;
1742 rq->rx_stats.rx_frags += cqe->u0.s.num_fragments;
1744 rq->rx_stats.rx_mcast_pkts++;
1746 rq->rx_stats.rx_ucast_pkts++;
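In the non-hardware-LRO path at lines 1726-1746, a packet is first offered to software LRO via tcp_lro_rx(); only if that fails (or LRO is disabled) does it go straight up the stack, and the per-RQ counters are bumped either way, split into multicast and unicast. A stubbed-down model of that hand-off (lro_rx_stub and the struct here are stand-ins, not the tcp_lro API):

    #include <stdio.h>
    #include <stdbool.h>
    #include <stdint.h>

    struct rx_stats {
        uint64_t rx_pkts, rx_bytes, rx_mcast_pkts, rx_ucast_pkts;
    };

    /* stand-in for tcp_lro_rx(): 0 = packet absorbed into an LRO flow */
    static int lro_rx_stub(bool absorbable) { return absorbable ? 0 : -1; }

    static void rx_one(struct rx_stats *st, uint32_t pkt_size, bool mcast,
        bool lro_enabled, int *lro_pkts_queued)
    {
        if (lro_enabled && lro_rx_stub(true) == 0)
            (*lro_pkts_queued)++;       /* flushed later in a batch */
        else
            printf("if_input: %u bytes\n", pkt_size);

        st->rx_pkts++;
        st->rx_bytes += pkt_size;
        if (mcast)
            st->rx_mcast_pkts++;
        else
            st->rx_ucast_pkts++;
    }

    int main(void)
    {
        struct rx_stats st = {0};
        int queued = 0;

        rx_one(&st, 1500, false, true, &queued);
        rx_one(&st, 1500, true, false, &queued);
        printf("pkts=%llu queued=%d\n",
            (unsigned long long)st.rx_pkts, queued);
        return 0;
    }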
1753 oce_discard_rx_comp(struct oce_rq *rq, int num_frags)
1757 POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1760 if (rq->ring->cidx == rq->ring->pidx) {
1765 pd = &rq->pckts[rq->ring->cidx];
1766 bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
1767 bus_dmamap_unload(rq->tag, pd->map);
1773 RING_GET(rq->ring, 1);
1774 rq->pending--;
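oce_discard_rx_comp() is the error path: even though the packet is dropped, its num_frags ring slots must still be consumed and each DMA map unloaded, or cidx and pending would drift out of step with what the hardware has produced. A sketch of that bookkeeping with hypothetical stub types:

    #include <stdio.h>

    #define RING_SIZE 8

    struct slot { int in_use; };

    static void discard_frags(struct slot ring[], unsigned *cidx,
        unsigned pidx, int *pending, int num_frags)
    {
        for (int i = 0; i < num_frags; i++) {
            if (*cidx == pidx)          /* nothing left to consume */
                break;
            ring[*cidx].in_use = 0;     /* models unload + m_freem */
            *cidx = (*cidx + 1) % RING_SIZE;
            (*pending)--;
        }
    }

    int main(void)
    {
        struct slot ring[RING_SIZE] = { {1}, {1}, {1}, {1} };
        unsigned cidx = 0, pidx = 4;
        int pending = 4;

        discard_frags(ring, &cidx, pidx, &pending, 3);
        printf("cidx=%u pending=%d\n", cidx, pending);
        return 0;
    }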
1814 oce_rx_flush_lro(struct oce_rq *rq)
1816 struct lro_ctrl *lro = &rq->lro;
1817 POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1823 rq->lro_pkts_queued = 0;
1835 lro = &sc->rq[i]->lro;
1854 lro = &sc->rq[i]->lro;
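oce_rx_flush_lro() pushes whatever software LRO has aggregated up the stack and zeroes lro_pkts_queued; line 2066 further down shows it being triggered mid-loop once 16 packets are queued, and line 2083 shows the final flush on the way out of the handler. A batching model of that threshold:

    #include <stdio.h>

    #define LRO_FLUSH_THRESHOLD 16      /* threshold seen at line 2066 */

    struct lro_model { int pkts_queued; };

    static void lro_flush(struct lro_model *l)
    {
        if (l->pkts_queued) {
            printf("flushing %d queued packets\n", l->pkts_queued);
            l->pkts_queued = 0;
        }
    }

    int main(void)
    {
        struct lro_model l = {0};

        for (int i = 0; i < 40; i++) {
            l.pkts_queued++;
            if (l.pkts_queued >= LRO_FLUSH_THRESHOLD)
                lro_flush(&l);          /* mid-burst flush */
        }
        lro_flush(&l);                  /* end-of-handler flush */
        return 0;
    }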
1862 oce_alloc_rx_bufs(struct oce_rq *rq, int count)
1864 POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1876 pd = &rq->pckts[rq->ring->pidx];
1884 pd->mbuf->m_len = pd->mbuf->m_pkthdr.len = rq->cfg.frag_size;
1886 rc = bus_dmamap_load_mbuf_sg(rq->tag,
1901 bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_PREREAD);
1903 rqe = RING_GET_PRODUCER_ITEM_VA(rq->ring, struct oce_nic_rqe);
1907 RING_PUT(rq->ring, 1);
1909 rq->pending++;
1915 rxdb_reg.bits.qid = rq->rq_id;
1916 if(rq->islro) {
1917 val |= rq->rq_id & DB_LRO_RQ_ID_MASK;
1926 rxdb_reg.bits.qid = rq->rq_id;
1928 if(rq->islro) {
1929 val |= rq->rq_id & DB_LRO_RQ_ID_MASK;
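oce_alloc_rx_bufs() refills the ring: for each buffer it sizes an mbuf to frag_size, loads it for DMA, writes an RQE at the producer index, and bumps pending. The two separate qid writes at lines 1915 and 1926 suggest the doorbell is rung in batches plus a remainder, since a doorbell register's num_posted field can only express a limited count. A sketch of that batching (MAX_POSTS_PER_DB is an assumed stand-in, not the driver's constant):

    #include <stdio.h>

    #define MAX_POSTS_PER_DB 255    /* assumption, not the real limit */

    static void ring_rq_doorbell(int qid, int nbufs)
    {
        for (int i = nbufs / MAX_POSTS_PER_DB; i > 0; i--) {
            printf("db: qid=%d num_posted=%d\n", qid, MAX_POSTS_PER_DB);
            nbufs -= MAX_POSTS_PER_DB;
        }
        if (nbufs > 0)
            printf("db: qid=%d num_posted=%d\n", qid, nbufs);
    }

    int main(void)
    {
        ring_rq_doorbell(7, 600);   /* 255 + 255 + 90 */
        return 0;
    }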
1942 oce_check_rx_bufs(POCE_SOFTC sc, uint32_t num_cqes, struct oce_rq *rq)
1945 oce_arm_cq(sc, rq->cq->cq_id, num_cqes, FALSE);
1947 if((OCE_RQ_PACKET_ARRAY_SIZE - rq->pending) > 1)
1948 oce_alloc_rx_bufs(rq, ((OCE_RQ_PACKET_ARRAY_SIZE - rq->pending) - 1));
1950 if ((OCE_RQ_PACKET_ARRAY_SIZE -1 - rq->pending) > 64)
1951 oce_alloc_rx_bufs(rq, 64);
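oce_check_rx_bufs() re-arms the CQ for the consumed entries and then replenishes under one of two policies: one branch tops the ring up to one slot short of full (the spare slot keeps cidx == pidx unambiguous as "empty"), the other refills in 64-buffer chunks only once at least that many slots are free, which cuts doorbell traffic. A model of those thresholds (RQ_ARRAY_SIZE stands in for OCE_RQ_PACKET_ARRAY_SIZE, and which branch the driver selects when is my reading):

    #include <stdio.h>

    #define RQ_ARRAY_SIZE 1024  /* stand-in for OCE_RQ_PACKET_ARRAY_SIZE */

    static int bufs_to_post(int pending, int aggressive)
    {
        if (aggressive) {       /* keep the ring nearly full */
            if ((RQ_ARRAY_SIZE - pending) > 1)
                return (RQ_ARRAY_SIZE - pending) - 1;
        } else {                /* batch refills of 64 */
            if ((RQ_ARRAY_SIZE - 1 - pending) > 64)
                return 64;
        }
        return 0;
    }

    int main(void)
    {
        printf("%d\n", bufs_to_post(900, 1));   /* 123 */
        printf("%d\n", bufs_to_post(900, 0));   /* 64 */
        printf("%d\n", bufs_to_post(1000, 0));  /* 0: under 64 free */
        return 0;
    }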
1961 struct oce_rq *rq = (struct oce_rq *)arg;
1962 struct oce_cq *cq = rq->cq;
1963 POCE_SOFTC sc = rq->parent;
1968 LOCK(&rq->rx_lock);
1973 /* we should not get singleton cqe after cqe1 on same rq */
1974 if(rq->cqe_firstpart != NULL) {
1979 rq->rx_stats.rxcp_err++;
1982 oce_rx_lro(rq, cqe, NULL);
1983 rq->rx_stats.rx_compl++;
1990 /* we should not get cqe1 after cqe1 on same rq */
1991 if(rq->cqe_firstpart != NULL) {
1995 rq->cqe_firstpart = (struct nic_hwlro_cqe_part1 *)cqe;
2000 rq->rx_stats.rxcp_err++;
2004 if(rq->cqe_firstpart == NULL) {
2008 oce_rx_lro(rq, (struct nic_hwlro_singleton_cqe *)rq->cqe_firstpart, cqe2);
2010 rq->rx_stats.rx_compl++;
2011 rq->cqe_firstpart->valid = 0;
2013 rq->cqe_firstpart = NULL;
2024 oce_check_rx_bufs(sc, num_cqes, rq);
2026 UNLOCK(&rq->rx_lock);
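This handler reassembles hardware-LRO completions, which arrive either as a singleton CQE or as a part1/part2 pair: part1 is parked in rq->cqe_firstpart until its part2 shows up, and the checks at lines 1974, 1991, and 2004 reject any out-of-order combination as a protocol error (counted in rxcp_err). The implied state machine, with my own enum and state names:

    #include <stdio.h>

    enum cqe_kind { CQE_SINGLETON, CQE_PART1, CQE_PART2 };

    struct rq_state { int have_part1; };   /* models rq->cqe_firstpart */

    static const char *consume(struct rq_state *s, enum cqe_kind k)
    {
        switch (k) {
        case CQE_SINGLETON:
            if (s->have_part1)
                return "error: singleton after part1";
            return "complete: singleton";
        case CQE_PART1:
            if (s->have_part1)
                return "error: part1 after part1";
            s->have_part1 = 1;      /* park it, wait for part2 */
            return "parked part1";
        case CQE_PART2:
            if (!s->have_part1)
                return "error: part2 without part1";
            s->have_part1 = 0;      /* pair consumed together */
            return "complete: part1+part2";
        }
        return "unreachable";
    }

    int main(void)
    {
        struct rq_state s = {0};
        enum cqe_kind seq[] =
            { CQE_SINGLETON, CQE_PART1, CQE_PART2, CQE_PART2 };

        for (unsigned i = 0; i < sizeof(seq) / sizeof(seq[0]); i++)
            printf("%s\n", consume(&s, seq[i]));
        return 0;
    }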
2035 struct oce_rq *rq = (struct oce_rq *)arg;
2036 struct oce_cq *cq = rq->cq;
2037 POCE_SOFTC sc = rq->parent;
2042 if(rq->islro) {
2047 LOCK(&rq->rx_lock);
2055 oce_rx(rq, cqe);
2057 rq->rx_stats.rxcp_err++;
2060 oce_rx(rq, cqe);
2062 rq->rx_stats.rx_compl++;
2066 if (IF_LRO_ENABLED(sc) && rq->lro_pkts_queued >= 16) {
2067 oce_rx_flush_lro(rq);
2083 oce_rx_flush_lro(rq);
2086 oce_check_rx_bufs(sc, num_cqes, rq);
2087 UNLOCK(&rq->rx_lock);
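The regular (non-LRO) handler follows the usual completion-queue shape: under rx_lock it consumes valid CQEs one by one through oce_rx(), counts them in num_cqes, flushes software LRO every 16 queued packets and once more on exit, then hands the count to oce_check_rx_bufs() to re-arm the CQ and refill the ring. A stubbed-down model of that loop (CQE invalidation is simplified to clearing a flag):

    #include <stdio.h>

    #define CQ_SIZE 16

    struct cqe { int valid; };

    int main(void)
    {
        struct cqe cq[CQ_SIZE] = { {1}, {1}, {1}, {0} };
        int cidx = 0, num_cqes = 0;

        while (cq[cidx].valid) {
            /* process one packet: oce_rx(rq, cqe) in the driver */
            cq[cidx].valid = 0;         /* hand the entry back */
            cidx = (cidx + 1) % CQ_SIZE;
            num_cqes++;
        }
        /* driver: oce_check_rx_bufs(sc, num_cqes, rq) */
        printf("consumed %d cqes\n", num_cqes);
        return 0;
    }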
2308 struct oce_rq *rq;
2324 rq = sc->rq[0];
2325 rxpkts = rq->rx_stats.rx_pkts;
2329 rq = sc->rq[i + 1];
2330 rxpkts += rq->rx_stats.rx_pkts;
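These last per-statistics hits (lines 2324-2330) appear to aggregate rx_pkts across the default queue sc->rq[0] and the indexed queues, presumably to derive a packet rate for periodic interrupt-moderation tuning. The arithmetic, with made-up counter values:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t rx_pkts[4] = { 100, 2000, 1500, 1800 }; /* per-RQ */
        uint64_t rxpkts = rx_pkts[0];       /* default queue, rq[0] */

        for (int i = 0; i < 3; i++)
            rxpkts += rx_pkts[i + 1];       /* sc->rq[i + 1] hits */
        printf("aggregate rx_pkts = %llu\n", (unsigned long long)rxpkts);
        return 0;
    }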
2512 struct oce_rq *rq;
2541 for_all_rq_queues(sc, rq, i)
2542 oce_drain_rq_cq(rq);
2559 struct oce_rq *rq;
2569 for_all_rq_queues(sc, rq, i) {
2570 rc = oce_start_rq(rq);