Lines matching refs: rxq

564 static void *mana_get_rxbuf_pre(struct mana_rxq *rxq, dma_addr_t *da)  in mana_get_rxbuf_pre()  argument
566 struct net_device *ndev = rxq->ndev; in mana_get_rxbuf_pre()
578 if (mpc->rxbpre_datasize != rxq->datasize) { in mana_get_rxbuf_pre()
580 mpc->rxbpre_datasize, rxq->datasize); in mana_get_rxbuf_pre()
584 if (mpc->rxbpre_alloc_size != rxq->alloc_size) { in mana_get_rxbuf_pre()
586 mpc->rxbpre_alloc_size, rxq->alloc_size); in mana_get_rxbuf_pre()
590 if (mpc->rxbpre_headroom != rxq->headroom) { in mana_get_rxbuf_pre()
592 mpc->rxbpre_headroom, rxq->headroom); in mana_get_rxbuf_pre()
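
The checks above are a validate-before-use guard: mana_get_rxbuf_pre() hands out a pre-allocated buffer only while its recorded geometry (datasize, alloc_size, headroom) still matches the queue's current configuration, refusing it on the first mismatch so the caller can fall back to a fresh allocation. A runnable stand-alone sketch of that pattern (struct and function names are illustrative, not the driver's):

#include <stdio.h>

struct rxbuf_cfg {
        unsigned int datasize;
        unsigned int alloc_size;
        unsigned int headroom;
};

/* Reuse the pre-allocated buffer only if every geometry field still
 * matches, mirroring the field-by-field checks in mana_get_rxbuf_pre(). */
static void *get_rxbuf_pre_sketch(void *buf, const struct rxbuf_cfg *pre,
                                  const struct rxbuf_cfg *cur)
{
        if (pre->datasize != cur->datasize ||
            pre->alloc_size != cur->alloc_size ||
            pre->headroom != cur->headroom)
                return NULL;    /* stale geometry: caller allocates fresh */
        return buf;
}

int main(void)
{
        struct rxbuf_cfg pre = { 1500, 2048, 0 };
        struct rxbuf_cfg cur = { 9000, 16384, 0 };      /* e.g. MTU changed */
        char buf[16];

        printf("reuse pre-allocated buffer? %s\n",
               get_rxbuf_pre_sketch(buf, &pre, &cur) ? "yes" : "no");
        return 0;
}
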
1318 static int mana_fence_rq(struct mana_port_context *apc, struct mana_rxq *rxq) in mana_fence_rq() argument
1324 init_completion(&rxq->fence_event); in mana_fence_rq()
1328 req.wq_obj_handle = rxq->rxobj; in mana_fence_rq()
1334 rxq->rxq_idx, err); in mana_fence_rq()
1341 rxq->rxq_idx, err, resp.hdr.status); in mana_fence_rq()
1348 if (wait_for_completion_timeout(&rxq->fence_event, 10 * HZ) == 0) { in mana_fence_rq()
1350 rxq->rxq_idx); in mana_fence_rq()
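
mana_fence_rq() is a textbook completion handshake: arm rxq->fence_event before the fence request for rxq->rxobj goes out, then block for up to ten seconds; the RX CQE path fires complete(&rxq->fence_event) when the fence echoes back (line 1766 below), and a zero return from wait_for_completion_timeout() means the device never answered. A condensed kernel-style sketch, with the hardware request reduced to a hypothetical stub:

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

struct demo_rq {
        struct completion fence_event;
};

/* Hypothetical stand-in for posting the fence request to the device. */
static int demo_post_fence_request(struct demo_rq *rq)
{
        return 0;
}

static int demo_fence_rq(struct demo_rq *rq)
{
        int err;

        /* Arm the completion before the request can possibly complete. */
        init_completion(&rq->fence_event);

        err = demo_post_fence_request(rq);
        if (err)
                return err;

        /* The CQE handler calls complete(); 0 here means timeout. */
        if (!wait_for_completion_timeout(&rq->fence_event, 10 * HZ))
                return -ETIMEDOUT;

        return 0;
}
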
1360 struct mana_rxq *rxq; in mana_fence_rqs() local
1364 rxq = apc->rxqs[rxq_idx]; in mana_fence_rqs()
1365 err = mana_fence_rq(apc, rxq); in mana_fence_rqs()
1521 static void mana_post_pkt_rxq(struct mana_rxq *rxq) in mana_post_pkt_rxq() argument
1527 curr_index = rxq->buf_index++; in mana_post_pkt_rxq()
1528 if (rxq->buf_index == rxq->num_rx_buf) in mana_post_pkt_rxq()
1529 rxq->buf_index = 0; in mana_post_pkt_rxq()
1531 recv_buf_oob = &rxq->rx_oobs[curr_index]; in mana_post_pkt_rxq()
1533 err = mana_gd_post_work_request(rxq->gdma_rq, &recv_buf_oob->wqe_req, in mana_post_pkt_rxq()
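
mana_post_pkt_rxq() walks a fixed ring of receive OOBs: buf_index is post-incremented and wraps to zero once it reaches num_rx_buf, so each posted WQE reuses the slot that just completed. The wrap logic in isolation, as a runnable sketch:

#include <stdio.h>

#define NUM_RX_BUF 4    /* stand-in for rxq->num_rx_buf */

int main(void)
{
        unsigned int buf_index = 0;
        int i;

        for (i = 0; i < 10; i++) {
                unsigned int curr_index = buf_index++;

                if (buf_index == NUM_RX_BUF)    /* wrap, as at line 1528 */
                        buf_index = 0;

                printf("posting rx_oobs[%u]\n", curr_index);
        }
        return 0;
}
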
1541 static struct sk_buff *mana_build_skb(struct mana_rxq *rxq, void *buf_va, in mana_build_skb() argument
1544 struct sk_buff *skb = napi_build_skb(buf_va, rxq->alloc_size); in mana_build_skb()
1555 skb_reserve(skb, rxq->headroom); in mana_build_skb()
1562 struct mana_rxcomp_oob *cqe, struct mana_rxq *rxq) in mana_rx_skb() argument
1564 struct mana_stats_rx *rx_stats = &rxq->stats; in mana_rx_skb()
1565 struct net_device *ndev = rxq->ndev; in mana_rx_skb()
1567 u16 rxq_idx = rxq->rxq_idx; in mana_rx_skb()
1574 rxq->rx_cq.work_done++; in mana_rx_skb()
1575 napi = &rxq->rx_cq.napi; in mana_rx_skb()
1582 act = mana_run_xdp(ndev, rxq, &xdp, buf_va, pkt_len); in mana_rx_skb()
1584 if (act == XDP_REDIRECT && !rxq->xdp_rc) in mana_rx_skb()
1590 skb = mana_build_skb(rxq, buf_va, pkt_len, &xdp); in mana_rx_skb()
1649 page_pool_recycle_direct(rxq->page_pool, in mana_rx_skb()
1652 WARN_ON_ONCE(rxq->xdp_save_va); in mana_rx_skb()
1654 rxq->xdp_save_va = buf_va; in mana_rx_skb()
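
Lines 1574-1654 are the RX completion fast path: the verdict from mana_run_xdp() decides whether the frame becomes an skb (mana_build_skb() on the pass path) or was consumed by a successful redirect (line 1584, where rxq->xdp_rc records the redirect result); on drop, a page-pool buffer is recycled directly while a frag buffer is parked in rxq->xdp_save_va for the next mana_get_rxfrag() call (lines 1671-1673). The verdict dispatch, reduced to a kernel-style sketch with hypothetical helpers:

#include <linux/bpf.h>
#include <linux/types.h>

/* demo_* helpers are stand-ins; only the dispatch shape mirrors
 * mana_rx_skb() above. */
static void demo_pass_to_stack(void *buf) { }
static void demo_recycle_or_save(void *buf) { }

static void demo_xdp_verdict(u32 act, int xdp_rc, void *buf)
{
        if (act == XDP_REDIRECT && !xdp_rc)
                return;         /* redirect target now owns the buffer */

        switch (act) {
        case XDP_PASS:
                demo_pass_to_stack(buf);        /* build skb, hand to NAPI */
                break;
        default:
                demo_recycle_or_save(buf);      /* drop: recycle or park */
                break;
        }
}
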
1662 static void *mana_get_rxfrag(struct mana_rxq *rxq, struct device *dev, in mana_get_rxfrag() argument
1671 if (rxq->xdp_save_va) { in mana_get_rxfrag()
1672 va = rxq->xdp_save_va; in mana_get_rxfrag()
1673 rxq->xdp_save_va = NULL; in mana_get_rxfrag()
1674 } else if (rxq->alloc_size > PAGE_SIZE) { in mana_get_rxfrag()
1676 va = napi_alloc_frag(rxq->alloc_size); in mana_get_rxfrag()
1678 va = netdev_alloc_frag(rxq->alloc_size); in mana_get_rxfrag()
1685 if (compound_order(page) < get_order(rxq->alloc_size)) { in mana_get_rxfrag()
1690 page = page_pool_dev_alloc_pages(rxq->page_pool); in mana_get_rxfrag()
1698 *da = dma_map_single(dev, va + rxq->headroom, rxq->datasize, in mana_get_rxfrag()
1702 page_pool_put_full_page(rxq->page_pool, page, false); in mana_get_rxfrag()
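
mana_get_rxfrag() tries three buffer sources in order: a va parked earlier by the XDP drop path (lines 1671-1673), a page fragment via napi_alloc_frag()/netdev_alloc_frag() when alloc_size exceeds PAGE_SIZE, and otherwise the queue's page pool. Whichever source wins, the buffer is DMA-mapped at the headroom offset for datasize bytes and released again if the mapping fails. The map-or-unwind tail as a kernel-style sketch:

#include <linux/dma-mapping.h>
#include <linux/types.h>

/* Stand-in for the source-specific release call, e.g.
 * page_pool_put_full_page() on the pool path (line 1702). */
static void demo_free_buf(void *va, bool from_pool) { }

static void *demo_map_rxbuf(struct device *dev, void *va, bool from_pool,
                            u32 headroom, u32 datasize, dma_addr_t *da)
{
        /* Map only the data region past the headroom. */
        *da = dma_map_single(dev, va + headroom, datasize, DMA_FROM_DEVICE);
        if (dma_mapping_error(dev, *da)) {
                demo_free_buf(va, from_pool);   /* unwind the allocation */
                return NULL;
        }
        return va;
}
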
1713 static void mana_refill_rx_oob(struct device *dev, struct mana_rxq *rxq, in mana_refill_rx_oob() argument
1721 va = mana_get_rxfrag(rxq, dev, &da, &from_pool, true); in mana_refill_rx_oob()
1725 dma_unmap_single(dev, rxoob->sgl[0].address, rxq->datasize, in mana_refill_rx_oob()
1735 static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq, in mana_process_rx_cqe() argument
1739 struct gdma_context *gc = rxq->gdma_rq->gdma_dev->gdma_context; in mana_process_rx_cqe()
1740 struct net_device *ndev = rxq->ndev; in mana_process_rx_cqe()
1756 rxbuf_oob = &rxq->rx_oobs[rxq->buf_index]; in mana_process_rx_cqe()
1766 complete(&rxq->fence_event); in mana_process_rx_cqe()
1781 rxq->gdma_id, cq->gdma_id, rxq->rxobj); in mana_process_rx_cqe()
1785 curr = rxq->buf_index; in mana_process_rx_cqe()
1786 rxbuf_oob = &rxq->rx_oobs[curr]; in mana_process_rx_cqe()
1789 mana_refill_rx_oob(dev, rxq, rxbuf_oob, &old_buf, &old_fp); in mana_process_rx_cqe()
1794 mana_rx_skb(old_buf, old_fp, oob, rxq); in mana_process_rx_cqe()
1797 mana_move_wq_tail(rxq->gdma_rq, rxbuf_oob->wqe_inf.wqe_size_in_bu); in mana_process_rx_cqe()
1799 mana_post_pkt_rxq(rxq); in mana_process_rx_cqe()
1805 struct mana_rxq *rxq = cq->rxq; in mana_poll_rx_cq() local
1811 rxq->xdp_flush = false; in mana_poll_rx_cq()
1818 if (WARN_ON_ONCE(comp[i].wq_num != cq->rxq->gdma_id)) in mana_poll_rx_cq()
1821 mana_process_rx_cqe(rxq, cq, &comp[i]); in mana_poll_rx_cq()
1825 struct gdma_context *gc = rxq->gdma_rq->gdma_dev->gdma_context; in mana_poll_rx_cq()
1827 mana_gd_wq_ring_doorbell(gc, rxq->gdma_rq); in mana_poll_rx_cq()
1830 if (rxq->xdp_flush) in mana_poll_rx_cq()
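
mana_poll_rx_cq() batches its bookkeeping: each completion is sanity-checked against the owning queue (the WARN_ON_ONCE at line 1818) and processed in a loop, the RQ doorbell is rung once for the whole batch (line 1827) instead of once per buffer, and a single XDP flush is issued at the end if any frame was redirected (rxq->xdp_flush, lines 1811 and 1830). The batched-doorbell idea as a runnable toy:

#include <stdio.h>

#define CQE_BATCH 8     /* illustrative batch size, not the driver's */

/* Pretend the hardware produced a full batch of completions. */
static int demo_fetch_completions(int *comp, int max)
{
        int i;

        for (i = 0; i < max; i++)
                comp[i] = i;
        return max;
}

int main(void)
{
        int comp[CQE_BATCH];
        int n = demo_fetch_completions(comp, CQE_BATCH);
        int i;

        for (i = 0; i < n; i++)
                printf("process cqe %d\n", comp[i]);

        if (n)  /* one doorbell for the batch, as at line 1827 */
                puts("ring doorbell");
        return 0;
}
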
2088 struct mana_rxq *rxq, bool napi_initialized) in mana_destroy_rxq() argument
2098 if (!rxq) in mana_destroy_rxq()
2101 debugfs_remove_recursive(rxq->mana_rx_debugfs); in mana_destroy_rxq()
2103 napi = &rxq->rx_cq.napi; in mana_destroy_rxq()
2112 xdp_rxq_info_unreg(&rxq->xdp_rxq); in mana_destroy_rxq()
2114 mana_destroy_wq_obj(apc, GDMA_RQ, rxq->rxobj); in mana_destroy_rxq()
2116 mana_deinit_cq(apc, &rxq->rx_cq); in mana_destroy_rxq()
2118 if (rxq->xdp_save_va) in mana_destroy_rxq()
2119 put_page(virt_to_head_page(rxq->xdp_save_va)); in mana_destroy_rxq()
2121 for (i = 0; i < rxq->num_rx_buf; i++) { in mana_destroy_rxq()
2122 rx_oob = &rxq->rx_oobs[i]; in mana_destroy_rxq()
2133 page_pool_put_full_page(rxq->page_pool, page, false); in mana_destroy_rxq()
2140 page_pool_destroy(rxq->page_pool); in mana_destroy_rxq()
2142 if (rxq->gdma_rq) in mana_destroy_rxq()
2143 mana_gd_destroy_queue(gc, rxq->gdma_rq); in mana_destroy_rxq()
2145 kfree(rxq); in mana_destroy_rxq()
2149 struct mana_rxq *rxq, struct device *dev) in mana_fill_rx_oob() argument
2151 struct mana_port_context *mpc = netdev_priv(rxq->ndev); in mana_fill_rx_oob()
2157 va = mana_get_rxbuf_pre(rxq, &da); in mana_fill_rx_oob()
2159 va = mana_get_rxfrag(rxq, dev, &da, &from_pool, false); in mana_fill_rx_oob()
2168 rx_oob->sgl[0].size = rxq->datasize; in mana_fill_rx_oob()
2178 struct mana_rxq *rxq, u32 *rxq_size, u32 *cq_size) in mana_alloc_rx_wqe() argument
2186 WARN_ON(rxq->datasize == 0); in mana_alloc_rx_wqe()
2191 for (buf_idx = 0; buf_idx < rxq->num_rx_buf; buf_idx++) { in mana_alloc_rx_wqe()
2192 rx_oob = &rxq->rx_oobs[buf_idx]; in mana_alloc_rx_wqe()
2197 ret = mana_fill_rx_oob(rx_oob, apc->ac->gdma_dev->gpa_mkey, rxq, in mana_alloc_rx_wqe()
2217 static int mana_push_wqe(struct mana_rxq *rxq) in mana_push_wqe() argument
2223 for (buf_idx = 0; buf_idx < rxq->num_rx_buf; buf_idx++) { in mana_push_wqe()
2224 rx_oob = &rxq->rx_oobs[buf_idx]; in mana_push_wqe()
2226 err = mana_gd_post_and_ring(rxq->gdma_rq, &rx_oob->wqe_req, in mana_push_wqe()
2235 static int mana_create_page_pool(struct mana_rxq *rxq, struct gdma_context *gc) in mana_create_page_pool() argument
2237 struct mana_port_context *mpc = netdev_priv(rxq->ndev); in mana_create_page_pool()
2243 pprm.napi = &rxq->rx_cq.napi; in mana_create_page_pool()
2244 pprm.netdev = rxq->ndev; in mana_create_page_pool()
2246 rxq->page_pool = page_pool_create(&pprm); in mana_create_page_pool()
2248 if (IS_ERR(rxq->page_pool)) { in mana_create_page_pool()
2249 ret = PTR_ERR(rxq->page_pool); in mana_create_page_pool()
2250 rxq->page_pool = NULL; in mana_create_page_pool()
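
mana_create_page_pool() fills a page_pool_params with the queue's NAPI context and netdev, then calls page_pool_create(); on failure the ERR_PTR is converted with PTR_ERR() and the stored pointer is nulled so later teardown (page_pool_destroy() at line 2140) never sees an error pointer. The create-or-clear shape, assuming a kernel recent enough to have the net/page_pool/ headers, with an illustrative pool_size:

#include <linux/err.h>
#include <linux/netdevice.h>
#include <net/page_pool/helpers.h>

struct demo_rxq {
        struct page_pool *page_pool;
        struct napi_struct *napi;
        struct net_device *ndev;
        u32 num_rx_buf;
};

static int demo_create_page_pool(struct demo_rxq *rxq)
{
        struct page_pool_params pprm = {};
        int ret;

        pprm.pool_size = rxq->num_rx_buf;       /* illustrative sizing */
        pprm.napi = rxq->napi;
        pprm.netdev = rxq->ndev;

        rxq->page_pool = page_pool_create(&pprm);
        if (IS_ERR(rxq->page_pool)) {
                ret = PTR_ERR(rxq->page_pool);
                rxq->page_pool = NULL;  /* never leave an ERR_PTR behind */
                return ret;
        }
        return 0;
}
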
2268 struct mana_rxq *rxq; in mana_create_rxq() local
2273 rxq = kzalloc(struct_size(rxq, rx_oobs, apc->rx_queue_size), in mana_create_rxq()
2275 if (!rxq) in mana_create_rxq()
2278 rxq->ndev = ndev; in mana_create_rxq()
2279 rxq->num_rx_buf = apc->rx_queue_size; in mana_create_rxq()
2280 rxq->rxq_idx = rxq_idx; in mana_create_rxq()
2281 rxq->rxobj = INVALID_MANA_HANDLE; in mana_create_rxq()
2283 mana_get_rxbuf_cfg(ndev->mtu, &rxq->datasize, &rxq->alloc_size, in mana_create_rxq()
2284 &rxq->headroom); in mana_create_rxq()
2287 err = mana_create_page_pool(rxq, gc); in mana_create_rxq()
2293 err = mana_alloc_rx_wqe(apc, rxq, &rq_size, &cq_size); in mana_create_rxq()
2305 err = mana_gd_create_mana_wq_cq(gd, &spec, &rxq->gdma_rq); in mana_create_rxq()
2310 cq = &rxq->rx_cq; in mana_create_rxq()
2312 cq->rxq = rxq; in mana_create_rxq()
2327 wq_spec.gdma_region = rxq->gdma_rq->mem_info.dma_region_handle; in mana_create_rxq()
2328 wq_spec.queue_size = rxq->gdma_rq->queue_size; in mana_create_rxq()
2336 &wq_spec, &cq_spec, &rxq->rxobj); in mana_create_rxq()
2340 rxq->gdma_rq->id = wq_spec.queue_index; in mana_create_rxq()
2343 rxq->gdma_rq->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION; in mana_create_rxq()
2346 rxq->gdma_id = rxq->gdma_rq->id; in mana_create_rxq()
2349 err = mana_push_wqe(rxq); in mana_create_rxq()
2362 WARN_ON(xdp_rxq_info_reg(&rxq->xdp_rxq, ndev, rxq_idx, in mana_create_rxq()
2364 WARN_ON(xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL, in mana_create_rxq()
2365 rxq->page_pool)); in mana_create_rxq()
2372 return rxq; in mana_create_rxq()
2376 mana_destroy_rxq(apc, rxq, false); in mana_create_rxq()
2386 struct mana_rxq *rxq; in mana_create_rxq_debugfs() local
2389 rxq = apc->rxqs[idx]; in mana_create_rxq_debugfs()
2392 rxq->mana_rx_debugfs = debugfs_create_dir(qnum, apc->mana_port_debugfs); in mana_create_rxq_debugfs()
2393 debugfs_create_u32("rq_head", 0400, rxq->mana_rx_debugfs, &rxq->gdma_rq->head); in mana_create_rxq_debugfs()
2394 debugfs_create_u32("rq_tail", 0400, rxq->mana_rx_debugfs, &rxq->gdma_rq->tail); in mana_create_rxq_debugfs()
2395 debugfs_create_u32("rq_nbuf", 0400, rxq->mana_rx_debugfs, &rxq->num_rx_buf); in mana_create_rxq_debugfs()
2396 debugfs_create_u32("cq_head", 0400, rxq->mana_rx_debugfs, in mana_create_rxq_debugfs()
2397 &rxq->rx_cq.gdma_cq->head); in mana_create_rxq_debugfs()
2398 debugfs_create_u32("cq_tail", 0400, rxq->mana_rx_debugfs, in mana_create_rxq_debugfs()
2399 &rxq->rx_cq.gdma_cq->tail); in mana_create_rxq_debugfs()
2400 debugfs_create_u32("cq_budget", 0400, rxq->mana_rx_debugfs, &rxq->rx_cq.budget); in mana_create_rxq_debugfs()
2401 debugfs_create_file("rxq_dump", 0400, rxq->mana_rx_debugfs, rxq->gdma_rq, &mana_dbg_q_fops); in mana_create_rxq_debugfs()
2402 debugfs_create_file("cq_dump", 0400, rxq->mana_rx_debugfs, rxq->rx_cq.gdma_cq, in mana_create_rxq_debugfs()
2410 struct mana_rxq *rxq; in mana_add_rx_queues() local
2415 rxq = mana_create_rxq(apc, i, &ac->eqs[i], ndev); in mana_add_rx_queues()
2416 if (!rxq) { in mana_add_rx_queues()
2421 u64_stats_init(&rxq->stats.syncp); in mana_add_rx_queues()
2423 apc->rxqs[i] = rxq; in mana_add_rx_queues()
2436 struct mana_rxq *rxq; in mana_destroy_vport() local
2440 rxq = apc->rxqs[rxq_idx]; in mana_destroy_vport()
2441 if (!rxq) in mana_destroy_vport()
2444 mana_destroy_rxq(apc, rxq, true); in mana_destroy_vport()