Lines Matching refs:rxq
410 mana_load_rx_mbuf(struct mana_port_context *apc, struct mana_rxq *rxq, in mana_load_rx_mbuf() argument
419 mbuf = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rxq->datasize); in mana_load_rx_mbuf()
427 mlen = rxq->datasize; in mana_load_rx_mbuf()
446 counter_u64_add(rxq->stats.dma_mapping_err, 1); in mana_load_rx_mbuf()
467 mana_unload_rx_mbuf(struct mana_port_context *apc, struct mana_rxq *rxq, in mana_unload_rx_mbuf() argument
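The load/unload pair above allocates a receive mbuf sized to rxq->datasize, DMA-maps it, and counts mapping failures in the per-queue stats. Below is a minimal userspace sketch of that pairing; rxq_model, load_rx_buf, and unload_rx_buf are hypothetical stand-ins for the driver's m_getjcl()/busdma machinery, with malloc() in place of the real allocator.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct rxq_model {
        size_t datasize;            /* per-buffer receive size */
        unsigned long mapping_err;  /* models stats.dma_mapping_err */
};

static void *
load_rx_buf(struct rxq_model *rxq)
{
        void *buf = malloc(rxq->datasize);   /* models m_getjcl(M_NOWAIT, ...) */

        if (buf == NULL) {
                rxq->mapping_err++;          /* models counter_u64_add() */
                return (NULL);
        }
        memset(buf, 0, rxq->datasize);
        return (buf);
}

static void
unload_rx_buf(void *buf)
{
        free(buf);                           /* models bus_dmamap_unload() + m_freem() */
}

int
main(void)
{
        struct rxq_model rxq = { .datasize = 2048 };
        void *b = load_rx_buf(&rxq);

        if (b != NULL)
                unload_rx_buf(b);
        printf("mapping errors: %lu\n", rxq.mapping_err);
        return (0);
}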
1340 mana_fence_rq(struct mana_port_context *apc, struct mana_rxq *rxq) in mana_fence_rq() argument
1346 init_completion(&rxq->fence_event); in mana_fence_rq()
1350 req.wq_obj_handle = rxq->rxobj; in mana_fence_rq()
1356 rxq->rxq_idx, err); in mana_fence_rq()
1363 rxq->rxq_idx, err, resp.hdr.status); in mana_fence_rq()
1370 if (wait_for_completion_timeout(&rxq->fence_event, 10 * hz)) { in mana_fence_rq()
1372 rxq->rxq_idx); in mana_fence_rq()
1383 struct mana_rxq *rxq; in mana_fence_rqs() local
1387 rxq = apc->rxqs[rxq_idx]; in mana_fence_rqs()
1388 err = mana_fence_rq(apc, rxq); in mana_fence_rqs()
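mana_fence_rq() arms a completion, sends a fence request identified by rxq->rxobj, and waits up to 10 seconds (10 * hz ticks) for the fence CQE; mana_fence_rqs() runs that once per queue and stops at the first error. A rough standalone model of the flow, assuming a plain flag-and-deadline wait in place of the kernel completion primitive:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

struct fence_model {
        bool done;          /* models rxq->fence_event */
};

static int
fence_rq(struct fence_model *f, int rxq_idx)
{
        time_t deadline;

        f->done = false;    /* models init_completion() */
        /* ...send the fence request carrying rxq->rxobj here... */
        f->done = true;     /* in the driver, the fence CQE handler
                             * calls complete() asynchronously */

        /* models wait_for_completion_timeout(..., 10 * hz) */
        deadline = time(NULL) + 10;
        while (!f->done) {
                if (time(NULL) >= deadline) {
                        fprintf(stderr, "Failed to fence RXQ %d: timeout\n",
                            rxq_idx);
                        return (-1);
                }
        }
        return (0);
}

int
main(void)
{
        struct fence_model fences[4];

        /* models mana_fence_rqs(): stop on the first RQ that fails */
        for (int i = 0; i < 4; i++)
                if (fence_rq(&fences[i], i) != 0)
                        return (1);
        puts("all RQs fenced");
        return (0);
}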
1589 mana_post_pkt_rxq(struct mana_rxq *rxq) in mana_post_pkt_rxq() argument
1595 curr_index = rxq->buf_index++; in mana_post_pkt_rxq()
1596 if (rxq->buf_index == rxq->num_rx_buf) in mana_post_pkt_rxq()
1597 rxq->buf_index = 0; in mana_post_pkt_rxq()
1599 recv_buf_oob = &rxq->rx_oobs[curr_index]; in mana_post_pkt_rxq()
1601 err = mana_gd_post_work_request(rxq->gdma_rq, &recv_buf_oob->wqe_req, in mana_post_pkt_rxq()
1605 rxq->rxq_idx, err); in mana_post_pkt_rxq()
1611 rxq->rxq_idx, recv_buf_oob->wqe_inf.wqe_size_in_bu); in mana_post_pkt_rxq()
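The buf_index bookkeeping in mana_post_pkt_rxq() is a plain ring advance: hand out the current slot, then wrap the index to zero when it reaches num_rx_buf. A standalone sketch, with a hypothetical post_work_request() stub in place of mana_gd_post_work_request():

#include <stdint.h>
#include <stdio.h>

#define NUM_RX_BUF 8

static uint32_t buf_index;      /* models rxq->buf_index */

static int
post_work_request(uint32_t slot)
{
        printf("posted recv WQE for slot %u\n", slot);
        return (0);
}

static int
post_pkt_rxq(void)
{
        uint32_t curr = buf_index++;    /* consume the current slot... */

        if (buf_index == NUM_RX_BUF)    /* ...and wrap like the driver */
                buf_index = 0;
        return (post_work_request(curr));
}

int
main(void)
{
        for (int i = 0; i < 10; i++)    /* demonstrates the wraparound */
                post_pkt_rxq();
        return (0);
}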
1617 struct mana_rxq *rxq) in mana_rx_mbuf() argument
1619 struct mana_stats *rx_stats = &rxq->stats; in mana_rx_mbuf()
1620 if_t ndev = rxq->ndev; in mana_rx_mbuf()
1622 uint16_t rxq_idx = rxq->rxq_idx; in mana_rx_mbuf()
1628 rxq->rx_cq.work_done++; in mana_rx_mbuf()
1700 rxq->lro_tried++; in mana_rx_mbuf()
1701 if (rxq->lro.lro_cnt != 0 && in mana_rx_mbuf()
1702 tcp_lro_rx(&rxq->lro, mbuf, 0) == 0) in mana_rx_mbuf()
1705 rxq->lro_failed++; in mana_rx_mbuf()
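mana_rx_mbuf() offers each received mbuf to LRO first and only falls back to the regular input path when LRO is disabled or rejects the packet, bumping lro_tried and lro_failed along the way. A hypothetical model of that decision; lro_model, lro_rx, and rx_pkt are invented names standing in for tcp_lro_rx() and the if_input() fallback:

#include <stdio.h>

struct lro_model {
        int lro_cnt;        /* 0 means LRO is unavailable */
        unsigned tried, failed;
};

static int
lro_rx(struct lro_model *lro, const char *pkt)
{
        (void)pkt;
        return (lro->lro_cnt != 0 ? 0 : -1);    /* 0 = consumed, like tcp_lro_rx() */
}

static void
rx_pkt(struct lro_model *lro, const char *pkt)
{
        lro->tried++;                           /* models rxq->lro_tried++ */
        if (lro->lro_cnt != 0 && lro_rx(lro, pkt) == 0)
                return;                         /* aggregated by LRO, done */
        lro->failed++;                          /* models rxq->lro_failed++ */
        printf("direct input: %s\n", pkt);      /* models the if_input() fallback */
}

int
main(void)
{
        struct lro_model lro = { .lro_cnt = 0 };

        rx_pkt(&lro, "pkt0");                   /* LRO off: falls back */
        lro.lro_cnt = 8;
        rx_pkt(&lro, "pkt1");                   /* LRO on: aggregated */
        printf("tried %u, failed %u\n", lro.tried, lro.failed);
        return (0);
}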
1720 mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq, in mana_process_rx_cqe() argument
1725 if_t ndev = rxq->ndev; in mana_process_rx_cqe()
1738 rxbuf_oob = &rxq->rx_oobs[rxq->buf_index]; in mana_process_rx_cqe()
1747 complete(&rxq->fence_event); in mana_process_rx_cqe()
1764 rxq->gdma_id, cq->gdma_id, rxq->rxobj); in mana_process_rx_cqe()
1768 curr = rxq->buf_index; in mana_process_rx_cqe()
1769 rxbuf_oob = &rxq->rx_oobs[curr]; in mana_process_rx_cqe()
1781 mana_unload_rx_mbuf(apc, rxq, rxbuf_oob, false); in mana_process_rx_cqe()
1784 err = mana_load_rx_mbuf(apc, rxq, rxbuf_oob, true); in mana_process_rx_cqe()
1789 counter_u64_add(rxq->stats.mbuf_alloc_fail, 1); in mana_process_rx_cqe()
1796 mana_load_rx_mbuf(apc, rxq, rxbuf_oob, false); in mana_process_rx_cqe()
1799 mana_rx_mbuf(old_mbuf, oob, rxq); in mana_process_rx_cqe()
1802 mana_move_wq_tail(rxq->gdma_rq, rxbuf_oob->wqe_inf.wqe_size_in_bu); in mana_process_rx_cqe()
1804 mana_post_pkt_rxq(rxq); in mana_process_rx_cqe()
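mana_process_rx_cqe() refills a slot with the classic swap pattern: load a fresh mbuf into the receive OOB first, and only when that succeeds pass the old mbuf up via mana_rx_mbuf(); if the allocation fails, the old buffer is re-armed, the packet is dropped, and mbuf_alloc_fail is counted. Either way the WQ tail advances and the slot is reposted. A simplified standalone model, using malloc()/free() for the mbuf lifecycle:

#include <stdio.h>
#include <stdlib.h>

struct rx_slot {
        void *buf;          /* models rxbuf_oob's attached mbuf */
};

static unsigned alloc_fail;     /* models stats.mbuf_alloc_fail */

static void
process_rx_slot(struct rx_slot *slot, size_t datasize)
{
        void *old = slot->buf;
        void *fresh = malloc(datasize);     /* models mana_load_rx_mbuf(..., true) */

        if (fresh == NULL) {
                alloc_fail++;
                slot->buf = old;            /* re-arm the old buffer, drop the packet */
                return;
        }
        slot->buf = fresh;                  /* slot now owns the new buffer */
        printf("delivering %p up the stack\n", old);    /* models mana_rx_mbuf() */
        free(old);                          /* the stack frees it in the driver */
        /* ...then advance the WQ tail and repost (mana_post_pkt_rxq) */
}

int
main(void)
{
        struct rx_slot slot = { .buf = malloc(2048) };

        process_rx_slot(&slot, 2048);
        free(slot.buf);
        return (0);
}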
1826 if (comp[i].wq_num != cq->rxq->gdma_id) { in mana_poll_rx_cq()
1830 comp[i].wq_num, cq->rxq->gdma_id); in mana_poll_rx_cq()
1834 mana_process_rx_cqe(cq->rxq, cq, &comp[i]); in mana_poll_rx_cq()
1839 cq->rxq->gdma_rq->gdma_dev->gdma_context; in mana_poll_rx_cq()
1841 mana_gd_wq_ring_doorbell(gc, cq->rxq->gdma_rq); in mana_poll_rx_cq()
1844 tcp_lro_flush_all(&cq->rxq->lro); in mana_poll_rx_cq()
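mana_poll_rx_cq() drains a batch of completions, checks that each CQE's wq_num really belongs to this RXQ's GDMA queue, and defers both the doorbell ring and the LRO flush until the whole batch has been handled, so each happens once per poll rather than once per packet. A hypothetical model of that batching:

#include <stdint.h>
#include <stdio.h>

struct comp {
        uint32_t wq_num;    /* models the GDMA completion's queue id */
};

int
main(void)
{
        const uint32_t my_wq = 7;           /* models cq->rxq->gdma_id */
        struct comp batch[] = { {7}, {7}, {9}, {7} };
        int n = sizeof(batch) / sizeof(batch[0]);

        for (int i = 0; i < n; i++) {
                if (batch[i].wq_num != my_wq) {     /* mismatched completion */
                        fprintf(stderr, "WQ %u != expected %u, skipping\n",
                            batch[i].wq_num, my_wq);
                        continue;
                }
                printf("processing CQE %d\n", i);   /* models mana_process_rx_cqe() */
        }
        puts("ring doorbell once");     /* models mana_gd_wq_ring_doorbell() */
        puts("flush LRO once");         /* models tcp_lro_flush_all() */
        return (0);
}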
2192 mana_destroy_rxq(struct mana_port_context *apc, struct mana_rxq *rxq, in mana_destroy_rxq() argument
2199 if (!rxq) in mana_destroy_rxq()
2209 mana_destroy_wq_obj(apc, GDMA_RQ, rxq->rxobj); in mana_destroy_rxq()
2211 mana_deinit_cq(apc, &rxq->rx_cq); in mana_destroy_rxq()
2213 mana_free_counters((counter_u64_t *)&rxq->stats, in mana_destroy_rxq()
2214 sizeof(rxq->stats)); in mana_destroy_rxq()
2217 tcp_lro_free(&rxq->lro); in mana_destroy_rxq()
2219 for (i = 0; i < rxq->num_rx_buf; i++) { in mana_destroy_rxq()
2220 rx_oob = &rxq->rx_oobs[i]; in mana_destroy_rxq()
2223 mana_unload_rx_mbuf(apc, rxq, rx_oob, true); in mana_destroy_rxq()
2228 if (rxq->gdma_rq) in mana_destroy_rxq()
2229 mana_gd_destroy_queue(gc, rxq->gdma_rq); in mana_destroy_rxq()
2231 free(rxq, M_DEVBUF); in mana_destroy_rxq()
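mana_destroy_rxq() tears down in reverse order of creation and is safe to call on a partially built queue: note the early return on a NULL rxq and the guard around gdma_rq. A sketch of the same idiom with hypothetical types, malloc()/free() standing in for the kernel allocators and queue destructors:

#include <stdlib.h>

struct rxq_model {
        void **bufs;        /* models the rx_oobs' attached mbufs */
        int num_rx_buf;
        void *gdma_rq;      /* may be NULL if creation failed early */
};

static void
destroy_rxq(struct rxq_model *rxq)
{
        if (rxq == NULL)                    /* safe on the error path */
                return;
        for (int i = 0; i < rxq->num_rx_buf; i++)
                free(rxq->bufs[i]);         /* models mana_unload_rx_mbuf(..., true) */
        free(rxq->bufs);
        if (rxq->gdma_rq != NULL)           /* only if the queue was created */
                free(rxq->gdma_rq);         /* models mana_gd_destroy_queue() */
        free(rxq);                          /* models free(rxq, M_DEVBUF) */
}

int
main(void)
{
        struct rxq_model *rxq = calloc(1, sizeof(*rxq));

        rxq->num_rx_buf = 2;
        rxq->bufs = calloc(rxq->num_rx_buf, sizeof(void *));
        rxq->bufs[0] = malloc(2048);
        rxq->bufs[1] = malloc(2048);
        destroy_rxq(rxq);
        return (0);
}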
2239 struct mana_rxq *rxq, uint32_t *rxq_size, uint32_t *cq_size) in mana_alloc_rx_wqe() argument
2245 if (rxq->datasize == 0 || rxq->datasize > PAGE_SIZE) { in mana_alloc_rx_wqe()
2247 "WARNING: Invalid rxq datasize %u\n", rxq->datasize); in mana_alloc_rx_wqe()
2253 for (buf_idx = 0; buf_idx < rxq->num_rx_buf; buf_idx++) { in mana_alloc_rx_wqe()
2254 rx_oob = &rxq->rx_oobs[buf_idx]; in mana_alloc_rx_wqe()
2266 err = mana_load_rx_mbuf(apc, rxq, rx_oob, true); in mana_alloc_rx_wqe()
2291 mana_push_wqe(struct mana_rxq *rxq) in mana_push_wqe() argument
2297 for (buf_idx = 0; buf_idx < rxq->num_rx_buf; buf_idx++) { in mana_push_wqe()
2298 rx_oob = &rxq->rx_oobs[buf_idx]; in mana_push_wqe()
2300 err = mana_gd_post_and_ring(rxq->gdma_rq, &rx_oob->wqe_req, in mana_push_wqe()
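mana_alloc_rx_wqe() first validates rxq->datasize (nonzero and at most PAGE_SIZE), then pre-loads an mbuf into every receive OOB; mana_push_wqe() walks the same slots and posts one receive WQE per buffer. A combined standalone sketch; MODEL_PAGE_SIZE and the slot array are assumptions, and printf() stands in for mana_gd_post_and_ring():

#include <stdio.h>
#include <stdlib.h>

#define MODEL_PAGE_SIZE 4096
#define NUM_RX_BUF      4

static int
alloc_rx_wqes(void **slots, size_t datasize)
{
        if (datasize == 0 || datasize > MODEL_PAGE_SIZE) {
                fprintf(stderr, "invalid rxq datasize %zu\n", datasize);
                return (-1);
        }
        for (int i = 0; i < NUM_RX_BUF; i++) {
                slots[i] = malloc(datasize);    /* models mana_load_rx_mbuf() */
                if (slots[i] == NULL)
                        return (-1);
        }
        return (0);
}

static void
push_wqes(void)
{
        for (int i = 0; i < NUM_RX_BUF; i++)    /* models mana_gd_post_and_ring() */
                printf("posted and rang WQE for slot %d\n", i);
}

int
main(void)
{
        void *slots[NUM_RX_BUF] = { 0 };

        if (alloc_rx_wqes(slots, 2048) == 0)
                push_wqes();
        for (int i = 0; i < NUM_RX_BUF; i++)
                free(slots[i]);
        return (0);
}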
2320 struct mana_rxq *rxq; in mana_create_rxq() local
2325 rxq = malloc(sizeof(*rxq) + in mana_create_rxq()
2328 rxq->ndev = ndev; in mana_create_rxq()
2329 rxq->num_rx_buf = RX_BUFFERS_PER_QUEUE; in mana_create_rxq()
2330 rxq->rxq_idx = rxq_idx; in mana_create_rxq()
2335 rxq->datasize = ALIGN(apc->frame_size, MCLBYTES); in mana_create_rxq()
2336 if (rxq->datasize > MAX_FRAME_SIZE) in mana_create_rxq()
2337 rxq->datasize = MAX_FRAME_SIZE; in mana_create_rxq()
2340 rxq_idx, rxq->datasize); in mana_create_rxq()
2342 rxq->rxobj = INVALID_MANA_HANDLE; in mana_create_rxq()
2344 err = mana_alloc_rx_wqe(apc, rxq, &rq_size, &cq_size); in mana_create_rxq()
2350 err = tcp_lro_init(&rxq->lro); in mana_create_rxq()
2355 rxq->lro.ifp = ndev; in mana_create_rxq()
2359 mana_alloc_counters((counter_u64_t *)&rxq->stats, in mana_create_rxq()
2360 sizeof(rxq->stats)); in mana_create_rxq()
2370 err = mana_gd_create_mana_wq_cq(gd, &spec, &rxq->gdma_rq); in mana_create_rxq()
2375 cq = &rxq->rx_cq; in mana_create_rxq()
2377 cq->rxq = rxq; in mana_create_rxq()
2392 wq_spec.gdma_region = rxq->gdma_rq->mem_info.dma_region_handle; in mana_create_rxq()
2393 wq_spec.queue_size = rxq->gdma_rq->queue_size; in mana_create_rxq()
2401 &wq_spec, &cq_spec, &rxq->rxobj); in mana_create_rxq()
2405 rxq->gdma_rq->id = wq_spec.queue_index; in mana_create_rxq()
2408 rxq->gdma_rq->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION; in mana_create_rxq()
2411 rxq->gdma_id = rxq->gdma_rq->id; in mana_create_rxq()
2414 err = mana_push_wqe(rxq); in mana_create_rxq()
2445 apc->port_idx, rxq->rxq_idx, cq->cpu); in mana_create_rxq()
2449 apc->port_idx, rxq->rxq_idx); in mana_create_rxq()
2455 return rxq; in mana_create_rxq()
2459 mana_destroy_rxq(apc, rxq, false); in mana_create_rxq()
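The sizing step near the top of mana_create_rxq() rounds the configured frame size up to a cluster-size multiple and clamps it to the device maximum before any buffers are loaded. A worked standalone example; MODEL_MCLBYTES and MODEL_MAX_FRAME are assumed values, since the real MCLBYTES and MAX_FRAME_SIZE come from the kernel headers:

#include <stdint.h>
#include <stdio.h>

#define MODEL_MCLBYTES  2048
#define MODEL_MAX_FRAME 16384
#define ALIGN_UP(x, a)  (((x) + (a) - 1) / (a) * (a))

static uint32_t
rx_datasize(uint32_t frame_size)
{
        uint32_t datasize = ALIGN_UP(frame_size, MODEL_MCLBYTES);

        if (datasize > MODEL_MAX_FRAME)     /* models the MAX_FRAME_SIZE clamp */
                datasize = MODEL_MAX_FRAME;
        return (datasize);
}

int
main(void)
{
        printf("1500  -> %u\n", rx_datasize(1500));     /* 2048 */
        printf("9000  -> %u\n", rx_datasize(9000));     /* 10240 */
        printf("65000 -> %u\n", rx_datasize(65000));    /* clamped to 16384 */
        return (0);
}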
2471 struct mana_rxq *rxq; in mana_add_rx_queues() local
2476 rxq = mana_create_rxq(apc, i, &ac->eqs[i], ndev); in mana_add_rx_queues()
2477 if (!rxq) { in mana_add_rx_queues()
2482 apc->rxqs[i] = rxq; in mana_add_rx_queues()
2493 struct mana_rxq *rxq; in mana_destroy_vport() local
2497 rxq = apc->rxqs[rxq_idx]; in mana_destroy_vport()
2498 if (!rxq) in mana_destroy_vport()
2501 mana_destroy_rxq(apc, rxq, true); in mana_destroy_vport()
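mana_add_rx_queues() and mana_destroy_vport() are the symmetric per-port loops: create one RXQ per channel and stop at the first failure, then tear down every non-NULL entry, which also covers a partial bring-up. A minimal model of that symmetry, with malloc()/free() standing in for mana_create_rxq()/mana_destroy_rxq():

#include <stdio.h>
#include <stdlib.h>

#define NUM_QUEUES 4

static void *rxqs[NUM_QUEUES];      /* models apc->rxqs[] */

static int
add_rx_queues(void)
{
        for (int i = 0; i < NUM_QUEUES; i++) {
                rxqs[i] = malloc(64);       /* models mana_create_rxq() */
                if (rxqs[i] == NULL)
                        return (-1);        /* later slots stay NULL */
        }
        return (0);
}

static void
destroy_vport(void)
{
        for (int i = 0; i < NUM_QUEUES; i++) {
                if (rxqs[i] == NULL)        /* skip never-created queues */
                        continue;
                free(rxqs[i]);              /* models mana_destroy_rxq(..., true) */
                rxqs[i] = NULL;
        }
}

int
main(void)
{
        if (add_rx_queues() != 0)
                fprintf(stderr, "partial bring-up\n");
        destroy_vport();                    /* safe either way */
        return (0);
}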