Lines Matching refs:rxq
27 int qede_alloc_rx_buffer(struct qede_rx_queue *rxq, bool allow_lazy) in qede_alloc_rx_buffer() argument
38 if (allow_lazy && likely(rxq->filled_buffers > 12)) { in qede_alloc_rx_buffer()
39 rxq->filled_buffers--; in qede_alloc_rx_buffer()
50 mapping = dma_map_page(rxq->dev, data, 0, in qede_alloc_rx_buffer()
51 PAGE_SIZE, rxq->data_direction); in qede_alloc_rx_buffer()
52 if (unlikely(dma_mapping_error(rxq->dev, mapping))) { in qede_alloc_rx_buffer()
57 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX]; in qede_alloc_rx_buffer()
63 rx_bd = (struct eth_rx_bd *)qed_chain_produce(&rxq->rx_bd_ring); in qede_alloc_rx_buffer()
67 rxq->rx_headroom); in qede_alloc_rx_buffer()
69 rxq->sw_rx_prod++; in qede_alloc_rx_buffer()
70 rxq->filled_buffers++; in qede_alloc_rx_buffer()
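
The qede_alloc_rx_buffer() hits above show two patterns: a lazy-refill heuristic (skip the allocation while more than 12 buffers are already posted and just consume a credit) and power-of-two masking of the free-running sw_rx_prod index into the software ring. Below is a minimal standalone sketch of those two patterns, not the driver's code; the struct, RING_SIZE/RING_MASK, the threshold constant and malloc() stand in for the real types, NUM_RX_BDS_MAX and alloc_page()/dma_map_page().

/* Standalone sketch of the lazy-refill + masked-index pattern seen in
 * qede_alloc_rx_buffer().  All names here are illustrative stand-ins. */
#include <stdbool.h>
#include <stdlib.h>

#define RING_SIZE 8192                 /* must be a power of two            */
#define RING_MASK (RING_SIZE - 1)      /* plays the role of NUM_RX_BDS_MAX  */
#define LAZY_REFILL_THRESHOLD 12       /* mirrors the "> 12" check above    */

struct sketch_rxq {
        void *sw_rx_ring[RING_SIZE];   /* software shadow of posted pages   */
        unsigned short sw_rx_prod;     /* free-running producer index       */
        unsigned int filled_buffers;   /* buffers currently posted to HW    */
};

static int sketch_alloc_rx_buffer(struct sketch_rxq *rxq, bool allow_lazy)
{
        void *page;

        /* Lazy path: plenty of buffers are posted, so just consume credit. */
        if (allow_lazy && rxq->filled_buffers > LAZY_REFILL_THRESHOLD) {
                rxq->filled_buffers--;
                return 0;
        }

        page = malloc(4096);           /* stands in for alloc_page() + dma_map_page() */
        if (!page)
                return -1;

        /* The free-running 16-bit index is masked into the ring. */
        rxq->sw_rx_ring[rxq->sw_rx_prod & RING_MASK] = page;
        rxq->sw_rx_prod++;
        rxq->filled_buffers++;
        return 0;
}
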
502 bool qede_has_rx_work(struct qede_rx_queue *rxq) in qede_has_rx_work() argument
509 hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr); in qede_has_rx_work()
510 sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring); in qede_has_rx_work()
515 static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq) in qede_rx_bd_ring_consume() argument
517 qed_chain_consume(&rxq->rx_bd_ring); in qede_rx_bd_ring_consume()
518 rxq->sw_rx_cons++; in qede_rx_bd_ring_consume()
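
qede_has_rx_work() compares the completion consumer index the NIC writes into host memory (hw_cons_ptr, little-endian 16-bit) with the driver's own consumer index, and qede_rx_bd_ring_consume() advances the software BD consumer by one. A standalone sketch of that producer/consumer test with illustrative types; the real code goes through le16_to_cpu() and the qed_chain_*() helpers.

#include <stdbool.h>
#include <stdint.h>

struct sketch_rx_indices {
        volatile uint16_t *hw_cons_ptr; /* completion consumer written by the NIC */
        uint16_t sw_comp_cons;          /* completions software has processed     */
        uint16_t sw_rx_cons;            /* BDs software has consumed              */
};

static bool sketch_has_rx_work(struct sketch_rx_indices *idx)
{
        /* The driver loads this through le16_to_cpu(); only (in)equality
         * matters, so 16-bit wrap-around is harmless. */
        return *idx->hw_cons_ptr != idx->sw_comp_cons;
}

static void sketch_rx_bd_ring_consume(struct sketch_rx_indices *idx)
{
        idx->sw_rx_cons++;              /* mirrors qed_chain_consume() + sw_rx_cons++ */
}
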
524 static inline void qede_reuse_page(struct qede_rx_queue *rxq, in qede_reuse_page() argument
527 struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring); in qede_reuse_page()
531 curr_prod = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX]; in qede_reuse_page()
538 rxq->rx_headroom); in qede_reuse_page()
540 rxq->sw_rx_prod++; in qede_reuse_page()
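
qede_reuse_page() re-posts an already-mapped page: the software descriptor is copied from the current consumer slot into the current producer slot and a fresh BD is produced, so no new allocation or DMA mapping is needed. A sketch under assumed types; the eth_rx_bd write itself is only hinted at in a comment.

#include <stdint.h>

#define REUSE_RING_SIZE 8192
#define REUSE_RING_MASK (REUSE_RING_SIZE - 1)

struct sketch_sw_rx_data {
        void *page;
        uint64_t mapping;              /* DMA address of the page          */
        unsigned int page_offset;
};

struct sketch_reuse_rxq {
        struct sketch_sw_rx_data sw_rx_ring[REUSE_RING_SIZE];
        uint16_t sw_rx_prod, sw_rx_cons;
        unsigned int rx_headroom;
};

static void sketch_reuse_page(struct sketch_reuse_rxq *rxq,
                              struct sketch_sw_rx_data *curr_cons)
{
        struct sketch_sw_rx_data *curr_prod =
                &rxq->sw_rx_ring[rxq->sw_rx_prod & REUSE_RING_MASK];

        *curr_prod = *curr_cons;       /* recycle page + mapping as-is     */

        /* The real code also writes mapping + page_offset + rx_headroom
         * into the new eth_rx_bd produced on rx_bd_ring. */
        rxq->sw_rx_prod++;
}
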
547 void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, u8 count) in qede_recycle_rx_bd_ring() argument
552 curr_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX]; in qede_recycle_rx_bd_ring()
553 qede_reuse_page(rxq, curr_cons); in qede_recycle_rx_bd_ring()
554 qede_rx_bd_ring_consume(rxq); in qede_recycle_rx_bd_ring()
558 static inline int qede_realloc_rx_buffer(struct qede_rx_queue *rxq, in qede_realloc_rx_buffer() argument
562 curr_cons->page_offset += rxq->rx_buf_seg_size; in qede_realloc_rx_buffer()
565 if (unlikely(qede_alloc_rx_buffer(rxq, true))) { in qede_realloc_rx_buffer()
569 curr_cons->page_offset -= rxq->rx_buf_seg_size; in qede_realloc_rx_buffer()
574 dma_unmap_page(rxq->dev, curr_cons->mapping, in qede_realloc_rx_buffer()
575 PAGE_SIZE, rxq->data_direction); in qede_realloc_rx_buffer()
582 qede_reuse_page(rxq, curr_cons); in qede_realloc_rx_buffer()
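
qede_realloc_rx_buffer() advances page_offset by rx_buf_seg_size after each received segment; only when the whole page has been handed out does it allocate a replacement and unmap the old page, otherwise the same page is re-posted at the new offset, and an allocation failure simply rolls the offset back so the current buffer stays usable. A sketch of that control flow with stand-in helpers and an assumed 4096-byte page size.

#include <errno.h>
#include <stdbool.h>

#define SKETCH_PAGE_SIZE 4096

struct seg_buf {
        unsigned int page_offset;
};

/* stand-ins for qede_alloc_rx_buffer()/qede_reuse_page()/dma_unmap_page() */
static int  sketch_alloc(bool lazy)        { (void)lazy; return 0; }
static void sketch_repost(struct seg_buf *b) { (void)b; }
static void sketch_unmap(struct seg_buf *b)  { (void)b; }

static int sketch_realloc_rx_buffer(struct seg_buf *buf, unsigned int seg_size)
{
        buf->page_offset += seg_size;

        if (buf->page_offset == SKETCH_PAGE_SIZE) {
                /* Page fully consumed: a brand new buffer is needed. */
                if (sketch_alloc(true)) {
                        buf->page_offset -= seg_size;  /* roll back, keep using it */
                        return -ENOMEM;
                }
                sketch_unmap(buf);                     /* old page leaves the ring */
        } else {
                /* Page still has room: re-post it at the new offset. */
                sketch_repost(buf);
        }
        return 0;
}
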
588 void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq) in qede_update_rx_prod() argument
590 u16 bd_prod = qed_chain_get_prod_idx(&rxq->rx_bd_ring); in qede_update_rx_prod()
591 u16 cqe_prod = qed_chain_get_prod_idx(&rxq->rx_comp_ring); in qede_update_rx_prod()
604 internal_ram_wr(rxq->hw_rxq_prod_addr, sizeof(rx_prods), in qede_update_rx_prod()
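
qede_update_rx_prod() packs the BD-ring and CQE-ring producer indices into one little-endian structure and writes it to the device via internal_ram_wr() at the queue's producer address, after a write barrier (not visible in the rxq-matching lines above) so the firmware never observes a producer value ahead of the descriptors it covers; batching the doorbell like this also keeps it to one device write per poll cycle. A sketch using a C11 release fence and memcpy() as stand-ins for the kernel barrier and the device write.

#include <stdatomic.h>
#include <stdint.h>
#include <string.h>

struct sketch_rx_prods {
        uint16_t bd_prod;              /* little-endian on the wire        */
        uint16_t cqe_prod;
};

static void sketch_update_rx_prod(volatile void *hw_prod_addr,
                                  uint16_t bd_prod, uint16_t cqe_prod)
{
        struct sketch_rx_prods prods = {
                .bd_prod  = bd_prod,   /* driver uses cpu_to_le16() here   */
                .cqe_prod = cqe_prod,
        };

        /* Descriptors must be globally visible before the doorbell. */
        atomic_thread_fence(memory_order_release);

        /* Stands in for internal_ram_wr(rxq->hw_rxq_prod_addr, ...). */
        memcpy((void *)hw_prod_addr, &prods, sizeof(prods));
}
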
639 struct qede_rx_queue *rxq, in qede_skb_receive() argument
665 struct qede_rx_queue *rxq, in qede_fill_frag_skb() argument
668 struct sw_rx_data *current_bd = &rxq->sw_rx_ring[rxq->sw_rx_cons & in qede_fill_frag_skb()
670 struct qede_agg_info *tpa_info = &rxq->tpa_info[tpa_agg_index]; in qede_fill_frag_skb()
679 current_bd->page_offset + rxq->rx_headroom, in qede_fill_frag_skb()
682 if (unlikely(qede_realloc_rx_buffer(rxq, current_bd))) { in qede_fill_frag_skb()
690 qede_rx_bd_ring_consume(rxq); in qede_fill_frag_skb()
693 skb->truesize += rxq->rx_buf_seg_size; in qede_fill_frag_skb()
700 qede_recycle_rx_bd_ring(rxq, 1); in qede_fill_frag_skb()
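
qede_fill_frag_skb() attaches the buffer at the current consumer slot as one more page fragment of the aggregation skb, re-posts or replaces the buffer via qede_realloc_rx_buffer(), consumes the BD and charges the skb: len/data_len grow by the payload on this BD while truesize grows by the whole rx_buf_seg_size segment. A small accounting sketch with stand-in fields, not the real struct sk_buff.

struct sketch_frag_skb {
        unsigned int len;              /* total packet bytes                */
        unsigned int data_len;         /* bytes living in page frags        */
        unsigned int truesize;         /* memory actually consumed          */
        unsigned int nr_frags;
};

static void sketch_fill_frag(struct sketch_frag_skb *skb,
                             unsigned int len_on_bd,
                             unsigned int rx_buf_seg_size)
{
        skb->nr_frags++;               /* skb_fill_page_desc() equivalent   */
        skb->len      += len_on_bd;
        skb->data_len += len_on_bd;
        skb->truesize += rx_buf_seg_size; /* charge the full segment        */
}
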
740 qede_build_skb(struct qede_rx_queue *rxq, in qede_build_skb() argument
747 skb = build_skb(buf, rxq->rx_buf_seg_size); in qede_build_skb()
760 struct qede_rx_queue *rxq, in qede_tpa_rx_build_skb() argument
766 skb = qede_build_skb(rxq, bd, len, pad); in qede_tpa_rx_build_skb()
767 bd->page_offset += rxq->rx_buf_seg_size; in qede_tpa_rx_build_skb()
770 if (unlikely(qede_alloc_rx_buffer(rxq, true))) { in qede_tpa_rx_build_skb()
773 bd->page_offset -= rxq->rx_buf_seg_size; in qede_tpa_rx_build_skb()
780 qede_reuse_page(rxq, bd); in qede_tpa_rx_build_skb()
784 qede_rx_bd_ring_consume(rxq); in qede_tpa_rx_build_skb()
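
qede_build_skb() wraps the received buffer in place with build_skb() over one rx_buf_seg_size slice, reserves the placement pad (the headroom the NIC left before the frame) and then puts the frame length; qede_tpa_rx_build_skb() does the same and then either advances the page offset or posts a replacement, as in qede_realloc_rx_buffer() above. A sketch of just the in-place wrap and its pad/len arithmetic, with made-up fields.

struct sketch_skb {
        unsigned char *head;           /* start of the rx_buf_seg_size slice */
        unsigned char *data;           /* current start of packet data       */
        unsigned int len;
};

static void sketch_build_skb(struct sketch_skb *skb, unsigned char *buf,
                             unsigned int pad, unsigned int len)
{
        skb->head = buf;               /* build_skb(buf, rx_buf_seg_size)    */
        skb->data = buf + pad;         /* skb_reserve(skb, pad)              */
        skb->len  = len;               /* skb_put(skb, len)                  */
}
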
791 struct qede_rx_queue *rxq, in qede_rx_build_skb() argument
809 qede_reuse_page(rxq, bd); in qede_rx_build_skb()
813 skb = qede_build_skb(rxq, bd, len, pad); in qede_rx_build_skb()
815 if (unlikely(qede_realloc_rx_buffer(rxq, bd))) { in qede_rx_build_skb()
826 qede_rx_bd_ring_consume(rxq); in qede_rx_build_skb()
832 struct qede_rx_queue *rxq, in qede_tpa_start() argument
835 struct qede_agg_info *tpa_info = &rxq->tpa_info[cqe->tpa_agg_index]; in qede_tpa_start()
839 sw_rx_data_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX]; in qede_tpa_start()
840 pad = cqe->placement_offset + rxq->rx_headroom; in qede_tpa_start()
842 tpa_info->skb = qede_tpa_rx_build_skb(edev, rxq, sw_rx_data_cons, in qede_tpa_start()
856 qede_rx_bd_ring_consume(rxq); in qede_tpa_start()
878 qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index, in qede_tpa_start()
953 skb_record_rx_queue(skb, fp->rxq->rxq_id); in qede_gro_receive()
954 qede_skb_receive(edev, fp, fp->rxq, skb, vlan_tag); in qede_gro_receive()
958 struct qede_rx_queue *rxq, in qede_tpa_cont() argument
964 qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index, in qede_tpa_cont()
976 struct qede_rx_queue *rxq = fp->rxq; in qede_tpa_end() local
981 tpa_info = &rxq->tpa_info[cqe->tpa_agg_index]; in qede_tpa_end()
985 dma_unmap_page(rxq->dev, tpa_info->buffer.mapping, in qede_tpa_end()
986 PAGE_SIZE, rxq->data_direction); in qede_tpa_end()
989 qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index, in qede_tpa_end()
1026 qede_reuse_page(rxq, &tpa_info->buffer); in qede_tpa_end()
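
qede_tpa_start()/qede_tpa_cont()/qede_tpa_end() implement hardware LRO (TPA): each aggregation the NIC opens is tracked in rxq->tpa_info[] by its tpa_agg_index; start parks an skb there and consumes the first BD, cont appends further segments via qede_fill_frag_skb(), and end unmaps the head buffer, finalizes the skb and hands it to GRO. A data-flow sketch of that per-index state, with made-up types and no error handling.

#include <stddef.h>

#define MAX_AGG 64                     /* stand-in for the tpa_info array size */

enum agg_state { AGG_FREE, AGG_OPEN };

struct sketch_agg {
        enum agg_state state;
        void *skb;                     /* skb under construction            */
        unsigned int frags;
};

static struct sketch_agg sketch_tpa_info[MAX_AGG];

static void sketch_tpa_start(unsigned int idx, void *skb)
{
        sketch_tpa_info[idx].state = AGG_OPEN;
        sketch_tpa_info[idx].skb   = skb;
        sketch_tpa_info[idx].frags = 0;
}

static void sketch_tpa_cont(unsigned int idx, unsigned int nr_segs)
{
        sketch_tpa_info[idx].frags += nr_segs; /* each segment becomes an skb frag */
}

static void *sketch_tpa_end(unsigned int idx)
{
        void *skb = sketch_tpa_info[idx].skb;  /* finished skb goes to GRO/stack */

        sketch_tpa_info[idx].state = AGG_FREE;
        sketch_tpa_info[idx].skb   = NULL;
        return skb;
}
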
1081 struct qede_rx_queue *rxq, in qede_rx_xdp() argument
1090 xdp_init_buff(&xdp, rxq->rx_buf_seg_size, &rxq->xdp_rxq); in qede_rx_xdp()
1104 rxq->xdp_no_pass++; in qede_rx_xdp()
1109 if (unlikely(qede_alloc_rx_buffer(rxq, true))) { in qede_rx_xdp()
1110 qede_recycle_rx_bd_ring(rxq, 1); in qede_rx_xdp()
1122 dma_unmap_page(rxq->dev, bd->mapping, PAGE_SIZE, in qede_rx_xdp()
1123 rxq->data_direction); in qede_rx_xdp()
1128 dma_sync_single_for_device(rxq->dev, in qede_rx_xdp()
1130 *len, rxq->data_direction); in qede_rx_xdp()
1135 qede_rx_bd_ring_consume(rxq); in qede_rx_xdp()
1139 if (unlikely(qede_alloc_rx_buffer(rxq, true))) { in qede_rx_xdp()
1140 qede_recycle_rx_bd_ring(rxq, 1); in qede_rx_xdp()
1146 dma_unmap_page(rxq->dev, bd->mapping, PAGE_SIZE, in qede_rx_xdp()
1147 rxq->data_direction); in qede_rx_xdp()
1154 qede_rx_bd_ring_consume(rxq); in qede_rx_xdp()
1163 qede_recycle_rx_bd_ring(rxq, cqe->bd_num); in qede_rx_xdp()
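
qede_rx_xdp() decides buffer ownership from the XDP verdict: PASS leaves the frame on the normal receive path, TX/REDIRECT give the page to XDP (a replacement buffer is posted first and the BD is consumed; if no replacement can be allocated the BD is recycled instead), and DROP recycles the BD outright. The sketch below covers only those ownership decisions, with illustrative enum values and helpers; the real code also re-reads the offsets XDP may have changed and syncs or unmaps the page depending on the verdict.

#include <stdbool.h>

enum sketch_xdp_verdict { SK_XDP_PASS, SK_XDP_TX, SK_XDP_REDIRECT, SK_XDP_DROP };

/* stand-ins for qede_alloc_rx_buffer()/dma_*()/qede_rx_bd_ring_consume()/
 * qede_recycle_rx_bd_ring() */
static int  sk_post_replacement(void) { return 0; }
static void sk_release_page(void)     { } /* simplified: real code syncs (TX) or unmaps (REDIRECT) */
static void sk_consume_bd(void)       { }
static void sk_recycle_bd(void)       { }

/* Returns true if the frame should continue up the regular RX path. */
static bool sketch_rx_xdp(enum sketch_xdp_verdict verdict)
{
        switch (verdict) {
        case SK_XDP_PASS:
                return true;                  /* buffer stays with the stack  */
        case SK_XDP_TX:
        case SK_XDP_REDIRECT:
                if (sk_post_replacement()) {  /* no spare buffer: drop + recycle */
                        sk_recycle_bd();
                        return false;
                }
                sk_release_page();            /* page now belongs to XDP      */
                sk_consume_bd();
                return false;
        case SK_XDP_DROP:
        default:
                sk_recycle_bd();              /* keep page, re-post it to HW  */
                return false;
        }
}
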
1170 struct qede_rx_queue *rxq, in qede_rx_build_jumbo() argument
1184 u16 cur_size = pkt_len > rxq->rx_buf_size ? rxq->rx_buf_size : in qede_rx_build_jumbo()
1195 if (unlikely(qede_alloc_rx_buffer(rxq, true))) in qede_rx_build_jumbo()
1201 bd_cons_idx = rxq->sw_rx_cons & NUM_RX_BDS_MAX; in qede_rx_build_jumbo()
1202 bd = &rxq->sw_rx_ring[bd_cons_idx]; in qede_rx_build_jumbo()
1203 qede_rx_bd_ring_consume(rxq); in qede_rx_build_jumbo()
1205 dma_unmap_page(rxq->dev, bd->mapping, in qede_rx_build_jumbo()
1209 rxq->rx_headroom, cur_size, PAGE_SIZE); in qede_rx_build_jumbo()
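
qede_rx_build_jumbo() consumes the additional BDs of a packet larger than one buffer: the remaining length is carved into rx_buf_size-sized chunks, and each chunk's page is unmapped and attached to the skb as a fragment after a replacement buffer has been posted. A sketch of just the chunking loop, with the frag/replacement steps left as comments.

static unsigned int sketch_jumbo_chunks(unsigned int pkt_len,
                                        unsigned int first_bd_len,
                                        unsigned int rx_buf_size)
{
        unsigned int remaining = pkt_len - first_bd_len;
        unsigned int bds = 0;

        while (remaining) {
                /* Each extra BD carries at most one buffer's worth of data. */
                unsigned int cur_size = remaining > rx_buf_size ?
                                        rx_buf_size : remaining;

                /* Real code: post a replacement buffer, consume the BD,
                 * dma_unmap_page() it and skb_add_rx_frag() the chunk. */
                remaining -= cur_size;
                bds++;
        }
        return bds;
}
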
1225 struct qede_rx_queue *rxq, in qede_rx_process_tpa_cqe() argument
1231 qede_tpa_start(edev, rxq, &cqe->fast_path_tpa_start); in qede_rx_process_tpa_cqe()
1234 qede_tpa_cont(edev, rxq, &cqe->fast_path_tpa_cont); in qede_rx_process_tpa_cqe()
1245 struct qede_rx_queue *rxq) in qede_rx_process_cqe() argument
1247 struct bpf_prog *xdp_prog = READ_ONCE(rxq->xdp_prog); in qede_rx_process_cqe()
1258 cqe = (union eth_rx_cqe *)qed_chain_consume(&rxq->rx_comp_ring); in qede_rx_process_cqe()
1272 return qede_rx_process_tpa_cqe(edev, fp, rxq, cqe, cqe_type); in qede_rx_process_cqe()
1277 bd_cons_idx = rxq->sw_rx_cons & NUM_RX_BDS_MAX; in qede_rx_process_cqe()
1278 bd = &rxq->sw_rx_ring[bd_cons_idx]; in qede_rx_process_cqe()
1282 pad = fp_cqe->placement_offset + rxq->rx_headroom; in qede_rx_process_cqe()
1286 if (!qede_rx_xdp(edev, fp, rxq, xdp_prog, bd, fp_cqe, in qede_rx_process_cqe()
1297 rxq->rx_ip_frags++; in qede_rx_process_cqe()
1299 rxq->rx_hw_errors++; in qede_rx_process_cqe()
1305 skb = qede_rx_build_skb(edev, rxq, bd, len, pad); in qede_rx_process_cqe()
1307 rxq->rx_alloc_errors++; in qede_rx_process_cqe()
1308 qede_recycle_rx_bd_ring(rxq, fp_cqe->bd_num); in qede_rx_process_cqe()
1316 u16 unmapped_frags = qede_rx_build_jumbo(edev, rxq, skb, in qede_rx_process_cqe()
1320 qede_recycle_rx_bd_ring(rxq, unmapped_frags); in qede_rx_process_cqe()
1330 skb_record_rx_queue(skb, rxq->rxq_id); in qede_rx_process_cqe()
1334 qede_skb_receive(edev, fp, rxq, skb, le16_to_cpu(fp_cqe->vlan_tag)); in qede_rx_process_cqe()
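
qede_rx_process_cqe() first classifies the completion: slow-path CQEs are handed to the qed core, TPA CQEs feed the aggregation table above, and only regular fast-path CQEs go through the XDP hook, skb construction, jumbo handling and netif receive. A dispatch sketch with illustrative enum values and helpers.

enum sketch_cqe_type { SK_CQE_SLOWPATH, SK_CQE_TPA, SK_CQE_REGULAR };

static void sk_handle_slowpath(void) { }
static void sk_handle_tpa(void)      { }
static int  sk_handle_regular(void)  { return 1; }

/* Returns the number of packets handed to the stack for this CQE. */
static int sketch_rx_process_cqe(enum sketch_cqe_type type)
{
        switch (type) {
        case SK_CQE_SLOWPATH:
                sk_handle_slowpath();       /* passed to the qed core, no packet */
                return 0;
        case SK_CQE_TPA:
                sk_handle_tpa();            /* feeds the aggregation table above */
                return 0;
        case SK_CQE_REGULAR:
        default:
                return sk_handle_regular(); /* XDP, skb build, netif receive     */
        }
}
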
1341 struct qede_rx_queue *rxq = fp->rxq; in qede_rx_int() local
1346 hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr); in qede_rx_int()
1347 sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring); in qede_rx_int()
1358 rcv_pkts += qede_rx_process_cqe(edev, fp, rxq); in qede_rx_int()
1359 qed_chain_recycle_consumed(&rxq->rx_comp_ring); in qede_rx_int()
1360 sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring); in qede_rx_int()
1364 rxq->rcv_pkts += rcv_pkts; in qede_rx_int()
1367 while (rxq->num_rx_buffers - rxq->filled_buffers) in qede_rx_int()
1368 if (qede_alloc_rx_buffer(rxq, false)) in qede_rx_int()
1372 qede_update_rx_prod(edev, rxq); in qede_rx_int()
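
qede_rx_int() is the NAPI poll body for the queue: completions are processed while the software consumer trails the hardware consumer (bounded by the budget), the BD ring is then refilled back up to num_rx_buffers, and a single producer doorbell is written at the end to cover the whole batch. A standalone sketch of that loop with stand-in helpers.

#include <stdint.h>

struct sketch_rx_ring {
        volatile uint16_t *hw_cons_ptr;
        uint16_t sw_comp_cons;
        unsigned int num_rx_buffers, filled_buffers;
};

static int sk_process_cqe(struct sketch_rx_ring *r)
{
        r->sw_comp_cons++;                       /* one completion handled   */
        if (r->filled_buffers)
                r->filled_buffers--;             /* its buffer left the ring */
        return 1;
}

static int  sk_alloc_buffer(struct sketch_rx_ring *r) { r->filled_buffers++; return 0; }
static void sk_update_rx_prod(struct sketch_rx_ring *r) { (void)r; }

static int sketch_rx_int(struct sketch_rx_ring *r, int budget)
{
        int rcv_pkts = 0;

        /* Drain completions up to the budget. */
        while (*r->hw_cons_ptr != r->sw_comp_cons && budget--)
                rcv_pkts += sk_process_cqe(r);

        /* Refill the BD ring back to its nominal fill level. */
        while (r->num_rx_buffers - r->filled_buffers)
                if (sk_alloc_buffer(r))
                        break;

        /* Single doorbell write after batching all the new buffers. */
        sk_update_rx_prod(r);
        return rcv_pkts;
}
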
1394 if (qede_has_rx_work(fp->rxq)) in qede_poll_is_more_work()
1439 qede_has_rx_work(fp->rxq)) ? in qede_poll()