/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */

#ifndef _HINIC3_RX_H_
#define _HINIC3_RX_H_

#include <linux/bitfield.h>
#include <linux/netdevice.h>

/* Bit-field layout of the RQ CQE offload_type word.
 * NOTE(review): "OFFOLAD" appears to be a typo for "OFFLOAD", but the
 * spelling is part of the public macro names used by the driver's .c
 * files, so it is kept as-is to avoid breaking callers.
 */
#define RQ_CQE_OFFOLAD_TYPE_PKT_TYPE_MASK          GENMASK(4, 0)
#define RQ_CQE_OFFOLAD_TYPE_IP_TYPE_MASK           GENMASK(6, 5)
#define RQ_CQE_OFFOLAD_TYPE_TUNNEL_PKT_FORMAT_MASK GENMASK(11, 8)
#define RQ_CQE_OFFOLAD_TYPE_VLAN_EN_MASK           BIT(21)
/* Extract a named offload_type sub-field from a (CPU-order) CQE word. */
#define RQ_CQE_OFFOLAD_TYPE_GET(val, member) \
	FIELD_GET(RQ_CQE_OFFOLAD_TYPE_##member##_MASK, val)

/* Bit-field layout of the CQE word carrying VLAN tag and packet length
 * (presumably hinic3_rq_cqe.vlan_len — confirm against the RX path).
 */
#define RQ_CQE_SGE_VLAN_MASK GENMASK(15, 0)
#define RQ_CQE_SGE_LEN_MASK  GENMASK(31, 16)
#define RQ_CQE_SGE_GET(val, member) \
	FIELD_GET(RQ_CQE_SGE_##member##_MASK, val)

/* Bit-field layout of the CQE status word: checksum error flags, number
 * of LRO-coalesced segments, and the HW "descriptor done" bit.
 */
#define RQ_CQE_STATUS_CSUM_ERR_MASK GENMASK(15, 0)
#define RQ_CQE_STATUS_NUM_LRO_MASK  GENMASK(23, 16)
#define RQ_CQE_STATUS_RXDONE_MASK   BIT(31)
#define RQ_CQE_STATUS_GET(val, member) \
	FIELD_GET(RQ_CQE_STATUS_##member##_MASK, val)

/* RX Completion information that is provided by HW for a specific RX WQE.
 * All fields are little-endian as written by the device; decode with the
 * RQ_CQE_*_GET() helpers after le32_to_cpu().
 */
struct hinic3_rq_cqe {
	__le32 status;       /* see RQ_CQE_STATUS_* masks */
	__le32 vlan_len;     /* VLAN tag + length, see RQ_CQE_SGE_* masks */
	__le32 offload_type; /* see RQ_CQE_OFFOLAD_TYPE_* masks */
	__le32 rsvd3;
	__le32 rsvd4;
	__le32 rsvd5;
	__le32 rsvd6;
	__le32 pkt_info;
};

/* RX work queue element posted to HW: DMA addresses of the packet buffer
 * and of the completion entry, each split into high/low 32-bit halves.
 */
struct hinic3_rq_wqe {
	__le32 buf_hi_addr;
	__le32 buf_lo_addr;
	__le32 cqe_hi_addr;
	__le32 cqe_lo_addr;
};

/* Per-WQE bookkeeping for the page-pool page backing an RX buffer. */
struct hinic3_rx_info {
	struct page *page;
	u32          page_offset; /* offset of this buffer within the page */
};

/* Software state of one RX queue. */
struct hinic3_rxq {
	struct net_device *netdev;

	u16 q_id;      /* queue index */
	u32 q_depth;   /* number of WQEs (presumably a power of two) */
	u32 q_mask;    /* q_depth - 1, for index wrap — TODO confirm */

	u16 buf_len;       /* RX buffer size in bytes */
	u32 buf_len_shift; /* log2(buf_len) — presumably; verify at init */

	u32 cons_idx; /* consumer index into the queue */
	u32 delta;    /* NOTE(review): likely count of free/unfilled WQEs —
		       * confirm against the refill logic.
		       */

	u32 irq_id;
	u16 msix_entry_idx;

	/* cqe_arr and rx_info are arrays of rq_depth elements. Each element is
	 * statically associated (by index) to a specific rq_wqe.
	 */
	struct hinic3_rq_cqe  *cqe_arr;
	struct hinic3_rx_info *rx_info;
	struct page_pool      *page_pool;

	struct hinic3_io_queue *rq;

	struct hinic3_irq_cfg *irq_cfg;
	u16 next_to_alloc;  /* next WQE index to give a fresh buffer */
	u16 next_to_update; /* next index to make visible to HW — TODO confirm */
	struct device *dev; /* device for DMA mapping */

	dma_addr_t cqe_start_paddr; /* DMA address of the cqe_arr block */
} ____cacheline_aligned;

/* Per-queue resources allocated up front by hinic3_alloc_rxqs_res() and
 * later attached to a live rxq by hinic3_configure_rxqs().
 */
struct hinic3_dyna_rxq_res {
	u16                    next_to_alloc;
	struct hinic3_rx_info *rx_info;
	dma_addr_t             cqe_start_paddr;
	void                  *cqe_start_vaddr;
	struct page_pool      *page_pool;
};

int hinic3_alloc_rxqs(struct net_device *netdev);
void hinic3_free_rxqs(struct net_device *netdev);

int hinic3_alloc_rxqs_res(struct net_device *netdev, u16 num_rq,
			  u32 rq_depth, struct hinic3_dyna_rxq_res *rxqs_res);
void hinic3_free_rxqs_res(struct net_device *netdev, u16 num_rq,
			  u32 rq_depth, struct hinic3_dyna_rxq_res *rxqs_res);
int hinic3_configure_rxqs(struct net_device *netdev, u16 num_rq,
			  u32 rq_depth, struct hinic3_dyna_rxq_res *rxqs_res);
/* NAPI-style poll; returns the number of packets processed (<= budget). */
int hinic3_rx_poll(struct hinic3_rxq *rxq, int budget);

#endif