/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2019, Intel Corporation. */

#ifndef _ICE_XSK_H_
#define _ICE_XSK_H_
#include "ice_txrx.h"

/* Number of descriptors handled per batch in the zero-copy Tx path */
#define PKTS_PER_BATCH 8

struct ice_vsi;

#ifdef CONFIG_XDP_SOCKETS
int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool,
		       u16 qid);
int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring,
			struct xsk_buff_pool *xsk_pool,
			int budget);
int ice_xsk_wakeup(struct net_device *netdev, u32 queue_id, u32 flags);
bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring,
			  struct xsk_buff_pool *xsk_pool, u16 count);
bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi);
void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring);
void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring);
bool ice_xmit_zc(struct ice_tx_ring *xdp_ring, struct xsk_buff_pool *xsk_pool);
int ice_realloc_zc_buf(struct ice_vsi *vsi, bool zc);
void ice_qvec_cfg_msix(struct ice_vsi *vsi, struct ice_q_vector *q_vector,
		       u16 qid);
void ice_qvec_toggle_napi(struct ice_vsi *vsi, struct ice_q_vector *q_vector,
			  bool enable);
void ice_qvec_ena_irq(struct ice_vsi *vsi, struct ice_q_vector *q_vector);
void ice_qvec_dis_irq(struct ice_vsi *vsi, struct ice_rx_ring *rx_ring,
		      struct ice_q_vector *q_vector);
#else
/* Stub implementations used when CONFIG_XDP_SOCKETS is not enabled */
static inline bool ice_xmit_zc(struct ice_tx_ring __always_unused *xdp_ring,
			       struct xsk_buff_pool __always_unused *xsk_pool)
{
	return false;
}

static inline int
ice_xsk_pool_setup(struct ice_vsi __always_unused *vsi,
		   struct xsk_buff_pool __always_unused *pool,
		   u16 __always_unused qid)
{
	return -EOPNOTSUPP;
}

static inline int
ice_clean_rx_irq_zc(struct ice_rx_ring __always_unused *rx_ring,
		    struct xsk_buff_pool __always_unused *xsk_pool,
		    int __always_unused budget)
{
	return 0;
}

static inline bool
ice_alloc_rx_bufs_zc(struct ice_rx_ring __always_unused *rx_ring,
		     struct xsk_buff_pool __always_unused *xsk_pool,
		     u16 __always_unused count)
{
	return false;
}

static inline bool ice_xsk_any_rx_ring_ena(struct ice_vsi __always_unused *vsi)
{
	return false;
}

static inline int
ice_xsk_wakeup(struct net_device __always_unused *netdev,
	       u32 __always_unused queue_id, u32 __always_unused flags)
{
	return -EOPNOTSUPP;
}

static inline void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring) { }
static inline void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring) { }

static inline int
ice_realloc_zc_buf(struct ice_vsi __always_unused *vsi,
		   bool __always_unused zc)
{
	return 0;
}

static inline void
ice_qvec_cfg_msix(struct ice_vsi *vsi, struct ice_q_vector *q_vector,
		  u16 qid) { }

static inline void
ice_qvec_toggle_napi(struct ice_vsi *vsi, struct ice_q_vector *q_vector,
		     bool enable) { }

static inline void
ice_qvec_ena_irq(struct ice_vsi *vsi, struct ice_q_vector *q_vector) { }

static inline void
ice_qvec_dis_irq(struct ice_vsi *vsi, struct ice_rx_ring *rx_ring,
		 struct ice_q_vector *q_vector) { }
#endif /* CONFIG_XDP_SOCKETS */
#endif /* !_ICE_XSK_H_ */