/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2019, Intel Corporation. */

#ifndef _ICE_XSK_H_
#define _ICE_XSK_H_
#include "ice_txrx.h"

#define PKTS_PER_BATCH 8

struct ice_vsi;
#ifdef CONFIG_XDP_SOCKETS
int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool,
		       u16 qid);
int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring,
			struct xsk_buff_pool *xsk_pool,
			int budget);
int ice_xsk_wakeup(struct net_device *netdev, u32 queue_id, u32 flags);
bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring,
			  struct xsk_buff_pool *xsk_pool, u16 count);
bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi);
void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring);
void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring);
bool ice_xmit_zc(struct ice_tx_ring *xdp_ring, struct xsk_buff_pool *xsk_pool);
int ice_realloc_zc_buf(struct ice_vsi *vsi, bool zc);
#else
/* CONFIG_XDP_SOCKETS=n stub: zero-copy Tx is unavailable, report false. */
static inline bool ice_xmit_zc(struct ice_tx_ring __always_unused *xdp_ring,
			       struct xsk_buff_pool __always_unused *xsk_pool)
{
	return false;
}

33 static inline int
ice_xsk_pool_setup(struct ice_vsi __always_unused * vsi,struct xsk_buff_pool __always_unused * pool,u16 __always_unused qid)34 ice_xsk_pool_setup(struct ice_vsi __always_unused *vsi,
35 struct xsk_buff_pool __always_unused *pool,
36 u16 __always_unused qid)
37 {
38 return -EOPNOTSUPP;
39 }
40
41 static inline int
ice_clean_rx_irq_zc(struct ice_rx_ring __always_unused * rx_ring,struct xsk_buff_pool __always_unused * xsk_pool,int __always_unused budget)42 ice_clean_rx_irq_zc(struct ice_rx_ring __always_unused *rx_ring,
43 struct xsk_buff_pool __always_unused *xsk_pool,
44 int __always_unused budget)
45 {
46 return 0;
47 }
48
49 static inline bool
ice_alloc_rx_bufs_zc(struct ice_rx_ring __always_unused * rx_ring,struct xsk_buff_pool __always_unused * xsk_pool,u16 __always_unused count)50 ice_alloc_rx_bufs_zc(struct ice_rx_ring __always_unused *rx_ring,
51 struct xsk_buff_pool __always_unused *xsk_pool,
52 u16 __always_unused count)
53 {
54 return false;
55 }
56
/* CONFIG_XDP_SOCKETS=n stub: no Rx ring can have an XSK pool enabled. */
static inline bool ice_xsk_any_rx_ring_ena(struct ice_vsi __always_unused *vsi)
{
	return false;
}

62 static inline int
ice_xsk_wakeup(struct net_device __always_unused * netdev,u32 __always_unused queue_id,u32 __always_unused flags)63 ice_xsk_wakeup(struct net_device __always_unused *netdev,
64 u32 __always_unused queue_id, u32 __always_unused flags)
65 {
66 return -EOPNOTSUPP;
67 }
68
/* CONFIG_XDP_SOCKETS=n stub: no zero-copy Rx state, nothing to clean. */
static inline void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring) { }
/* CONFIG_XDP_SOCKETS=n stub: no zero-copy Tx state, nothing to clean. */
static inline void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring) { }

72 static inline int
ice_realloc_zc_buf(struct ice_vsi __always_unused * vsi,bool __always_unused zc)73 ice_realloc_zc_buf(struct ice_vsi __always_unused *vsi,
74 bool __always_unused zc)
75 {
76 return 0;
77 }
#endif /* CONFIG_XDP_SOCKETS */
#endif /* !_ICE_XSK_H_ */