/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2019, Intel Corporation. */

#ifndef _ICE_XSK_H_
#define _ICE_XSK_H_
#include "ice_txrx.h"

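/* Frames handled per unrolled batch in the zero-copy paths; kept in step
 * with the unroll factor requested by loop_unrolled_for below.
 */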
#define PKTS_PER_BATCH 8

#ifdef __clang__
#define loop_unrolled_for _Pragma("clang loop unroll_count(8)") for
#elif __GNUC__ >= 8
#define loop_unrolled_for _Pragma("GCC unroll 8") for
#else
#define loop_unrolled_for for
#endif
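
/* Illustrative use only (ice_xmit_pkt() is a made-up helper):
 *
 *	loop_unrolled_for (i = 0; i < PKTS_PER_BATCH; i++)
 *		ice_xmit_pkt(xdp_ring, &descs[i]);
 *
 * asks clang and GCC >= 8 to unroll the loop body eight times; on other
 * compilers it degrades to a plain for loop.
 */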

struct ice_vsi;

#ifdef CONFIG_XDP_SOCKETS
int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool,
		       u16 qid);
int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring,
			struct xsk_buff_pool *xsk_pool,
			int budget);
int ice_xsk_wakeup(struct net_device *netdev, u32 queue_id, u32 flags);
bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring,
			  struct xsk_buff_pool *xsk_pool, u16 count);
bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi);
void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring);
void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring);
bool ice_xmit_zc(struct ice_tx_ring *xdp_ring, struct xsk_buff_pool *xsk_pool);
int ice_realloc_zc_buf(struct ice_vsi *vsi, bool zc);
#else
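/* Stubs for builds without CONFIG_XDP_SOCKETS: pool setup and wakeup
 * report -EOPNOTSUPP, the Tx/Rx helpers report no work done, and the
 * cleanup helpers are no-ops.
 */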
static inline bool ice_xmit_zc(struct ice_tx_ring __always_unused *xdp_ring,
			       struct xsk_buff_pool __always_unused *xsk_pool)
{
	return false;
}

static inline int
ice_xsk_pool_setup(struct ice_vsi __always_unused *vsi,
		   struct xsk_buff_pool __always_unused *pool,
		   u16 __always_unused qid)
{
	return -EOPNOTSUPP;
}

static inline int
ice_clean_rx_irq_zc(struct ice_rx_ring __always_unused *rx_ring,
		    struct xsk_buff_pool __always_unused *xsk_pool,
		    int __always_unused budget)
{
	return 0;
}

static inline bool
ice_alloc_rx_bufs_zc(struct ice_rx_ring __always_unused *rx_ring,
		     struct xsk_buff_pool __always_unused *xsk_pool,
		     u16 __always_unused count)
{
	return false;
}

static inline bool ice_xsk_any_rx_ring_ena(struct ice_vsi __always_unused *vsi)
{
	return false;
}

static inline int
ice_xsk_wakeup(struct net_device __always_unused *netdev,
	       u32 __always_unused queue_id, u32 __always_unused flags)
{
	return -EOPNOTSUPP;
}

static inline void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring) { }
static inline void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring) { }

static inline int
ice_realloc_zc_buf(struct ice_vsi __always_unused *vsi,
		   bool __always_unused zc)
{
	return 0;
}
#endif /* CONFIG_XDP_SOCKETS */
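
/* Sketch of a typical caller, assuming the standard ndo_bpf plumbing
 * (the switch context is illustrative, not part of this header):
 *
 *	case XDP_SETUP_XSK_POOL:
 *		return ice_xsk_pool_setup(vsi, xdp->xsk.pool,
 *					  xdp->xsk.queue_id);
 */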
#endif /* !_ICE_XSK_H_ */