/* SPDX-License-Identifier: GPL-2.0 */
/* Interface for implementing AF_XDP zero-copy support in drivers.
 * Copyright(c) 2020 Intel Corporation.
 */

#ifndef _LINUX_XDP_SOCK_DRV_H
#define _LINUX_XDP_SOCK_DRV_H

#include <net/xdp_sock.h>
#include <net/xsk_buff_pool.h>

#ifdef CONFIG_XDP_SOCKETS

void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries);
bool xsk_umem_consume_tx(struct xdp_umem *umem, struct xdp_desc *desc);
void xsk_umem_consume_tx_done(struct xdp_umem *umem);
struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev, u16 queue_id);
void xsk_set_rx_need_wakeup(struct xdp_umem *umem);
void xsk_set_tx_need_wakeup(struct xdp_umem *umem);
void xsk_clear_rx_need_wakeup(struct xdp_umem *umem);
void xsk_clear_tx_need_wakeup(struct xdp_umem *umem);
bool xsk_umem_uses_need_wakeup(struct xdp_umem *umem);

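/*
 * Example (illustrative sketch, not part of this API): a driver's
 * zero-copy Tx path might drive the declarations above roughly as
 * follows. The budget/ring handling and mydrv_program_tx_desc() are
 * hypothetical driver-specific placeholders; note that desc.addr is a
 * umem offset and still has to be translated to a DMA address with
 * xsk_buff_raw_get_dma(), declared further down.
 *
 *	struct xdp_desc desc;
 *	u32 sent = 0;
 *
 *	while (sent < budget && xsk_umem_consume_tx(umem, &desc)) {
 *		mydrv_program_tx_desc(ring, &desc);
 *		sent++;
 *	}
 *	xsk_umem_consume_tx_done(umem);
 *
 * Once the HW reports that 'done' frames have been transmitted:
 *
 *	xsk_umem_complete_tx(umem, done);
 *	if (xsk_umem_uses_need_wakeup(umem))
 *		xsk_set_tx_need_wakeup(umem);
 */
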
static inline u32 xsk_umem_get_headroom(struct xdp_umem *umem)
{
	return XDP_PACKET_HEADROOM + umem->headroom;
}

static inline u32 xsk_umem_get_chunk_size(struct xdp_umem *umem)
{
	return umem->chunk_size;
}

static inline u32 xsk_umem_get_rx_frame_size(struct xdp_umem *umem)
{
	return xsk_umem_get_chunk_size(umem) - xsk_umem_get_headroom(umem);
}

static inline void xsk_buff_set_rxq_info(struct xdp_umem *umem,
					 struct xdp_rxq_info *rxq)
{
	xp_set_rxq_info(umem->pool, rxq);
}

static inline void xsk_buff_dma_unmap(struct xdp_umem *umem,
				      unsigned long attrs)
{
	xp_dma_unmap(umem->pool, attrs);
}

static inline int xsk_buff_dma_map(struct xdp_umem *umem, struct device *dev,
				   unsigned long attrs)
{
	return xp_dma_map(umem->pool, dev, attrs, umem->pgs, umem->npgs);
}

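/*
 * Example (illustrative sketch, not part of this API): when enabling
 * zero copy on a queue, a driver typically maps the umem for DMA,
 * registers its rxq_info with the buffer pool and sizes its HW Rx
 * buffers from the umem geometry. The dev/ring variables and field
 * names below are hypothetical, and drivers may pass DMA attributes
 * instead of 0.
 *
 *	err = xsk_buff_dma_map(umem, dev, 0);
 *	if (err)
 *		return err;
 *	xsk_buff_set_rxq_info(umem, &ring->xdp_rxq);
 *	ring->rx_buf_len = xsk_umem_get_rx_frame_size(umem);
 *
 * On teardown, the mapping is released again with xsk_buff_dma_unmap().
 */
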
static inline dma_addr_t xsk_buff_xdp_get_dma(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);

	return xp_get_dma(xskb);
}

static inline dma_addr_t xsk_buff_xdp_get_frame_dma(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);

	return xp_get_frame_dma(xskb);
}

static inline struct xdp_buff *xsk_buff_alloc(struct xdp_umem *umem)
{
	return xp_alloc(umem->pool);
}

static inline bool xsk_buff_can_alloc(struct xdp_umem *umem, u32 count)
{
	return xp_can_alloc(umem->pool, count);
}

static inline void xsk_buff_free(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);

	xp_free(xskb);
}

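/*
 * Example (illustrative sketch, not part of this API): an Rx refill
 * loop might allocate buffers from the pool and post their DMA
 * addresses to the HW Rx ring. The ring->rx_bi layout and
 * mydrv_post_rx_desc() are hypothetical driver-specific placeholders;
 * buffers the driver cannot use are handed back with xsk_buff_free().
 *
 *	if (!xsk_buff_can_alloc(umem, count))
 *		return false;
 *
 *	while (count--) {
 *		struct xdp_buff *xdp = xsk_buff_alloc(umem);
 *
 *		if (!xdp)
 *			break;
 *		ring->rx_bi[i].xdp = xdp;
 *		mydrv_post_rx_desc(ring, i++, xsk_buff_xdp_get_dma(xdp));
 *	}
 */
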
static inline dma_addr_t xsk_buff_raw_get_dma(struct xdp_umem *umem, u64 addr)
{
	return xp_raw_get_dma(umem->pool, addr);
}

static inline void *xsk_buff_raw_get_data(struct xdp_umem *umem, u64 addr)
{
	return xp_raw_get_data(umem->pool, addr);
}

static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);

	xp_dma_sync_for_cpu(xskb);
}

static inline void xsk_buff_raw_dma_sync_for_device(struct xdp_umem *umem,
						    dma_addr_t dma,
						    size_t size)
{
	xp_dma_sync_for_device(umem->pool, dma, size);
}

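/*
 * Example (illustrative sketch, not part of this API): the raw
 * accessors and sync helpers above pair up on the two datapaths. On Tx,
 * the umem offset returned by xsk_umem_consume_tx() is translated to a
 * DMA address and synced towards the device before the HW descriptor is
 * written; on Rx, a completed buffer is synced back to the CPU before
 * the XDP program runs. mydrv_program_tx_desc() and 'size' are
 * hypothetical driver-specific placeholders.
 *
 * Tx:
 *	dma = xsk_buff_raw_get_dma(umem, desc.addr);
 *	xsk_buff_raw_dma_sync_for_device(umem, dma, desc.len);
 *	mydrv_program_tx_desc(ring, dma, desc.len);
 *
 * Rx completion, before handing the buffer to the XDP program:
 *	xdp->data_end = xdp->data + size;
 *	xsk_buff_dma_sync_for_cpu(xdp);
 */
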
#else

static inline void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries)
{
}

static inline bool xsk_umem_consume_tx(struct xdp_umem *umem,
				       struct xdp_desc *desc)
{
	return false;
}

static inline void xsk_umem_consume_tx_done(struct xdp_umem *umem)
{
}

static inline struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev,
						     u16 queue_id)
{
	return NULL;
}

static inline void xsk_set_rx_need_wakeup(struct xdp_umem *umem)
{
}

static inline void xsk_set_tx_need_wakeup(struct xdp_umem *umem)
{
}

static inline void xsk_clear_rx_need_wakeup(struct xdp_umem *umem)
{
}

static inline void xsk_clear_tx_need_wakeup(struct xdp_umem *umem)
{
}

static inline bool xsk_umem_uses_need_wakeup(struct xdp_umem *umem)
{
	return false;
}

static inline u32 xsk_umem_get_headroom(struct xdp_umem *umem)
{
	return 0;
}

static inline u32 xsk_umem_get_chunk_size(struct xdp_umem *umem)
{
	return 0;
}

static inline u32 xsk_umem_get_rx_frame_size(struct xdp_umem *umem)
{
	return 0;
}

static inline void xsk_buff_set_rxq_info(struct xdp_umem *umem,
					 struct xdp_rxq_info *rxq)
{
}

static inline void xsk_buff_dma_unmap(struct xdp_umem *umem,
				      unsigned long attrs)
{
}

static inline int xsk_buff_dma_map(struct xdp_umem *umem, struct device *dev,
				   unsigned long attrs)
{
	return 0;
}

static inline dma_addr_t xsk_buff_xdp_get_dma(struct xdp_buff *xdp)
{
	return 0;
}

static inline dma_addr_t xsk_buff_xdp_get_frame_dma(struct xdp_buff *xdp)
{
	return 0;
}

static inline struct xdp_buff *xsk_buff_alloc(struct xdp_umem *umem)
{
	return NULL;
}

static inline bool xsk_buff_can_alloc(struct xdp_umem *umem, u32 count)
{
	return false;
}

static inline void xsk_buff_free(struct xdp_buff *xdp)
{
}

static inline dma_addr_t xsk_buff_raw_get_dma(struct xdp_umem *umem, u64 addr)
{
	return 0;
}

static inline void *xsk_buff_raw_get_data(struct xdp_umem *umem, u64 addr)
{
	return NULL;
}

static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp)
{
}

static inline void xsk_buff_raw_dma_sync_for_device(struct xdp_umem *umem,
						    dma_addr_t dma,
						    size_t size)
{
}

#endif /* CONFIG_XDP_SOCKETS */

#endif /* _LINUX_XDP_SOCK_DRV_H */