/* SPDX-License-Identifier: GPL-2.0 */
/* Interface for implementing AF_XDP zero-copy support in drivers.
 * Copyright(c) 2020 Intel Corporation.
 */

#ifndef _LINUX_XDP_SOCK_DRV_H
#define _LINUX_XDP_SOCK_DRV_H

#include <net/xdp_sock.h>
#include <net/xsk_buff_pool.h>

#define XDP_UMEM_MIN_CHUNK_SHIFT 11
#define XDP_UMEM_MIN_CHUNK_SIZE (1 << XDP_UMEM_MIN_CHUNK_SHIFT)

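/* struct xsk_cb_desc - small blob copied into the driver-private cb area of
 * every xdp_buff_xsk in a pool by xsk_pool_fill_cb(): @bytes bytes from
 * @src are written at offset @off. A minimal sketch of a caller; stashing a
 * ring pointer is an illustrative assumption, not part of this API:
 *
 *	static void my_drv_fill_cb(struct xsk_buff_pool *pool,
 *				   struct my_ring *ring)
 *	{
 *		struct xsk_cb_desc desc = {};
 *
 *		desc.src = &ring;		// copy the pointer value itself
 *		desc.off = 0;
 *		desc.bytes = sizeof(ring);
 *		xsk_pool_fill_cb(pool, &desc);	// applied to every buffer
 *	}
 */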
struct xsk_cb_desc {
	void *src;
	u8 off;
	u8 bytes;
};

#ifdef CONFIG_XDP_SOCKETS

void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries);
bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc);
u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 max);
void xsk_tx_release(struct xsk_buff_pool *pool);
struct xsk_buff_pool *xsk_get_pool_from_qid(struct net_device *dev,
					    u16 queue_id);
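
/* The need_wakeup accessors below implement the XDP_USE_NEED_WAKEUP
 * protocol: when the driver runs out of work it sets the flag on the Rx
 * fill queue or Tx queue, telling user space that it must explicitly wake
 * the kernel (e.g. via sendmsg() or poll()) before more descriptors are
 * processed.
 */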
void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool);
void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool);
void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool);
void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool);
bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool);

static inline u32 xsk_pool_get_headroom(struct xsk_buff_pool *pool)
{
	return XDP_PACKET_HEADROOM + pool->headroom;
}

static inline u32 xsk_pool_get_chunk_size(struct xsk_buff_pool *pool)
{
	return pool->chunk_size;
}

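/* Largest Rx frame that fits in one chunk once software headroom has been
 * reserved. For example, with 4 KiB chunks, XDP_PACKET_HEADROOM (256 bytes)
 * and no extra user-configured pool headroom, this is 4096 - 256 = 3840.
 */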
static inline u32 xsk_pool_get_rx_frame_size(struct xsk_buff_pool *pool)
{
	return xsk_pool_get_chunk_size(pool) - xsk_pool_get_headroom(pool);
}

static inline void xsk_pool_set_rxq_info(struct xsk_buff_pool *pool,
					 struct xdp_rxq_info *rxq)
{
	xp_set_rxq_info(pool, rxq);
}

static inline void xsk_pool_fill_cb(struct xsk_buff_pool *pool,
				    struct xsk_cb_desc *desc)
{
	xp_fill_cb(pool, desc);
}

static inline unsigned int xsk_pool_get_napi_id(struct xsk_buff_pool *pool)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	return pool->heads[0].xdp.rxq->napi_id;
#else
	return 0;
#endif
}

static inline void xsk_pool_dma_unmap(struct xsk_buff_pool *pool,
				      unsigned long attrs)
{
	xp_dma_unmap(pool, attrs);
}

static inline int xsk_pool_dma_map(struct xsk_buff_pool *pool,
				   struct device *dev, unsigned long attrs)
{
	struct xdp_umem *umem = pool->umem;

	return xp_dma_map(pool, dev, attrs, umem->pgs, umem->npgs);
}
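
/* A minimal sketch of pool DMA setup and teardown in a driver; the function
 * names and the netdev/queue plumbing are illustrative assumptions:
 *
 *	static int my_drv_enable_pool(struct net_device *netdev, u16 qid)
 *	{
 *		struct xsk_buff_pool *pool;
 *
 *		pool = xsk_get_pool_from_qid(netdev, qid);
 *		if (!pool)
 *			return -EINVAL;
 *		return xsk_pool_dma_map(pool, netdev->dev.parent, 0);
 *	}
 *
 *	static void my_drv_disable_pool(struct xsk_buff_pool *pool)
 *	{
 *		xsk_pool_dma_unmap(pool, 0);
 *	}
 */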

static inline dma_addr_t xsk_buff_xdp_get_dma(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);

	return xp_get_dma(xskb);
}

static inline dma_addr_t xsk_buff_xdp_get_frame_dma(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);

	return xp_get_frame_dma(xskb);
}

static inline struct xdp_buff *xsk_buff_alloc(struct xsk_buff_pool *pool)
{
	return xp_alloc(pool);
}

static inline bool xsk_is_eop_desc(struct xdp_desc *desc)
{
	return !xp_mb_desc(desc);
}

/* Returns as many entries as possible up to max. 0 <= N <= max. */
static inline u32 xsk_buff_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max)
{
	return xp_alloc_batch(pool, xdp, max);
}
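
/* A minimal sketch of an Rx fill loop built on the batch API; the ring
 * layout (my_rxq and its descriptor format) is an illustrative assumption:
 *
 *	static u32 my_drv_refill_rx(struct my_rxq *rxq, u32 budget)
 *	{
 *		struct xdp_buff **xdp = rxq->xdp_buffs;
 *		u32 i, n;
 *
 *		n = xsk_buff_alloc_batch(rxq->pool, xdp, budget);
 *		for (i = 0; i < n; i++)
 *			rxq->desc[rxq->next++].addr =
 *				cpu_to_le64(xsk_buff_xdp_get_dma(xdp[i]));
 *		return n;	// may be < budget; the driver retries later
 *	}
 */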

static inline bool xsk_buff_can_alloc(struct xsk_buff_pool *pool, u32 count)
{
	return xp_can_alloc(pool, count);
}

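/* Free an xsk buffer back to its pool. For a multi-buffer frame, every
 * fragment queued on the pool's xskb_list is released first and the
 * shared_info frag count is reset before the head buffer itself is freed.
 */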
static inline void xsk_buff_free(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
	struct list_head *xskb_list = &xskb->pool->xskb_list;
	struct xdp_buff_xsk *pos, *tmp;

	if (likely(!xdp_buff_has_frags(xdp)))
		goto out;

	list_for_each_entry_safe(pos, tmp, xskb_list, xskb_list_node) {
		list_del(&pos->xskb_list_node);
		xp_free(pos);
	}

	xdp_get_shared_info_from_buff(xdp)->nr_frags = 0;
out:
	xp_free(xskb);
}

static inline void xsk_buff_add_frag(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *frag = container_of(xdp, struct xdp_buff_xsk, xdp);

	list_add_tail(&frag->xskb_list_node, &frag->pool->xskb_list);
}

static inline struct xdp_buff *xsk_buff_get_frag(struct xdp_buff *first)
{
	struct xdp_buff_xsk *xskb = container_of(first, struct xdp_buff_xsk, xdp);
	struct xdp_buff *ret = NULL;
	struct xdp_buff_xsk *frag;

	frag = list_first_entry_or_null(&xskb->pool->xskb_list,
					struct xdp_buff_xsk, xskb_list_node);
	if (frag) {
		list_del(&frag->xskb_list_node);
		ret = &frag->xdp;
	}

	return ret;
}
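
/* xsk_buff_add_frag() and xsk_buff_get_frag() pair up across the Rx path:
 * the driver queues each non-EOP buffer on the pool's xskb_list as it
 * arrives, and a consumer later pops the fragments back off in arrival
 * order to link them behind the head xdp_buff of the frame.
 */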

static inline void xsk_buff_set_size(struct xdp_buff *xdp, u32 size)
{
	xdp->data = xdp->data_hard_start + XDP_PACKET_HEADROOM;
	xdp->data_meta = xdp->data;
	xdp->data_end = xdp->data + size;
}
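
/* A minimal sketch of per-packet Rx handling; reading the length from a
 * completion descriptor and running the XDP program are assumed driver
 * details:
 *
 *	static void my_drv_process_rx(struct my_rxq *rxq, struct xdp_buff *xdp,
 *				      u32 len)
 *	{
 *		xsk_buff_set_size(xdp, len);
 *		xsk_buff_dma_sync_for_cpu(xdp, rxq->pool);
 *		// ... run the XDP program on xdp ...
 *	}
 */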

static inline dma_addr_t xsk_buff_raw_get_dma(struct xsk_buff_pool *pool,
					      u64 addr)
{
	return xp_raw_get_dma(pool, addr);
}

static inline void *xsk_buff_raw_get_data(struct xsk_buff_pool *pool, u64 addr)
{
	return xp_raw_get_data(pool, addr);
}

#define XDP_TXMD_FLAGS_VALID ( \
		XDP_TXMD_FLAGS_TIMESTAMP | \
		XDP_TXMD_FLAGS_CHECKSUM | \
	0)

static inline bool xsk_buff_valid_tx_metadata(struct xsk_tx_metadata *meta)
{
	return !(meta->flags & ~XDP_TXMD_FLAGS_VALID);
}

static inline struct xsk_tx_metadata *xsk_buff_get_metadata(struct xsk_buff_pool *pool, u64 addr)
{
	struct xsk_tx_metadata *meta;

	if (!pool->tx_metadata_len)
		return NULL;

	meta = xp_raw_get_data(pool, addr) - pool->tx_metadata_len;
	if (unlikely(!xsk_buff_valid_tx_metadata(meta)))
		return NULL; /* no way to signal the error to the user */

	return meta;
}
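
/* A minimal sketch of consuming Tx metadata in a driver's transmit path;
 * my_fill_ts_desc() is a hypothetical helper standing in for however the
 * hardware is told to take a completion timestamp:
 *
 *	static void my_drv_xmit_desc(struct xsk_buff_pool *pool,
 *				     struct xdp_desc *desc)
 *	{
 *		struct xsk_tx_metadata *meta;
 *
 *		meta = xsk_buff_get_metadata(pool, desc->addr);
 *		if (meta && (meta->flags & XDP_TXMD_FLAGS_TIMESTAMP))
 *			my_fill_ts_desc();	// hypothetical
 *		// ... post the descriptor to hardware ...
 *	}
 */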

static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp, struct xsk_buff_pool *pool)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);

	if (!pool->dma_need_sync)
		return;

	xp_dma_sync_for_cpu(xskb);
}

static inline void xsk_buff_raw_dma_sync_for_device(struct xsk_buff_pool *pool,
						    dma_addr_t dma,
						    size_t size)
{
	xp_dma_sync_for_device(pool, dma, size);
}
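
/* A minimal sketch of a zero-copy Tx loop; posting to the hardware ring is
 * an assumed driver detail:
 *
 *	static void my_drv_xmit_zc(struct my_txq *txq)
 *	{
 *		struct xsk_buff_pool *pool = txq->pool;
 *		struct xdp_desc desc;
 *		dma_addr_t dma;
 *
 *		while (xsk_tx_peek_desc(pool, &desc)) {
 *			dma = xsk_buff_raw_get_dma(pool, desc.addr);
 *			xsk_buff_raw_dma_sync_for_device(pool, dma, desc.len);
 *			// ... post dma/desc.len to the hardware Tx ring ...
 *		}
 *		xsk_tx_release(pool);
 *	}
 */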

#else /* !CONFIG_XDP_SOCKETS */

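/* Stubs used when AF_XDP socket support is compiled out, so that drivers
 * can call into this API unconditionally.
 */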
static inline void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries)
{
}

static inline bool xsk_tx_peek_desc(struct xsk_buff_pool *pool,
				    struct xdp_desc *desc)
{
	return false;
}

static inline u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 max)
{
	return 0;
}

static inline void xsk_tx_release(struct xsk_buff_pool *pool)
{
}

static inline struct xsk_buff_pool *
xsk_get_pool_from_qid(struct net_device *dev, u16 queue_id)
{
	return NULL;
}

static inline void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool)
{
}

static inline void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool)
{
}

static inline void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool)
{
}

static inline void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool)
{
}

static inline bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool)
{
	return false;
}

static inline u32 xsk_pool_get_headroom(struct xsk_buff_pool *pool)
{
	return 0;
}

static inline u32 xsk_pool_get_chunk_size(struct xsk_buff_pool *pool)
{
	return 0;
}

static inline u32 xsk_pool_get_rx_frame_size(struct xsk_buff_pool *pool)
{
	return 0;
}

static inline void xsk_pool_set_rxq_info(struct xsk_buff_pool *pool,
					 struct xdp_rxq_info *rxq)
{
}

static inline void xsk_pool_fill_cb(struct xsk_buff_pool *pool,
				    struct xsk_cb_desc *desc)
{
}

static inline unsigned int xsk_pool_get_napi_id(struct xsk_buff_pool *pool)
{
	return 0;
}

static inline void xsk_pool_dma_unmap(struct xsk_buff_pool *pool,
				      unsigned long attrs)
{
}

static inline int xsk_pool_dma_map(struct xsk_buff_pool *pool,
				   struct device *dev, unsigned long attrs)
{
	return 0;
}

static inline dma_addr_t xsk_buff_xdp_get_dma(struct xdp_buff *xdp)
{
	return 0;
}

static inline dma_addr_t xsk_buff_xdp_get_frame_dma(struct xdp_buff *xdp)
{
	return 0;
}

static inline struct xdp_buff *xsk_buff_alloc(struct xsk_buff_pool *pool)
{
	return NULL;
}

static inline bool xsk_is_eop_desc(struct xdp_desc *desc)
{
	return false;
}

static inline u32 xsk_buff_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max)
{
	return 0;
}

static inline bool xsk_buff_can_alloc(struct xsk_buff_pool *pool, u32 count)
{
	return false;
}

static inline void xsk_buff_free(struct xdp_buff *xdp)
{
}

static inline void xsk_buff_add_frag(struct xdp_buff *xdp)
{
}

static inline struct xdp_buff *xsk_buff_get_frag(struct xdp_buff *first)
{
	return NULL;
}

static inline void xsk_buff_set_size(struct xdp_buff *xdp, u32 size)
{
}

static inline dma_addr_t xsk_buff_raw_get_dma(struct xsk_buff_pool *pool,
					      u64 addr)
{
	return 0;
}

static inline void *xsk_buff_raw_get_data(struct xsk_buff_pool *pool, u64 addr)
{
	return NULL;
}

static inline bool xsk_buff_valid_tx_metadata(struct xsk_tx_metadata *meta)
{
	return false;
}

static inline struct xsk_tx_metadata *xsk_buff_get_metadata(struct xsk_buff_pool *pool, u64 addr)
{
	return NULL;
}

static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp, struct xsk_buff_pool *pool)
{
}

static inline void xsk_buff_raw_dma_sync_for_device(struct xsk_buff_pool *pool,
						    dma_addr_t dma,
						    size_t size)
{
}

#endif /* CONFIG_XDP_SOCKETS */

#endif /* _LINUX_XDP_SOCK_DRV_H */