xref: /linux/include/net/xdp_sock_drv.h (revision 57885276cc16a2e2b76282c808a4e84cbecb3aae)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /* Interface for implementing AF_XDP zero-copy support in drivers.
3  * Copyright(c) 2020 Intel Corporation.
4  */
5 
6 #ifndef _LINUX_XDP_SOCK_DRV_H
7 #define _LINUX_XDP_SOCK_DRV_H
8 
9 #include <net/xdp_sock.h>
10 #include <net/xsk_buff_pool.h>
11 
/* Smallest permitted umem chunk: 1 << 11 = 2048 bytes */
#define XDP_UMEM_MIN_CHUNK_SHIFT 11
#define XDP_UMEM_MIN_CHUNK_SIZE (1 << XDP_UMEM_MIN_CHUNK_SHIFT)

/* Feature set a netdev must advertise for AF_XDP zero-copy */
#define NETDEV_XDP_ACT_XSK	(NETDEV_XDP_ACT_BASIC |		\
				 NETDEV_XDP_ACT_REDIRECT |	\
				 NETDEV_XDP_ACT_XSK_ZEROCOPY)

/* Describes a chunk of driver-private data copied into each xskb via
 * xsk_pool_fill_cb()/xp_fill_cb().
 */
struct xsk_cb_desc {
	void *src;	/* source buffer to copy from */
	u8 off;		/* destination offset within the cb area */
	u8 bytes;	/* number of bytes to copy */
};
24 
25 #ifdef CONFIG_XDP_SOCKETS
26 
27 void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries);
28 bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc);
29 u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 max);
30 void xsk_tx_release(struct xsk_buff_pool *pool);
31 struct xsk_buff_pool *xsk_get_pool_from_qid(struct net_device *dev,
32 					    u16 queue_id);
33 void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool);
34 void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool);
35 void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool);
36 void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool);
37 bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool);
38 
/* Total RX headroom: the fixed XDP_PACKET_HEADROOM plus the headroom
 * configured on the pool.
 */
static inline u32 xsk_pool_get_headroom(struct xsk_buff_pool *pool)
{
	return XDP_PACKET_HEADROOM + pool->headroom;
}
43 
/* Size in bytes of one umem chunk of this pool */
static inline u32 xsk_pool_get_chunk_size(struct xsk_buff_pool *pool)
{
	return pool->chunk_size;
}
48 
/* Bytes usable for RX data in one chunk: chunk size minus total headroom */
static inline u32 xsk_pool_get_rx_frame_size(struct xsk_buff_pool *pool)
{
	return xsk_pool_get_chunk_size(pool) - xsk_pool_get_headroom(pool);
}
53 
54 static inline u32 xsk_pool_get_rx_frag_step(struct xsk_buff_pool *pool)
55 {
56 	return pool->unaligned ? 0 : xsk_pool_get_chunk_size(pool);
57 }
58 
/* Driver-facing wrapper: hand the rxq info to the pool (xp_set_rxq_info()) */
static inline void xsk_pool_set_rxq_info(struct xsk_buff_pool *pool,
					 struct xdp_rxq_info *rxq)
{
	xp_set_rxq_info(pool, rxq);
}
64 
/* Driver-facing wrapper: copy the described driver data into the pool's
 * buffers (xp_fill_cb()).
 */
static inline void xsk_pool_fill_cb(struct xsk_buff_pool *pool,
				    struct xsk_cb_desc *desc)
{
	xp_fill_cb(pool, desc);
}
70 
/* Undo xsk_pool_dma_map(); @attrs are DMA_ATTR_* flags passed through
 * to xp_dma_unmap().
 */
static inline void xsk_pool_dma_unmap(struct xsk_buff_pool *pool,
				      unsigned long attrs)
{
	xp_dma_unmap(pool, attrs);
}
76 
77 static inline int xsk_pool_dma_map(struct xsk_buff_pool *pool,
78 				   struct device *dev, unsigned long attrs)
79 {
80 	struct xdp_umem *umem = pool->umem;
81 
82 	return xp_dma_map(pool, dev, attrs, umem->pgs, umem->npgs);
83 }
84 
/* Resolve @xdp to its containing xdp_buff_xsk and return the DMA address
 * of its data (xp_get_dma()).
 */
static inline dma_addr_t xsk_buff_xdp_get_dma(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);

	return xp_get_dma(xskb);
}
91 
/* Like xsk_buff_xdp_get_dma(), but returns the DMA address of the whole
 * frame (xp_get_frame_dma()).
 */
static inline dma_addr_t xsk_buff_xdp_get_frame_dma(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);

	return xp_get_frame_dma(xskb);
}
98 
/* Allocate one buffer from the pool via xp_alloc(); NULL when exhausted */
static inline struct xdp_buff *xsk_buff_alloc(struct xsk_buff_pool *pool)
{
	return xp_alloc(pool);
}
103 
/* True when xp_mb_desc() says @desc does not continue into another
 * descriptor, i.e. it is the last (end-of-packet) desc of the frame.
 */
static inline bool xsk_is_eop_desc(const struct xdp_desc *desc)
{
	return !xp_mb_desc(desc);
}
108 
/* Batch-allocate buffers into the @xdp array via xp_alloc_batch().
 * Returns as many entries as possible up to max. 0 <= N <= max.
 */
static inline u32 xsk_buff_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max)
{
	return xp_alloc_batch(pool, xdp, max);
}
114 
/* True if the pool can currently satisfy an allocation of @count buffers */
static inline bool xsk_buff_can_alloc(struct xsk_buff_pool *pool, u32 count)
{
	return xp_can_alloc(pool, count);
}
119 
/* Return @xdp (and, for a multi-buffer frame, every frag queued on the
 * pool's xskb_list) to the pool. The frag count in the shared info is
 * reset to 0 before the head buffer itself is freed.
 */
static inline void xsk_buff_free(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
	struct list_head *xskb_list = &xskb->pool->xskb_list;
	struct xdp_buff_xsk *pos, *tmp;

	/* Fast path: single-buffer frame, nothing queued to tear down */
	if (likely(!xdp_buff_has_frags(xdp)))
		goto out;

	/* _safe variant: each entry is unlinked and freed while iterating */
	list_for_each_entry_safe(pos, tmp, xskb_list, list_node) {
		list_del_init(&pos->list_node);
		xp_free(pos);
	}

	/* All frags returned; clear the count so the buff reads as empty */
	xdp_get_shared_info_from_buff(xdp)->nr_frags = 0;
out:
	xp_free(xskb);
}
138 
/* Attach @xdp's data as a frag of the @head frame. On success the frag's
 * xskb is also queued on the pool's xskb_list (consumed later by
 * xsk_buff_get_frag()/xsk_buff_free()) and true is returned; returns
 * false if __xdp_buff_add_frag() could not add it.
 */
static inline bool xsk_buff_add_frag(struct xdp_buff *head,
				     struct xdp_buff *xdp)
{
	const void *data = xdp->data;
	struct xdp_buff_xsk *frag;

	if (!__xdp_buff_add_frag(head, virt_to_netmem(data),
				 offset_in_page(data), xdp->data_end - data,
				 xdp->frame_sz, false))
		return false;

	frag = container_of(xdp, struct xdp_buff_xsk, xdp);
	list_add_tail(&frag->list_node, &frag->pool->xskb_list);

	return true;
}
155 
156 static inline struct xdp_buff *xsk_buff_get_frag(const struct xdp_buff *first)
157 {
158 	struct xdp_buff_xsk *xskb = container_of(first, struct xdp_buff_xsk, xdp);
159 	struct xdp_buff *ret = NULL;
160 	struct xdp_buff_xsk *frag;
161 
162 	frag = list_first_entry_or_null(&xskb->pool->xskb_list,
163 					struct xdp_buff_xsk, list_node);
164 	if (frag) {
165 		list_del_init(&frag->list_node);
166 		ret = &frag->xdp;
167 	}
168 
169 	return ret;
170 }
171 
/* Unlink @xdp's xskb from the frag list; list_del_init() leaves the node
 * self-linked so a later del/free is safe.
 */
static inline void xsk_buff_del_frag(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);

	list_del_init(&xskb->list_node);
}
178 
/* Return the first buffer queued on @first's pool xskb_list without
 * unlinking it. Uses list_first_entry(), so the caller must know the
 * list is non-empty.
 */
static inline struct xdp_buff *xsk_buff_get_head(struct xdp_buff *first)
{
	struct xdp_buff_xsk *xskb = container_of(first, struct xdp_buff_xsk, xdp);
	struct xdp_buff_xsk *frag;

	frag = list_first_entry(&xskb->pool->xskb_list, struct xdp_buff_xsk,
				list_node);
	return &frag->xdp;
}
188 
/* Return the last buffer queued on @first's pool xskb_list without
 * unlinking it. Uses list_last_entry(), so the caller must know the
 * list is non-empty.
 */
static inline struct xdp_buff *xsk_buff_get_tail(struct xdp_buff *first)
{
	struct xdp_buff_xsk *xskb = container_of(first, struct xdp_buff_xsk, xdp);
	struct xdp_buff_xsk *frag;

	frag = list_last_entry(&xskb->pool->xskb_list, struct xdp_buff_xsk,
			       list_node);
	return &frag->xdp;
}
198 
199 static inline void xsk_buff_set_size(struct xdp_buff *xdp, u32 size)
200 {
201 	xdp->data = xdp->data_hard_start + XDP_PACKET_HEADROOM;
202 	xdp->data_meta = xdp->data;
203 	xdp->data_end = xdp->data + size;
204 	xdp->flags = 0;
205 }
206 
/* Resolve a raw descriptor address @addr to its DMA address
 * (xp_raw_get_dma()).
 */
static inline dma_addr_t xsk_buff_raw_get_dma(struct xsk_buff_pool *pool,
					      u64 addr)
{
	return xp_raw_get_dma(pool, addr);
}
212 
/* Resolve a raw descriptor address @addr to a kernel virtual pointer
 * (xp_raw_get_data()).
 */
static inline void *xsk_buff_raw_get_data(struct xsk_buff_pool *pool, u64 addr)
{
	return xp_raw_get_data(pool, addr);
}
217 
218 /**
219  * xsk_buff_raw_get_ctx - get &xdp_desc context
220  * @pool: XSk buff pool desc address belongs to
221  * @addr: desc address (from userspace)
222  *
223  * Wrapper for xp_raw_get_ctx() to be used in drivers, see its kdoc for
224  * details.
225  *
226  * Return: new &xdp_desc_ctx struct containing desc's DMA address and metadata
227  * pointer, if it is present and valid (initialized to %NULL otherwise).
228  */
229 static inline struct xdp_desc_ctx
230 xsk_buff_raw_get_ctx(const struct xsk_buff_pool *pool, u64 addr)
231 {
232 	return xp_raw_get_ctx(pool, addr);
233 }
234 
/* All TX metadata flag bits accepted by xsk_buff_valid_tx_metadata() */
#define XDP_TXMD_FLAGS_VALID ( \
		XDP_TXMD_FLAGS_TIMESTAMP | \
		XDP_TXMD_FLAGS_CHECKSUM | \
		XDP_TXMD_FLAGS_LAUNCH_TIME | \
	0)
240 
/* True iff @meta sets no flag bits outside XDP_TXMD_FLAGS_VALID */
static inline bool
xsk_buff_valid_tx_metadata(const struct xsk_tx_metadata *meta)
{
	return !(meta->flags & ~XDP_TXMD_FLAGS_VALID);
}
246 
/* Locate the TX metadata for a buffer whose payload starts at @data.
 * The metadata sits tx_metadata_len bytes before the payload. Returns
 * NULL when the pool carries no TX metadata or when the flags fail
 * validation (there is no way to signal that error to the user).
 */
static inline struct xsk_tx_metadata *
__xsk_buff_get_metadata(const struct xsk_buff_pool *pool, void *data)
{
	struct xsk_tx_metadata *meta;

	if (!pool->tx_metadata_len)
		return NULL;

	meta = data - pool->tx_metadata_len;
	if (unlikely(!xsk_buff_valid_tx_metadata(meta)))
		return NULL; /* no way to signal the error to the user */

	return meta;
}
261 
/* As __xsk_buff_get_metadata(), but starting from a raw descriptor
 * address instead of a data pointer.
 */
static inline struct xsk_tx_metadata *
xsk_buff_get_metadata(struct xsk_buff_pool *pool, u64 addr)
{
	return __xsk_buff_get_metadata(pool, xp_raw_get_data(pool, addr));
}
267 
/* DMA-sync @xdp's buffer for CPU access before the kernel reads it
 * (xp_dma_sync_for_cpu()).
 */
static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);

	xp_dma_sync_for_cpu(xskb);
}
274 
/* DMA-sync @size bytes at @dma for device access before handing the
 * buffer to hardware (xp_dma_sync_for_device()).
 */
static inline void xsk_buff_raw_dma_sync_for_device(struct xsk_buff_pool *pool,
						    dma_addr_t dma,
						    size_t size)
{
	xp_dma_sync_for_device(pool, dma, size);
}
281 
#else

/* CONFIG_XDP_SOCKETS=n: no-op stubs so drivers can be built without
 * AF_XDP support. "Can't do it" values are returned throughout:
 * false / 0 / NULL / empty struct.
 */

static inline void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries)
{
}

static inline bool xsk_tx_peek_desc(struct xsk_buff_pool *pool,
				    struct xdp_desc *desc)
{
	return false;
}

static inline u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 max)
{
	return 0;
}

static inline void xsk_tx_release(struct xsk_buff_pool *pool)
{
}

static inline struct xsk_buff_pool *
xsk_get_pool_from_qid(struct net_device *dev, u16 queue_id)
{
	return NULL;
}

static inline void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool)
{
}

static inline void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool)
{
}

static inline void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool)
{
}

static inline void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool)
{
}

static inline bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool)
{
	return false;
}

static inline u32 xsk_pool_get_headroom(struct xsk_buff_pool *pool)
{
	return 0;
}

static inline u32 xsk_pool_get_chunk_size(struct xsk_buff_pool *pool)
{
	return 0;
}

static inline u32 xsk_pool_get_rx_frame_size(struct xsk_buff_pool *pool)
{
	return 0;
}

static inline u32 xsk_pool_get_rx_frag_step(struct xsk_buff_pool *pool)
{
	return 0;
}

static inline void xsk_pool_set_rxq_info(struct xsk_buff_pool *pool,
					 struct xdp_rxq_info *rxq)
{
}

static inline void xsk_pool_fill_cb(struct xsk_buff_pool *pool,
				    struct xsk_cb_desc *desc)
{
}

static inline void xsk_pool_dma_unmap(struct xsk_buff_pool *pool,
				      unsigned long attrs)
{
}

static inline int xsk_pool_dma_map(struct xsk_buff_pool *pool,
				   struct device *dev, unsigned long attrs)
{
	return 0;
}

static inline dma_addr_t xsk_buff_xdp_get_dma(struct xdp_buff *xdp)
{
	return 0;
}

static inline dma_addr_t xsk_buff_xdp_get_frame_dma(struct xdp_buff *xdp)
{
	return 0;
}

static inline struct xdp_buff *xsk_buff_alloc(struct xsk_buff_pool *pool)
{
	return NULL;
}

static inline bool xsk_is_eop_desc(const struct xdp_desc *desc)
{
	return false;
}

static inline u32 xsk_buff_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max)
{
	return 0;
}

static inline bool xsk_buff_can_alloc(struct xsk_buff_pool *pool, u32 count)
{
	return false;
}

static inline void xsk_buff_free(struct xdp_buff *xdp)
{
}

static inline bool xsk_buff_add_frag(struct xdp_buff *head,
				     struct xdp_buff *xdp)
{
	return false;
}

static inline struct xdp_buff *xsk_buff_get_frag(const struct xdp_buff *first)
{
	return NULL;
}

static inline void xsk_buff_del_frag(struct xdp_buff *xdp)
{
}

static inline struct xdp_buff *xsk_buff_get_head(struct xdp_buff *first)
{
	return NULL;
}

static inline struct xdp_buff *xsk_buff_get_tail(struct xdp_buff *first)
{
	return NULL;
}

static inline void xsk_buff_set_size(struct xdp_buff *xdp, u32 size)
{
}

static inline dma_addr_t xsk_buff_raw_get_dma(struct xsk_buff_pool *pool,
					      u64 addr)
{
	return 0;
}

static inline void *xsk_buff_raw_get_data(struct xsk_buff_pool *pool, u64 addr)
{
	return NULL;
}

static inline struct xdp_desc_ctx
xsk_buff_raw_get_ctx(const struct xsk_buff_pool *pool, u64 addr)
{
	return (struct xdp_desc_ctx){ };
}
450 
/* CONFIG_XDP_SOCKETS=n stub: no TX metadata is ever valid.
 * Takes a const pointer to match the CONFIG_XDP_SOCKETS=y prototype,
 * so callers passing const metadata build identically either way.
 */
static inline bool xsk_buff_valid_tx_metadata(const struct xsk_tx_metadata *meta)
{
	return false;
}
455 
/* Remaining CONFIG_XDP_SOCKETS=n no-op stubs */

static inline struct xsk_tx_metadata *
__xsk_buff_get_metadata(const struct xsk_buff_pool *pool, void *data)
{
	return NULL;
}

static inline struct xsk_tx_metadata *
xsk_buff_get_metadata(struct xsk_buff_pool *pool, u64 addr)
{
	return NULL;
}

static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp)
{
}

static inline void xsk_buff_raw_dma_sync_for_device(struct xsk_buff_pool *pool,
						    dma_addr_t dma,
						    size_t size)
{
}
477 
478 #endif /* CONFIG_XDP_SOCKETS */
479 
480 #endif /* _LINUX_XDP_SOCK_DRV_H */
481