xref: /linux/include/net/xdp_sock_drv.h (revision 6dfafbd0299a60bfb5d5e277fdf100037c7ded07)
/* SPDX-License-Identifier: GPL-2.0 */
/* Interface for implementing AF_XDP zero-copy support in drivers.
 * Copyright(c) 2020 Intel Corporation.
 */

#ifndef _LINUX_XDP_SOCK_DRV_H
#define _LINUX_XDP_SOCK_DRV_H

#include <net/xdp_sock.h>
#include <net/xsk_buff_pool.h>

#define XDP_UMEM_MIN_CHUNK_SHIFT 11
#define XDP_UMEM_MIN_CHUNK_SIZE (1 << XDP_UMEM_MIN_CHUNK_SHIFT)

#define NETDEV_XDP_ACT_XSK	(NETDEV_XDP_ACT_BASIC |		\
				 NETDEV_XDP_ACT_REDIRECT |	\
				 NETDEV_XDP_ACT_XSK_ZEROCOPY)

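/* Drivers that implement AF_XDP zero-copy advertise it through the netdev
 * XDP feature flags. A minimal sketch (assuming xdp_set_features_flag() from
 * <net/xdp.h>; "netdev" is the driver's net_device):
 *
 *	xdp_set_features_flag(netdev, NETDEV_XDP_ACT_XSK);
 */
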
struct xsk_cb_desc {
	void *src;
	u8 off;
	u8 bytes;
};

#ifdef CONFIG_XDP_SOCKETS

void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries);
bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc);
u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 max);
void xsk_tx_release(struct xsk_buff_pool *pool);
struct xsk_buff_pool *xsk_get_pool_from_qid(struct net_device *dev,
					    u16 queue_id);
void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool);
void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool);
void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool);
void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool);
bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool);

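/* Example: zero-copy Tx completion handling in a driver clean-up routine
 * (a minimal sketch; "nb_done" is a hypothetical count of descriptors the
 * HW has finished with):
 *
 *	xsk_tx_completed(pool, nb_done);
 *	if (xsk_uses_need_wakeup(pool))
 *		xsk_set_tx_need_wakeup(pool);
 */
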
static inline u32 xsk_pool_get_headroom(struct xsk_buff_pool *pool)
{
	return XDP_PACKET_HEADROOM + pool->headroom;
}

static inline u32 xsk_pool_get_chunk_size(struct xsk_buff_pool *pool)
{
	return pool->chunk_size;
}

static inline u32 xsk_pool_get_rx_frame_size(struct xsk_buff_pool *pool)
{
	return xsk_pool_get_chunk_size(pool) - xsk_pool_get_headroom(pool);
}

static inline void xsk_pool_set_rxq_info(struct xsk_buff_pool *pool,
					 struct xdp_rxq_info *rxq)
{
	xp_set_rxq_info(pool, rxq);
}

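/* Example: sizing HW Rx buffers from the pool and sharing the rxq info with
 * it (a sketch; "rx_ring" and its members are hypothetical driver state):
 *
 *	rx_ring->rx_buf_len = xsk_pool_get_rx_frame_size(pool);
 *	xsk_pool_set_rxq_info(pool, &rx_ring->xdp_rxq);
 */
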
static inline void xsk_pool_fill_cb(struct xsk_buff_pool *pool,
				    struct xsk_cb_desc *desc)
{
	xp_fill_cb(pool, desc);
}

static inline void xsk_pool_dma_unmap(struct xsk_buff_pool *pool,
				      unsigned long attrs)
{
	xp_dma_unmap(pool, attrs);
}

static inline int xsk_pool_dma_map(struct xsk_buff_pool *pool,
				   struct device *dev, unsigned long attrs)
{
	struct xdp_umem *umem = pool->umem;

	return xp_dma_map(pool, dev, attrs, umem->pgs, umem->npgs);
}

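/* Example: mapping the pool when zero-copy is enabled on a queue and
 * unmapping it on teardown (a sketch; DMA attributes and error handling
 * are driver-specific):
 *
 *	err = xsk_pool_dma_map(pool, dev, 0);
 *	...
 *	xsk_pool_dma_unmap(pool, 0);
 */
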
static inline dma_addr_t xsk_buff_xdp_get_dma(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);

	return xp_get_dma(xskb);
}

static inline dma_addr_t xsk_buff_xdp_get_frame_dma(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);

	return xp_get_frame_dma(xskb);
}

static inline struct xdp_buff *xsk_buff_alloc(struct xsk_buff_pool *pool)
{
	return xp_alloc(pool);
}

static inline bool xsk_is_eop_desc(const struct xdp_desc *desc)
{
	return !xp_mb_desc(desc);
}

/* Returns as many entries as possible up to max. 0 <= N <= max. */
static inline u32 xsk_buff_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max)
{
	return xp_alloc_batch(pool, xdp, max);
}

static inline bool xsk_buff_can_alloc(struct xsk_buff_pool *pool, u32 count)
{
	return xp_can_alloc(pool, count);
}

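/* Example: refilling an Rx ring with zero-copy buffers (a sketch; "xdps",
 * "count" and the HW descriptor programming are hypothetical driver code):
 *
 *	nb = xsk_buff_alloc_batch(pool, xdps, count);
 *	for (i = 0; i < nb; i++) {
 *		dma = xsk_buff_xdp_get_dma(xdps[i]);
 *		... write dma into the HW Rx descriptor ...
 *	}
 */
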
static inline void xsk_buff_free(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
	struct list_head *xskb_list = &xskb->pool->xskb_list;
	struct xdp_buff_xsk *pos, *tmp;

	if (likely(!xdp_buff_has_frags(xdp)))
		goto out;

	list_for_each_entry_safe(pos, tmp, xskb_list, list_node) {
		list_del(&pos->list_node);
		xp_free(pos);
	}

	xdp_get_shared_info_from_buff(xdp)->nr_frags = 0;
out:
	xp_free(xskb);
}

static inline bool xsk_buff_add_frag(struct xdp_buff *head,
				     struct xdp_buff *xdp)
{
	const void *data = xdp->data;
	struct xdp_buff_xsk *frag;

	if (!__xdp_buff_add_frag(head, virt_to_netmem(data),
				 offset_in_page(data), xdp->data_end - data,
				 xdp->frame_sz, false))
		return false;

	frag = container_of(xdp, struct xdp_buff_xsk, xdp);
	list_add_tail(&frag->list_node, &frag->pool->xskb_list);

	return true;
}

static inline struct xdp_buff *xsk_buff_get_frag(const struct xdp_buff *first)
{
	struct xdp_buff_xsk *xskb = container_of(first, struct xdp_buff_xsk, xdp);
	struct xdp_buff *ret = NULL;
	struct xdp_buff_xsk *frag;

	frag = list_first_entry_or_null(&xskb->pool->xskb_list,
					struct xdp_buff_xsk, list_node);
	if (frag) {
		list_del(&frag->list_node);
		ret = &frag->xdp;
	}

	return ret;
}

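/* Example: chaining a fragment to the first buffer of a multi-buffer frame
 * on Rx (a sketch; "first" and "frag" are xdp_buffs allocated from this
 * pool, and the exact drop path is driver-specific):
 *
 *	if (!xsk_buff_add_frag(first, frag))
 *		... no room for another fragment: free the buffers and drop ...
 */
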
static inline void xsk_buff_del_frag(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);

	list_del(&xskb->list_node);
}

static inline struct xdp_buff *xsk_buff_get_head(struct xdp_buff *first)
{
	struct xdp_buff_xsk *xskb = container_of(first, struct xdp_buff_xsk, xdp);
	struct xdp_buff_xsk *frag;

	frag = list_first_entry(&xskb->pool->xskb_list, struct xdp_buff_xsk,
				list_node);
	return &frag->xdp;
}

static inline struct xdp_buff *xsk_buff_get_tail(struct xdp_buff *first)
{
	struct xdp_buff_xsk *xskb = container_of(first, struct xdp_buff_xsk, xdp);
	struct xdp_buff_xsk *frag;

	frag = list_last_entry(&xskb->pool->xskb_list, struct xdp_buff_xsk,
			       list_node);
	return &frag->xdp;
}

static inline void xsk_buff_set_size(struct xdp_buff *xdp, u32 size)
{
	xdp->data = xdp->data_hard_start + XDP_PACKET_HEADROOM;
	xdp->data_meta = xdp->data;
	xdp->data_end = xdp->data + size;
	xdp->flags = 0;
}

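/* Example: completing an Rx descriptor in zero-copy mode (a sketch; "size"
 * comes from the HW Rx descriptor and "prog" is the attached XDP program):
 *
 *	xsk_buff_set_size(xdp, size);
 *	xsk_buff_dma_sync_for_cpu(xdp);
 *	act = bpf_prog_run_xdp(prog, xdp);
 */
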
static inline dma_addr_t xsk_buff_raw_get_dma(struct xsk_buff_pool *pool,
					      u64 addr)
{
	return xp_raw_get_dma(pool, addr);
}

static inline void *xsk_buff_raw_get_data(struct xsk_buff_pool *pool, u64 addr)
{
	return xp_raw_get_data(pool, addr);
}

/**
 * xsk_buff_raw_get_ctx - get &xdp_desc context
 * @pool: XSk buff pool the desc address belongs to
 * @addr: desc address (from userspace)
 *
 * Wrapper for xp_raw_get_ctx() to be used in drivers, see its kdoc for
 * details.
 *
 * Return: new &xdp_desc_ctx struct containing desc's DMA address and metadata
 * pointer, if it is present and valid (initialized to %NULL otherwise).
 */
static inline struct xdp_desc_ctx
xsk_buff_raw_get_ctx(const struct xsk_buff_pool *pool, u64 addr)
{
	return xp_raw_get_ctx(pool, addr);
}

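/* Example: transmitting a descriptor taken from the Tx ring in zero-copy
 * mode (a sketch; "desc" was obtained with xsk_tx_peek_desc() and the HW
 * descriptor programming is driver-specific):
 *
 *	dma = xsk_buff_raw_get_dma(pool, desc.addr);
 *	xsk_buff_raw_dma_sync_for_device(pool, dma, desc.len);
 *	... write dma and desc.len into the HW Tx descriptor, then ring
 *	    the doorbell ...
 */
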
#define XDP_TXMD_FLAGS_VALID ( \
		XDP_TXMD_FLAGS_TIMESTAMP | \
		XDP_TXMD_FLAGS_CHECKSUM | \
		XDP_TXMD_FLAGS_LAUNCH_TIME | \
	0)

static inline bool
xsk_buff_valid_tx_metadata(const struct xsk_tx_metadata *meta)
{
	return !(meta->flags & ~XDP_TXMD_FLAGS_VALID);
}

static inline struct xsk_tx_metadata *
__xsk_buff_get_metadata(const struct xsk_buff_pool *pool, void *data)
{
	struct xsk_tx_metadata *meta;

	if (!pool->tx_metadata_len)
		return NULL;

	meta = data - pool->tx_metadata_len;
	if (unlikely(!xsk_buff_valid_tx_metadata(meta)))
		return NULL; /* no way to signal the error to the user */

	return meta;
}

static inline struct xsk_tx_metadata *
xsk_buff_get_metadata(struct xsk_buff_pool *pool, u64 addr)
{
	return __xsk_buff_get_metadata(pool, xp_raw_get_data(pool, addr));
}

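/* Example: picking up Tx metadata requested by user space for a descriptor
 * (a sketch; how the offloads are armed in the HW descriptor is
 * driver-specific):
 *
 *	meta = xsk_buff_get_metadata(pool, desc.addr);
 *	if (meta && (meta->flags & XDP_TXMD_FLAGS_TIMESTAMP))
 *		... request a HW Tx timestamp completion for this frame ...
 */
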
static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);

	xp_dma_sync_for_cpu(xskb);
}

static inline void xsk_buff_raw_dma_sync_for_device(struct xsk_buff_pool *pool,
						    dma_addr_t dma,
						    size_t size)
{
	xp_dma_sync_for_device(pool, dma, size);
}

#else

static inline void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries)
{
}

static inline bool xsk_tx_peek_desc(struct xsk_buff_pool *pool,
				    struct xdp_desc *desc)
{
	return false;
}

static inline u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 max)
{
	return 0;
}

static inline void xsk_tx_release(struct xsk_buff_pool *pool)
{
}

static inline struct xsk_buff_pool *
xsk_get_pool_from_qid(struct net_device *dev, u16 queue_id)
{
	return NULL;
}

static inline void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool)
{
}

static inline void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool)
{
}

static inline void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool)
{
}

static inline void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool)
{
}

static inline bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool)
{
	return false;
}

static inline u32 xsk_pool_get_headroom(struct xsk_buff_pool *pool)
{
	return 0;
}

static inline u32 xsk_pool_get_chunk_size(struct xsk_buff_pool *pool)
{
	return 0;
}

static inline u32 xsk_pool_get_rx_frame_size(struct xsk_buff_pool *pool)
{
	return 0;
}

static inline void xsk_pool_set_rxq_info(struct xsk_buff_pool *pool,
					 struct xdp_rxq_info *rxq)
{
}

static inline void xsk_pool_fill_cb(struct xsk_buff_pool *pool,
				    struct xsk_cb_desc *desc)
{
}

static inline void xsk_pool_dma_unmap(struct xsk_buff_pool *pool,
				      unsigned long attrs)
{
}

static inline int xsk_pool_dma_map(struct xsk_buff_pool *pool,
				   struct device *dev, unsigned long attrs)
{
	return 0;
}

static inline dma_addr_t xsk_buff_xdp_get_dma(struct xdp_buff *xdp)
{
	return 0;
}

static inline dma_addr_t xsk_buff_xdp_get_frame_dma(struct xdp_buff *xdp)
{
	return 0;
}

static inline struct xdp_buff *xsk_buff_alloc(struct xsk_buff_pool *pool)
{
	return NULL;
}

static inline bool xsk_is_eop_desc(const struct xdp_desc *desc)
{
	return false;
}

static inline u32 xsk_buff_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max)
{
	return 0;
}

static inline bool xsk_buff_can_alloc(struct xsk_buff_pool *pool, u32 count)
{
	return false;
}

static inline void xsk_buff_free(struct xdp_buff *xdp)
{
}

static inline bool xsk_buff_add_frag(struct xdp_buff *head,
				     struct xdp_buff *xdp)
{
	return false;
}

static inline struct xdp_buff *xsk_buff_get_frag(const struct xdp_buff *first)
{
	return NULL;
}

static inline void xsk_buff_del_frag(struct xdp_buff *xdp)
{
}

static inline struct xdp_buff *xsk_buff_get_head(struct xdp_buff *first)
{
	return NULL;
}

static inline struct xdp_buff *xsk_buff_get_tail(struct xdp_buff *first)
{
	return NULL;
}

static inline void xsk_buff_set_size(struct xdp_buff *xdp, u32 size)
{
}

static inline dma_addr_t xsk_buff_raw_get_dma(struct xsk_buff_pool *pool,
					      u64 addr)
{
	return 0;
}

static inline void *xsk_buff_raw_get_data(struct xsk_buff_pool *pool, u64 addr)
{
	return NULL;
}

static inline struct xdp_desc_ctx
xsk_buff_raw_get_ctx(const struct xsk_buff_pool *pool, u64 addr)
{
	return (struct xdp_desc_ctx){ };
}

static inline bool xsk_buff_valid_tx_metadata(struct xsk_tx_metadata *meta)
{
	return false;
}

static inline struct xsk_tx_metadata *
__xsk_buff_get_metadata(const struct xsk_buff_pool *pool, void *data)
{
	return NULL;
}

static inline struct xsk_tx_metadata *
xsk_buff_get_metadata(struct xsk_buff_pool *pool, u64 addr)
{
	return NULL;
}

static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp)
{
}

static inline void xsk_buff_raw_dma_sync_for_device(struct xsk_buff_pool *pool,
						    dma_addr_t dma,
						    size_t size)
{
}

#endif /* CONFIG_XDP_SOCKETS */

#endif /* _LINUX_XDP_SOCK_DRV_H */