/* SPDX-License-Identifier: GPL-2.0 */
/* Interface for implementing AF_XDP zero-copy support in drivers.
 * Copyright(c) 2020 Intel Corporation.
 */
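
/*
 * Most of the helpers below are thin wrappers around the xsk buff pool
 * (xp_*()) internals; see Documentation/networking/af_xdp.rst for the
 * overall zero-copy driver model.
 */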

#ifndef _LINUX_XDP_SOCK_DRV_H
#define _LINUX_XDP_SOCK_DRV_H

#include <net/xdp_sock.h>
#include <net/xsk_buff_pool.h>

#define XDP_UMEM_MIN_CHUNK_SHIFT 11
#define XDP_UMEM_MIN_CHUNK_SIZE (1 << XDP_UMEM_MIN_CHUNK_SHIFT)
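/* i.e. the smallest supported UMEM chunk is 2 KiB */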

struct xsk_cb_desc {
	void *src;
	u8 off;
	u8 bytes;
};

#ifdef CONFIG_XDP_SOCKETS

void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries);
bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc);
u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 max);
void xsk_tx_release(struct xsk_buff_pool *pool);
struct xsk_buff_pool *xsk_get_pool_from_qid(struct net_device *dev,
					    u16 queue_id);
void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool);
void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool);
void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool);
void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool);
bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool);

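/*
 * Each chunk reserves XDP_PACKET_HEADROOM plus any extra headroom the
 * socket asked for up front; the remainder of the chunk is available
 * as RX frame space.
 */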
static inline u32 xsk_pool_get_headroom(struct xsk_buff_pool *pool)
{
	return XDP_PACKET_HEADROOM + pool->headroom;
}

static inline u32 xsk_pool_get_chunk_size(struct xsk_buff_pool *pool)
{
	return pool->chunk_size;
}

static inline u32 xsk_pool_get_rx_frame_size(struct xsk_buff_pool *pool)
{
	return xsk_pool_get_chunk_size(pool) - xsk_pool_get_headroom(pool);
}

static inline void xsk_pool_set_rxq_info(struct xsk_buff_pool *pool,
					 struct xdp_rxq_info *rxq)
{
	xp_set_rxq_info(pool, rxq);
}

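/*
 * Store a small driver-defined blob in the private cb area of every
 * buffer in the pool: desc->bytes bytes from desc->src are copied at
 * offset desc->off.
 */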
static inline void xsk_pool_fill_cb(struct xsk_buff_pool *pool,
				    struct xsk_cb_desc *desc)
{
	xp_fill_cb(pool, desc);
}

static inline void xsk_pool_dma_unmap(struct xsk_buff_pool *pool,
				      unsigned long attrs)
{
	xp_dma_unmap(pool, attrs);
}

static inline int xsk_pool_dma_map(struct xsk_buff_pool *pool,
				   struct device *dev, unsigned long attrs)
{
	struct xdp_umem *umem = pool->umem;

	return xp_dma_map(pool, dev, attrs, umem->pgs, umem->npgs);
}
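
/*
 * Illustrative setup sketch (error handling trimmed; the DMA attr is an
 * example, not a requirement):
 *
 *	pool = xsk_get_pool_from_qid(netdev, qid);
 *	if (!pool)
 *		return -EINVAL;
 *	err = xsk_pool_dma_map(pool, dma_dev, DMA_ATTR_SKIP_CPU_SYNC);
 *
 * Teardown typically passes the same attrs to xsk_pool_dma_unmap().
 */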

static inline dma_addr_t xsk_buff_xdp_get_dma(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);

	return xp_get_dma(xskb);
}

static inline dma_addr_t xsk_buff_xdp_get_frame_dma(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);

	return xp_get_frame_dma(xskb);
}

static inline struct xdp_buff *xsk_buff_alloc(struct xsk_buff_pool *pool)
{
	return xp_alloc(pool);
}

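/* True when @desc is the last (or only) frag of a frame, i.e. the
 * XDP_PKT_CONTD option bit is not set.
 */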
static inline bool xsk_is_eop_desc(const struct xdp_desc *desc)
{
	return !xp_mb_desc(desc);
}

/* Returns as many entries as possible up to max. 0 <= N <= max. */
static inline u32 xsk_buff_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max)
{
	return xp_alloc_batch(pool, xdp, max);
}
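
/*
 * Illustrative RX-fill sketch (the ring layout is hypothetical):
 *
 *	nb = xsk_buff_alloc_batch(pool, ring->xdp_bufs, free_slots);
 *	for (i = 0; i < nb; i++) {
 *		dma_addr_t dma = xsk_buff_xdp_get_dma(ring->xdp_bufs[i]);
 *
 *		... write dma into the i-th HW RX descriptor ...
 *	}
 */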

static inline bool xsk_buff_can_alloc(struct xsk_buff_pool *pool, u32 count)
{
	return xp_can_alloc(pool, count);
}

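/* Return @xdp to its pool; for a multi-buffer frame this also frees all
 * frags queued on the pool's xskb_list.
 */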
static inline void xsk_buff_free(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
	struct list_head *xskb_list = &xskb->pool->xskb_list;
	struct xdp_buff_xsk *pos, *tmp;

	if (likely(!xdp_buff_has_frags(xdp)))
		goto out;

	list_for_each_entry_safe(pos, tmp, xskb_list, list_node) {
		list_del(&pos->list_node);
		xp_free(pos);
	}

	xdp_get_shared_info_from_buff(xdp)->nr_frags = 0;
out:
	xp_free(xskb);
}

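/* Attach @xdp as a frag of @head. Returns false if the frag could not
 * be added (e.g. the frame already carries the maximum number of
 * frags), in which case the caller is expected to drop the frame.
 */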
static inline bool xsk_buff_add_frag(struct xdp_buff *head,
				     struct xdp_buff *xdp)
{
	const void *data = xdp->data;
	struct xdp_buff_xsk *frag;

	if (!__xdp_buff_add_frag(head, virt_to_netmem(data),
				 offset_in_page(data), xdp->data_end - data,
				 xdp->frame_sz, false))
		return false;

	frag = container_of(xdp, struct xdp_buff_xsk, xdp);
	list_add_tail(&frag->list_node, &frag->pool->xskb_list);

	return true;
}

static inline struct xdp_buff *xsk_buff_get_frag(const struct xdp_buff *first)
{
	struct xdp_buff_xsk *xskb = container_of(first, struct xdp_buff_xsk, xdp);
	struct xdp_buff *ret = NULL;
	struct xdp_buff_xsk *frag;

	frag = list_first_entry_or_null(&xskb->pool->xskb_list,
					struct xdp_buff_xsk, list_node);
	if (frag) {
		list_del(&frag->list_node);
		ret = &frag->xdp;
	}

	return ret;
}

static inline void xsk_buff_del_tail(struct xdp_buff *tail)
{
	struct xdp_buff_xsk *xskb = container_of(tail, struct xdp_buff_xsk, xdp);

	list_del(&xskb->list_node);
}

static inline struct xdp_buff *xsk_buff_get_tail(struct xdp_buff *first)
{
	struct xdp_buff_xsk *xskb = container_of(first, struct xdp_buff_xsk, xdp);
	struct xdp_buff_xsk *frag;

	frag = list_last_entry(&xskb->pool->xskb_list, struct xdp_buff_xsk,
			       list_node);
	return &frag->xdp;
}

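/* Reset @xdp's data pointers for a received frame of @size bytes placed
 * after the standard XDP_PACKET_HEADROOM; metadata and flags are
 * cleared.
 */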
static inline void xsk_buff_set_size(struct xdp_buff *xdp, u32 size)
{
	xdp->data = xdp->data_hard_start + XDP_PACKET_HEADROOM;
	xdp->data_meta = xdp->data;
	xdp->data_end = xdp->data + size;
	xdp->flags = 0;
}

static inline dma_addr_t xsk_buff_raw_get_dma(struct xsk_buff_pool *pool,
					      u64 addr)
{
	return xp_raw_get_dma(pool, addr);
}

static inline void *xsk_buff_raw_get_data(struct xsk_buff_pool *pool, u64 addr)
{
	return xp_raw_get_data(pool, addr);
}

/**
 * xsk_buff_raw_get_ctx - get &xdp_desc context
 * @pool: XSK buff pool the desc address belongs to
 * @addr: desc address (from userspace)
 *
 * Wrapper for xp_raw_get_ctx() to be used in drivers, see its kdoc for
 * details.
 *
 * Return: new &xdp_desc_ctx struct containing the desc's DMA address and
 * its metadata pointer, if the latter is present and valid (%NULL
 * otherwise).
 */
static inline struct xdp_desc_ctx
xsk_buff_raw_get_ctx(const struct xsk_buff_pool *pool, u64 addr)
{
	return xp_raw_get_ctx(pool, addr);
}

#define XDP_TXMD_FLAGS_VALID ( \
		XDP_TXMD_FLAGS_TIMESTAMP | \
		XDP_TXMD_FLAGS_CHECKSUM | \
	0)

static inline bool
xsk_buff_valid_tx_metadata(const struct xsk_tx_metadata *meta)
{
	return !(meta->flags & ~XDP_TXMD_FLAGS_VALID);
}

static inline struct xsk_tx_metadata *
__xsk_buff_get_metadata(const struct xsk_buff_pool *pool, void *data)
{
	struct xsk_tx_metadata *meta;

	if (!pool->tx_metadata_len)
		return NULL;

	meta = data - pool->tx_metadata_len;
	if (unlikely(!xsk_buff_valid_tx_metadata(meta)))
		return NULL; /* no way to signal the error to the user */

	return meta;
}

static inline struct xsk_tx_metadata *
xsk_buff_get_metadata(struct xsk_buff_pool *pool, u64 addr)
{
	return __xsk_buff_get_metadata(pool, xp_raw_get_data(pool, addr));
}
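
/*
 * Illustrative TX-path sketch (descriptor names are made up):
 *
 *	meta = xsk_buff_get_metadata(pool, desc->addr);
 *	if (meta && (meta->flags & XDP_TXMD_FLAGS_TIMESTAMP))
 *		... request a HW TX timestamp for this descriptor ...
 *
 * A NULL return means the socket did not enable TX metadata, or the
 * flags were invalid.
 */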

static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);

	xp_dma_sync_for_cpu(xskb);
}

static inline void xsk_buff_raw_dma_sync_for_device(struct xsk_buff_pool *pool,
						    dma_addr_t dma,
						    size_t size)
{
	xp_dma_sync_for_device(pool, dma, size);
}
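
/*
 * The two syncs above pair up in a driver: sync for the CPU after HW
 * DMA completes on RX, before the XDP program runs on the buffer; sync
 * for the device right before posting a buffer's DMA address to the HW
 * on TX.
 */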

#else
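
/* Stubs: with CONFIG_XDP_SOCKETS=n all of the above compiles away. */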

static inline void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries)
{
}

static inline bool xsk_tx_peek_desc(struct xsk_buff_pool *pool,
				    struct xdp_desc *desc)
{
	return false;
}

static inline u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 max)
{
	return 0;
}

static inline void xsk_tx_release(struct xsk_buff_pool *pool)
{
}

static inline struct xsk_buff_pool *
xsk_get_pool_from_qid(struct net_device *dev, u16 queue_id)
{
	return NULL;
}

static inline void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool)
{
}

static inline void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool)
{
}

static inline void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool)
{
}

static inline void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool)
{
}

static inline bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool)
{
	return false;
}

static inline u32 xsk_pool_get_headroom(struct xsk_buff_pool *pool)
{
	return 0;
}

static inline u32 xsk_pool_get_chunk_size(struct xsk_buff_pool *pool)
{
	return 0;
}

static inline u32 xsk_pool_get_rx_frame_size(struct xsk_buff_pool *pool)
{
	return 0;
}

static inline void xsk_pool_set_rxq_info(struct xsk_buff_pool *pool,
					 struct xdp_rxq_info *rxq)
{
}

static inline void xsk_pool_fill_cb(struct xsk_buff_pool *pool,
				    struct xsk_cb_desc *desc)
{
}

static inline void xsk_pool_dma_unmap(struct xsk_buff_pool *pool,
				      unsigned long attrs)
{
}

static inline int xsk_pool_dma_map(struct xsk_buff_pool *pool,
				   struct device *dev, unsigned long attrs)
{
	return 0;
}

static inline dma_addr_t xsk_buff_xdp_get_dma(struct xdp_buff *xdp)
{
	return 0;
}

static inline dma_addr_t xsk_buff_xdp_get_frame_dma(struct xdp_buff *xdp)
{
	return 0;
}

static inline struct xdp_buff *xsk_buff_alloc(struct xsk_buff_pool *pool)
{
	return NULL;
}

static inline bool xsk_is_eop_desc(const struct xdp_desc *desc)
{
	return false;
}

static inline u32 xsk_buff_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max)
{
	return 0;
}

static inline bool xsk_buff_can_alloc(struct xsk_buff_pool *pool, u32 count)
{
	return false;
}

static inline void xsk_buff_free(struct xdp_buff *xdp)
{
}

static inline bool xsk_buff_add_frag(struct xdp_buff *head,
				     struct xdp_buff *xdp)
{
	return false;
}

static inline struct xdp_buff *xsk_buff_get_frag(const struct xdp_buff *first)
{
	return NULL;
}

static inline void xsk_buff_del_tail(struct xdp_buff *tail)
{
}

static inline struct xdp_buff *xsk_buff_get_tail(struct xdp_buff *first)
{
	return NULL;
}

static inline void xsk_buff_set_size(struct xdp_buff *xdp, u32 size)
{
}

static inline dma_addr_t xsk_buff_raw_get_dma(struct xsk_buff_pool *pool,
					      u64 addr)
{
	return 0;
}

static inline void *xsk_buff_raw_get_data(struct xsk_buff_pool *pool, u64 addr)
{
	return NULL;
}

static inline struct xdp_desc_ctx
xsk_buff_raw_get_ctx(const struct xsk_buff_pool *pool, u64 addr)
{
	return (struct xdp_desc_ctx){ };
}

static inline bool xsk_buff_valid_tx_metadata(struct xsk_tx_metadata *meta)
{
	return false;
}

static inline struct xsk_tx_metadata *
__xsk_buff_get_metadata(const struct xsk_buff_pool *pool, void *data)
{
	return NULL;
}

static inline struct xsk_tx_metadata *
xsk_buff_get_metadata(struct xsk_buff_pool *pool, u64 addr)
{
	return NULL;
}

static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp)
{
}

static inline void xsk_buff_raw_dma_sync_for_device(struct xsk_buff_pool *pool,
						    dma_addr_t dma,
						    size_t size)
{
}

#endif /* CONFIG_XDP_SOCKETS */

#endif /* _LINUX_XDP_SOCK_DRV_H */