/* SPDX-License-Identifier: GPL-2.0 */
/* Interface for implementing AF_XDP zero-copy support in drivers.
 * Copyright(c) 2020 Intel Corporation.
 */

#ifndef _LINUX_XDP_SOCK_DRV_H
#define _LINUX_XDP_SOCK_DRV_H

#include <net/xdp_sock.h>
#include <net/xsk_buff_pool.h>

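/* UMEM chunks must be at least 2 KiB; chunk sizes below this floor are
 * rejected when the UMEM is registered.
 */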
#define XDP_UMEM_MIN_CHUNK_SHIFT 11
#define XDP_UMEM_MIN_CHUNK_SIZE (1 << XDP_UMEM_MIN_CHUNK_SHIFT)

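/* Describes a chunk of driver-private data to be copied into the cb area
 * of every buffer in a pool: @bytes bytes from @src are written at offset
 * @off. See xsk_pool_fill_cb().
 */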
struct xsk_cb_desc {
	void *src;
	u8 off;
	u8 bytes;
};

#ifdef CONFIG_XDP_SOCKETS

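/* Driver entry points into the AF_XDP core: TX descriptor handling, pool
 * lookup by queue id, and management of the need_wakeup flags.
 */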
void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries);
bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc);
u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 max);
void xsk_tx_release(struct xsk_buff_pool *pool);
struct xsk_buff_pool *xsk_get_pool_from_qid(struct net_device *dev,
					    u16 queue_id);
void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool);
void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool);
void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool);
void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool);
bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool);

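/* Headroom reserved at the start of every RX buffer: the mandatory
 * XDP_PACKET_HEADROOM plus any extra headroom configured by user space.
 */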
static inline u32 xsk_pool_get_headroom(struct xsk_buff_pool *pool)
{
	return XDP_PACKET_HEADROOM + pool->headroom;
}

static inline u32 xsk_pool_get_chunk_size(struct xsk_buff_pool *pool)
{
	return pool->chunk_size;
}

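/* Largest frame that fits in a single RX buffer: the chunk size minus the
 * reserved headroom.
 */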
static inline u32 xsk_pool_get_rx_frame_size(struct xsk_buff_pool *pool)
{
	return xsk_pool_get_chunk_size(pool) - xsk_pool_get_headroom(pool);
}

static inline void xsk_pool_set_rxq_info(struct xsk_buff_pool *pool,
					 struct xdp_rxq_info *rxq)
{
	xp_set_rxq_info(pool, rxq);
}

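/* Copy the driver-private data described by @desc into the cb area of
 * every buffer in the pool.
 */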
static inline void xsk_pool_fill_cb(struct xsk_buff_pool *pool,
				    struct xsk_cb_desc *desc)
{
	xp_fill_cb(pool, desc);
}

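/* NAPI id of the RX queue backing this pool, for use with busy polling.
 * Returns 0 when RX busy polling is compiled out.
 */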
static inline unsigned int xsk_pool_get_napi_id(struct xsk_buff_pool *pool)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	return pool->heads[0].xdp.rxq->napi_id;
#else
	return 0;
#endif
}

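/* DMA mapping helpers: all pages of the UMEM backing the pool are mapped
 * for, and unmapped from, the given device in one operation.
 */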
static inline void xsk_pool_dma_unmap(struct xsk_buff_pool *pool,
				      unsigned long attrs)
{
	xp_dma_unmap(pool, attrs);
}

static inline int xsk_pool_dma_map(struct xsk_buff_pool *pool,
				   struct device *dev, unsigned long attrs)
{
	struct xdp_umem *umem = pool->umem;

	return xp_dma_map(pool, dev, attrs, umem->pgs, umem->npgs);
}

static inline dma_addr_t xsk_buff_xdp_get_dma(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);

	return xp_get_dma(xskb);
}

static inline dma_addr_t xsk_buff_xdp_get_frame_dma(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);

	return xp_get_frame_dma(xskb);
}

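/* Allocate a single buffer from the pool. Returns NULL if no buffer is
 * available.
 */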
static inline struct xdp_buff *xsk_buff_alloc(struct xsk_buff_pool *pool)
{
	return xp_alloc(pool);
}

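/* True when @desc is the last (or only) frame of a packet, i.e. when the
 * XDP_PKT_CONTD option bit is not set.
 */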
static inline bool xsk_is_eop_desc(const struct xdp_desc *desc)
{
	return !xp_mb_desc(desc);
}

/* Returns as many entries as possible up to max. 0 <= N <= max. */
static inline u32 xsk_buff_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max)
{
	return xp_alloc_batch(pool, xdp, max);
}

static inline bool xsk_buff_can_alloc(struct xsk_buff_pool *pool, u32 count)
{
	return xp_can_alloc(pool, count);
}

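/* Return @xdp to its pool. For a multi-buffer frame, all fragments queued
 * on the pool's xskb_list are freed as well.
 */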
static inline void xsk_buff_free(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
	struct list_head *xskb_list = &xskb->pool->xskb_list;
	struct xdp_buff_xsk *pos, *tmp;

	if (likely(!xdp_buff_has_frags(xdp)))
		goto out;

	list_for_each_entry_safe(pos, tmp, xskb_list, list_node) {
		list_del(&pos->list_node);
		xp_free(pos);
	}

	xdp_get_shared_info_from_buff(xdp)->nr_frags = 0;
out:
	xp_free(xskb);
}

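/* Multi-buffer helpers: fragments are tracked both in the shared_info of
 * the head buffer and on the pool's xskb_list. xsk_buff_add_frag() fails
 * when the fragment table of the head buffer is full.
 */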
static inline bool xsk_buff_add_frag(struct xdp_buff *head,
				     struct xdp_buff *xdp)
{
	const void *data = xdp->data;
	struct xdp_buff_xsk *frag;

	if (!__xdp_buff_add_frag(head, virt_to_netmem(data),
				 offset_in_page(data), xdp->data_end - data,
				 xdp->frame_sz, false))
		return false;

	frag = container_of(xdp, struct xdp_buff_xsk, xdp);
	list_add_tail(&frag->list_node, &frag->pool->xskb_list);

	return true;
}

static inline struct xdp_buff *xsk_buff_get_frag(const struct xdp_buff *first)
{
	struct xdp_buff_xsk *xskb = container_of(first, struct xdp_buff_xsk, xdp);
	struct xdp_buff *ret = NULL;
	struct xdp_buff_xsk *frag;

	frag = list_first_entry_or_null(&xskb->pool->xskb_list,
					struct xdp_buff_xsk, list_node);
	if (frag) {
		list_del(&frag->list_node);
		ret = &frag->xdp;
	}

	return ret;
}

static inline void xsk_buff_del_tail(struct xdp_buff *tail)
{
	struct xdp_buff_xsk *xskb = container_of(tail, struct xdp_buff_xsk, xdp);

	list_del(&xskb->list_node);
}

static inline struct xdp_buff *xsk_buff_get_tail(struct xdp_buff *first)
{
	struct xdp_buff_xsk *xskb = container_of(first, struct xdp_buff_xsk, xdp);
	struct xdp_buff_xsk *frag;

	frag = list_last_entry(&xskb->pool->xskb_list, struct xdp_buff_xsk,
			       list_node);
	return &frag->xdp;
}

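/* Prepare a buffer for a received frame: reset the data pointers to
 * XDP_PACKET_HEADROOM past the hard start and set the length to @size.
 */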
static inline void xsk_buff_set_size(struct xdp_buff *xdp, u32 size)
{
	xdp->data = xdp->data_hard_start + XDP_PACKET_HEADROOM;
	xdp->data_meta = xdp->data;
	xdp->data_end = xdp->data + size;
	xdp->flags = 0;
}

static inline dma_addr_t xsk_buff_raw_get_dma(struct xsk_buff_pool *pool,
					      u64 addr)
{
	return xp_raw_get_dma(pool, addr);
}

static inline void *xsk_buff_raw_get_data(struct xsk_buff_pool *pool, u64 addr)
{
	return xp_raw_get_data(pool, addr);
}

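/* Mask of TX metadata flags the kernel currently understands; descriptors
 * carrying any other flag bits are treated as invalid.
 */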
#define XDP_TXMD_FLAGS_VALID ( \
		XDP_TXMD_FLAGS_TIMESTAMP | \
		XDP_TXMD_FLAGS_CHECKSUM | \
	0)

static inline bool
xsk_buff_valid_tx_metadata(const struct xsk_tx_metadata *meta)
{
	return !(meta->flags & ~XDP_TXMD_FLAGS_VALID);
}

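/* The TX metadata, when enabled for the pool, is placed by user space
 * immediately before the packet at @addr. Returns NULL if the pool has no
 * metadata configured or the metadata flags are invalid.
 */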
static inline struct xsk_tx_metadata *xsk_buff_get_metadata(struct xsk_buff_pool *pool, u64 addr)
{
	struct xsk_tx_metadata *meta;

	if (!pool->tx_metadata_len)
		return NULL;

	meta = xp_raw_get_data(pool, addr) - pool->tx_metadata_len;
	if (unlikely(!xsk_buff_valid_tx_metadata(meta)))
		return NULL; /* no way to signal the error to the user */

	return meta;
}

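/* DMA sync helpers for non-coherent systems: sync a buffer for CPU access
 * after RX, or a raw address range for device access before TX.
 */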
static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);

	xp_dma_sync_for_cpu(xskb);
}

static inline void xsk_buff_raw_dma_sync_for_device(struct xsk_buff_pool *pool,
						    dma_addr_t dma,
						    size_t size)
{
	xp_dma_sync_for_device(pool, dma, size);
}

#else

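/* Stubs used when AF_XDP sockets are compiled out (CONFIG_XDP_SOCKETS=n). */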
static inline void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries)
{
}

static inline bool xsk_tx_peek_desc(struct xsk_buff_pool *pool,
				    struct xdp_desc *desc)
{
	return false;
}

static inline u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 max)
{
	return 0;
}

static inline void xsk_tx_release(struct xsk_buff_pool *pool)
{
}

static inline struct xsk_buff_pool *
xsk_get_pool_from_qid(struct net_device *dev, u16 queue_id)
{
	return NULL;
}

static inline void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool)
{
}

static inline void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool)
{
}

static inline void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool)
{
}

static inline void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool)
{
}

static inline bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool)
{
	return false;
}

static inline u32 xsk_pool_get_headroom(struct xsk_buff_pool *pool)
{
	return 0;
}

static inline u32 xsk_pool_get_chunk_size(struct xsk_buff_pool *pool)
{
	return 0;
}

static inline u32 xsk_pool_get_rx_frame_size(struct xsk_buff_pool *pool)
{
	return 0;
}

static inline void xsk_pool_set_rxq_info(struct xsk_buff_pool *pool,
					 struct xdp_rxq_info *rxq)
{
}

static inline void xsk_pool_fill_cb(struct xsk_buff_pool *pool,
				    struct xsk_cb_desc *desc)
{
}

static inline unsigned int xsk_pool_get_napi_id(struct xsk_buff_pool *pool)
{
	return 0;
}

static inline void xsk_pool_dma_unmap(struct xsk_buff_pool *pool,
				      unsigned long attrs)
{
}

static inline int xsk_pool_dma_map(struct xsk_buff_pool *pool,
				   struct device *dev, unsigned long attrs)
{
	return 0;
}

static inline dma_addr_t xsk_buff_xdp_get_dma(struct xdp_buff *xdp)
{
	return 0;
}

static inline dma_addr_t xsk_buff_xdp_get_frame_dma(struct xdp_buff *xdp)
{
	return 0;
}

static inline struct xdp_buff *xsk_buff_alloc(struct xsk_buff_pool *pool)
{
	return NULL;
}

static inline bool xsk_is_eop_desc(const struct xdp_desc *desc)
{
	return false;
}

static inline u32 xsk_buff_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max)
{
	return 0;
}

static inline bool xsk_buff_can_alloc(struct xsk_buff_pool *pool, u32 count)
{
	return false;
}

static inline void xsk_buff_free(struct xdp_buff *xdp)
{
}

static inline bool xsk_buff_add_frag(struct xdp_buff *head,
				     struct xdp_buff *xdp)
{
	return false;
}

static inline struct xdp_buff *xsk_buff_get_frag(const struct xdp_buff *first)
{
	return NULL;
}

static inline void xsk_buff_del_tail(struct xdp_buff *tail)
{
}

static inline struct xdp_buff *xsk_buff_get_tail(struct xdp_buff *first)
{
	return NULL;
}

static inline void xsk_buff_set_size(struct xdp_buff *xdp, u32 size)
{
}

static inline dma_addr_t xsk_buff_raw_get_dma(struct xsk_buff_pool *pool,
					      u64 addr)
{
	return 0;
}

static inline void *xsk_buff_raw_get_data(struct xsk_buff_pool *pool, u64 addr)
{
	return NULL;
}

static inline bool xsk_buff_valid_tx_metadata(struct xsk_tx_metadata *meta)
{
	return false;
}

static inline struct xsk_tx_metadata *xsk_buff_get_metadata(struct xsk_buff_pool *pool, u64 addr)
{
	return NULL;
}

static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp)
{
}

static inline void xsk_buff_raw_dma_sync_for_device(struct xsk_buff_pool *pool,
						    dma_addr_t dma,
						    size_t size)
{
}

#endif /* CONFIG_XDP_SOCKETS */

#endif /* _LINUX_XDP_SOCK_DRV_H */