/* SPDX-License-Identifier: GPL-2.0 */
/* Interface for implementing AF_XDP zero-copy support in drivers.
 * Copyright(c) 2020 Intel Corporation.
 */

#ifndef _LINUX_XDP_SOCK_DRV_H
#define _LINUX_XDP_SOCK_DRV_H

#include <net/xdp_sock.h>
#include <net/xsk_buff_pool.h>

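/* Smallest chunk (i.e. frame) size a UMEM may be configured with: 2 KiB. */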
#define XDP_UMEM_MIN_CHUNK_SHIFT 11
#define XDP_UMEM_MIN_CHUNK_SIZE (1 << XDP_UMEM_MIN_CHUNK_SHIFT)

struct xsk_cb_desc {
	void *src;
	u8 off;
	u8 bytes;
};

#ifdef CONFIG_XDP_SOCKETS

void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries);
bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc);
u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 max);
void xsk_tx_release(struct xsk_buff_pool *pool);
struct xsk_buff_pool *xsk_get_pool_from_qid(struct net_device *dev,
					    u16 queue_id);
void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool);
void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool);
void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool);
void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool);
bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool);
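
/* A minimal sketch (not part of this header) of how a driver's NAPI RX poll
 * might drive the need_wakeup flags; "failure" and the ring fields are
 * illustrative assumptions modelled on existing zero-copy drivers:
 *
 *	if (xsk_uses_need_wakeup(pool)) {
 *		if (failure || ring->next_to_clean == ring->next_to_use)
 *			xsk_set_rx_need_wakeup(pool);	// ask user space to kick us
 *		else
 *			xsk_clear_rx_need_wakeup(pool);
 *	}
 */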

static inline u32 xsk_pool_get_headroom(struct xsk_buff_pool *pool)
{
	return XDP_PACKET_HEADROOM + pool->headroom;
}

static inline u32 xsk_pool_get_chunk_size(struct xsk_buff_pool *pool)
{
	return pool->chunk_size;
}

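/* Largest frame an RX buffer from this pool can hold: the chunk size less
 * the headroom reserved in front of each buffer.
 */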
static inline u32 xsk_pool_get_rx_frame_size(struct xsk_buff_pool *pool)
{
	return xsk_pool_get_chunk_size(pool) - xsk_pool_get_headroom(pool);
}

static inline void xsk_pool_set_rxq_info(struct xsk_buff_pool *pool,
					 struct xdp_rxq_info *rxq)
{
	xp_set_rxq_info(pool, rxq);
}

static inline void xsk_pool_fill_cb(struct xsk_buff_pool *pool,
				    struct xsk_cb_desc *desc)
{
	xp_fill_cb(pool, desc);
}

static inline unsigned int xsk_pool_get_napi_id(struct xsk_buff_pool *pool)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	return pool->heads[0].xdp.rxq->napi_id;
#else
	return 0;
#endif
}

static inline void xsk_pool_dma_unmap(struct xsk_buff_pool *pool,
				      unsigned long attrs)
{
	xp_dma_unmap(pool, attrs);
}

static inline int xsk_pool_dma_map(struct xsk_buff_pool *pool,
				   struct device *dev, unsigned long attrs)
{
	struct xdp_umem *umem = pool->umem;

	return xp_dma_map(pool, dev, attrs, umem->pgs, umem->npgs);
}
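
/* A minimal sketch (not part of this header) of the setup/teardown pairing a
 * driver might use when a pool is attached to a queue; the error label and
 * DMA attrs of 0 are illustrative assumptions:
 *
 *	err = xsk_pool_dma_map(pool, &pdev->dev, 0);
 *	if (err)
 *		goto err_unwind;		// hypothetical label
 *	...
 *	xsk_pool_dma_unmap(pool, 0);		// on detach
 */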

static inline dma_addr_t xsk_buff_xdp_get_dma(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);

	return xp_get_dma(xskb);
}

static inline dma_addr_t xsk_buff_xdp_get_frame_dma(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);

	return xp_get_frame_dma(xskb);
}

static inline struct xdp_buff *xsk_buff_alloc(struct xsk_buff_pool *pool)
{
	return xp_alloc(pool);
}

static inline bool xsk_is_eop_desc(struct xdp_desc *desc)
{
	return !xp_mb_desc(desc);
}

/* Returns as many entries as possible up to max. 0 <= N <= max. */
static inline u32 xsk_buff_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max)
{
	return xp_alloc_batch(pool, xdp, max);
}
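
/* A minimal sketch (not part of this header) of refilling an RX ring from
 * the pool; the batch size, ring structure and descriptor write are
 * illustrative assumptions:
 *
 *	struct xdp_buff *bufs[64];		// hypothetical batch
 *	u32 i, n = xsk_buff_alloc_batch(pool, bufs, 64);
 *
 *	for (i = 0; i < n; i++) {
 *		dma_addr_t dma = xsk_buff_xdp_get_dma(bufs[i]);
 *
 *		driver_post_rx_desc(ring, dma);	// hypothetical helper
 *	}
 */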

static inline bool xsk_buff_can_alloc(struct xsk_buff_pool *pool, u32 count)
{
	return xp_can_alloc(pool, count);
}

static inline void xsk_buff_free(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
	struct list_head *xskb_list = &xskb->pool->xskb_list;
	struct xdp_buff_xsk *pos, *tmp;

	if (likely(!xdp_buff_has_frags(xdp)))
		goto out;

	list_for_each_entry_safe(pos, tmp, xskb_list, xskb_list_node) {
		list_del(&pos->xskb_list_node);
		xp_free(pos);
	}

	xdp_get_shared_info_from_buff(xdp)->nr_frags = 0;
out:
	xp_free(xskb);
}

static inline void xsk_buff_add_frag(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *frag = container_of(xdp, struct xdp_buff_xsk, xdp);

	list_add_tail(&frag->xskb_list_node, &frag->pool->xskb_list);
}

static inline struct xdp_buff *xsk_buff_get_frag(struct xdp_buff *first)
{
	struct xdp_buff_xsk *xskb = container_of(first, struct xdp_buff_xsk, xdp);
	struct xdp_buff *ret = NULL;
	struct xdp_buff_xsk *frag;

	frag = list_first_entry_or_null(&xskb->pool->xskb_list,
					struct xdp_buff_xsk, xskb_list_node);
	if (frag) {
		list_del(&frag->xskb_list_node);
		ret = &frag->xdp;
	}

	return ret;
}

static inline void xsk_buff_del_tail(struct xdp_buff *tail)
{
	struct xdp_buff_xsk *xskb = container_of(tail, struct xdp_buff_xsk, xdp);

	list_del(&xskb->xskb_list_node);
}

static inline struct xdp_buff *xsk_buff_get_tail(struct xdp_buff *first)
{
	struct xdp_buff_xsk *xskb = container_of(first, struct xdp_buff_xsk, xdp);
	struct xdp_buff_xsk *frag;

	frag = list_last_entry(&xskb->pool->xskb_list, struct xdp_buff_xsk,
			       xskb_list_node);
	return &frag->xdp;
}
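
/* A minimal sketch (not part of this header) of consuming the frags of a
 * multi-buffer frame, e.g. when copying it into an skb on XDP_PASS; the skb
 * helper is an illustrative assumption:
 *
 *	struct xdp_buff *frag;
 *
 *	while ((frag = xsk_buff_get_frag(first))) {
 *		driver_copy_frag_to_skb(skb, frag);	// hypothetical helper
 *		xsk_buff_free(frag);	// frag data now owned by the skb
 *	}
 */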

static inline void xsk_buff_set_size(struct xdp_buff *xdp, u32 size)
{
	xdp->data = xdp->data_hard_start + XDP_PACKET_HEADROOM;
	xdp->data_meta = xdp->data;
	xdp->data_end = xdp->data + size;
	xdp->flags = 0;
}

static inline dma_addr_t xsk_buff_raw_get_dma(struct xsk_buff_pool *pool,
					      u64 addr)
{
	return xp_raw_get_dma(pool, addr);
}

static inline void *xsk_buff_raw_get_data(struct xsk_buff_pool *pool, u64 addr)
{
	return xp_raw_get_data(pool, addr);
}

#define XDP_TXMD_FLAGS_VALID ( \
		XDP_TXMD_FLAGS_TIMESTAMP | \
		XDP_TXMD_FLAGS_CHECKSUM | \
	0)

static inline bool xsk_buff_valid_tx_metadata(struct xsk_tx_metadata *meta)
{
	return !(meta->flags & ~XDP_TXMD_FLAGS_VALID);
}

static inline struct xsk_tx_metadata *xsk_buff_get_metadata(struct xsk_buff_pool *pool, u64 addr)
{
	struct xsk_tx_metadata *meta;

	if (!pool->tx_metadata_len)
		return NULL;

	meta = xp_raw_get_data(pool, addr) - pool->tx_metadata_len;
	if (unlikely(!xsk_buff_valid_tx_metadata(meta)))
		return NULL; /* no way to signal the error to the user */

	return meta;
}
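
/* A minimal sketch (not part of this header) of honouring TX metadata while
 * queueing a descriptor; the offload helper is an illustrative assumption:
 *
 *	struct xsk_tx_metadata *meta;
 *
 *	meta = xsk_buff_get_metadata(pool, desc->addr);
 *	if (meta && (meta->flags & XDP_TXMD_FLAGS_CHECKSUM))
 *		driver_request_csum_offload(txd,	// hypothetical helper
 *					    meta->request.csum_start,
 *					    meta->request.csum_offset);
 */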

static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);

	xp_dma_sync_for_cpu(xskb);
}

static inline void xsk_buff_raw_dma_sync_for_device(struct xsk_buff_pool *pool,
						    dma_addr_t dma,
						    size_t size)
{
	xp_dma_sync_for_device(pool, dma, size);
}
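
/* A minimal sketch (not part of this header) of where the syncs belong: sync
 * for the CPU before the XDP program reads a received frame, sync for the
 * device before the hardware is told to transmit:
 *
 *	// RX path, once the descriptor is known to be complete
 *	xsk_buff_set_size(xdp, len);
 *	xsk_buff_dma_sync_for_cpu(xdp);
 *
 *	// TX path, before ringing the doorbell
 *	dma = xsk_buff_raw_get_dma(pool, desc.addr);
 *	xsk_buff_raw_dma_sync_for_device(pool, dma, desc.len);
 */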
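/* Stubs used when CONFIG_XDP_SOCKETS is disabled: every call collapses to a
 * no-op or a "nothing available" return value, so drivers can call these
 * unconditionally.
 */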
#else

static inline void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries)
{
}

static inline bool xsk_tx_peek_desc(struct xsk_buff_pool *pool,
				    struct xdp_desc *desc)
{
	return false;
}

static inline u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 max)
{
	return 0;
}

static inline void xsk_tx_release(struct xsk_buff_pool *pool)
{
}

static inline struct xsk_buff_pool *
xsk_get_pool_from_qid(struct net_device *dev, u16 queue_id)
{
	return NULL;
}

static inline void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool)
{
}

static inline void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool)
{
}

static inline void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool)
{
}

static inline void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool)
{
}

static inline bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool)
{
	return false;
}

static inline u32 xsk_pool_get_headroom(struct xsk_buff_pool *pool)
{
	return 0;
}

static inline u32 xsk_pool_get_chunk_size(struct xsk_buff_pool *pool)
{
	return 0;
}

static inline u32 xsk_pool_get_rx_frame_size(struct xsk_buff_pool *pool)
{
	return 0;
}

static inline void xsk_pool_set_rxq_info(struct xsk_buff_pool *pool,
					 struct xdp_rxq_info *rxq)
{
}

static inline void xsk_pool_fill_cb(struct xsk_buff_pool *pool,
				    struct xsk_cb_desc *desc)
{
}

static inline unsigned int xsk_pool_get_napi_id(struct xsk_buff_pool *pool)
{
	return 0;
}

static inline void xsk_pool_dma_unmap(struct xsk_buff_pool *pool,
				      unsigned long attrs)
{
}

static inline int xsk_pool_dma_map(struct xsk_buff_pool *pool,
				   struct device *dev, unsigned long attrs)
{
	return 0;
}

static inline dma_addr_t xsk_buff_xdp_get_dma(struct xdp_buff *xdp)
{
	return 0;
}

static inline dma_addr_t xsk_buff_xdp_get_frame_dma(struct xdp_buff *xdp)
{
	return 0;
}

static inline struct xdp_buff *xsk_buff_alloc(struct xsk_buff_pool *pool)
{
	return NULL;
}

static inline bool xsk_is_eop_desc(struct xdp_desc *desc)
{
	return false;
}

static inline u32 xsk_buff_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max)
{
	return 0;
}

static inline bool xsk_buff_can_alloc(struct xsk_buff_pool *pool, u32 count)
{
	return false;
}

static inline void xsk_buff_free(struct xdp_buff *xdp)
{
}

static inline void xsk_buff_add_frag(struct xdp_buff *xdp)
{
}

static inline struct xdp_buff *xsk_buff_get_frag(struct xdp_buff *first)
{
	return NULL;
}

static inline void xsk_buff_del_tail(struct xdp_buff *tail)
{
}

static inline struct xdp_buff *xsk_buff_get_tail(struct xdp_buff *first)
{
	return NULL;
}

static inline void xsk_buff_set_size(struct xdp_buff *xdp, u32 size)
{
}

static inline dma_addr_t xsk_buff_raw_get_dma(struct xsk_buff_pool *pool,
					      u64 addr)
{
	return 0;
}

static inline void *xsk_buff_raw_get_data(struct xsk_buff_pool *pool, u64 addr)
{
	return NULL;
}

static inline bool xsk_buff_valid_tx_metadata(struct xsk_tx_metadata *meta)
{
	return false;
}

static inline struct xsk_tx_metadata *xsk_buff_get_metadata(struct xsk_buff_pool *pool, u64 addr)
{
	return NULL;
}

static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp)
{
}

static inline void xsk_buff_raw_dma_sync_for_device(struct xsk_buff_pool *pool,
						    dma_addr_t dma,
						    size_t size)
{
}

#endif /* CONFIG_XDP_SOCKETS */

#endif /* _LINUX_XDP_SOCK_DRV_H */