/* SPDX-License-Identifier: GPL-2.0 */
/* Interface for implementing AF_XDP zero-copy support in drivers.
 * Copyright(c) 2020 Intel Corporation.
 */

#ifndef _LINUX_XDP_SOCK_DRV_H
#define _LINUX_XDP_SOCK_DRV_H

#include <net/xdp_sock.h>
#include <net/xsk_buff_pool.h>

#define XDP_UMEM_MIN_CHUNK_SHIFT 11
#define XDP_UMEM_MIN_CHUNK_SIZE (1 << XDP_UMEM_MIN_CHUNK_SHIFT)

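/* Describes a blob of driver-private data that xsk_pool_fill_cb()
 * copies into the cb area of every buffer in a pool: @bytes bytes are
 * copied from @src to offset @off within each buffer's cb field.
 */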
struct xsk_cb_desc {
	void *src;
	u8 off;
	u8 bytes;
};

#ifdef CONFIG_XDP_SOCKETS

void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries);
bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc);
u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 max);
void xsk_tx_release(struct xsk_buff_pool *pool);
struct xsk_buff_pool *xsk_get_pool_from_qid(struct net_device *dev,
					    u16 queue_id);
void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool);
void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool);
void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool);
void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool);
bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool);

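/*
 * Example (illustrative sketch, not part of the API): a driver's NAPI
 * poll path typically flips the need_wakeup flags based on whether it
 * ran out of buffers, so that user space knows when a syscall kick is
 * required. "ring", "my_refill" and "budget" are hypothetical driver
 * names.
 *
 *	failure = !my_refill(ring, budget);
 *
 *	if (xsk_uses_need_wakeup(ring->pool)) {
 *		if (failure)
 *			xsk_set_rx_need_wakeup(ring->pool);
 *		else
 *			xsk_clear_rx_need_wakeup(ring->pool);
 *	}
 */
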
static inline u32 xsk_pool_get_headroom(struct xsk_buff_pool *pool)
{
	return XDP_PACKET_HEADROOM + pool->headroom;
}

static inline u32 xsk_pool_get_chunk_size(struct xsk_buff_pool *pool)
{
	return pool->chunk_size;
}

static inline u32 xsk_pool_get_rx_frame_size(struct xsk_buff_pool *pool)
{
	return xsk_pool_get_chunk_size(pool) - xsk_pool_get_headroom(pool);
}

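/*
 * Example (sketch): drivers usually derive their hardware RX buffer
 * length from the pool geometry, since each chunk must hold the
 * headroom as well as the frame. "rx_ring" is a hypothetical driver
 * structure.
 *
 *	rx_ring->rx_buf_len = xsk_pool_get_rx_frame_size(pool);
 */
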
static inline void xsk_pool_set_rxq_info(struct xsk_buff_pool *pool,
					 struct xdp_rxq_info *rxq)
{
	xp_set_rxq_info(pool, rxq);
}

static inline void xsk_pool_fill_cb(struct xsk_buff_pool *pool,
				    struct xsk_cb_desc *desc)
{
	xp_fill_cb(pool, desc);
}

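/*
 * Example (sketch): a driver that wants per-buffer private state can
 * seed the cb area of every buffer in the pool once, at attach time.
 * "struct my_cb" and "ring" are hypothetical.
 *
 *	struct my_cb cb = { .ring = ring };
 *	struct xsk_cb_desc desc = {
 *		.src = &cb,
 *		.off = 0,
 *		.bytes = sizeof(cb),
 *	};
 *
 *	xsk_pool_fill_cb(pool, &desc);
 */
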
static inline unsigned int xsk_pool_get_napi_id(struct xsk_buff_pool *pool)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	return pool->heads[0].xdp.rxq->napi_id;
#else
	return 0;
#endif
}

static inline void xsk_pool_dma_unmap(struct xsk_buff_pool *pool,
				      unsigned long attrs)
{
	xp_dma_unmap(pool, attrs);
}

static inline int xsk_pool_dma_map(struct xsk_buff_pool *pool,
				   struct device *dev, unsigned long attrs)
{
	struct xdp_umem *umem = pool->umem;

	return xp_dma_map(pool, dev, attrs, umem->pgs, umem->npgs);
}

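/*
 * Example (sketch): the pool is DMA-mapped once when it is attached to
 * a queue and unmapped again on detach. Error handling is elided and
 * "pdev" is the driver's hypothetical PCI device.
 *
 *	err = xsk_pool_dma_map(pool, &pdev->dev,
 *			       DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING);
 *	...
 *	xsk_pool_dma_unmap(pool,
 *			   DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING);
 */
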
static inline dma_addr_t xsk_buff_xdp_get_dma(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);

	return xp_get_dma(xskb);
}

static inline dma_addr_t xsk_buff_xdp_get_frame_dma(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);

	return xp_get_frame_dma(xskb);
}

static inline struct xdp_buff *xsk_buff_alloc(struct xsk_buff_pool *pool)
{
	return xp_alloc(pool);
}

static inline bool xsk_is_eop_desc(const struct xdp_desc *desc)
{
	return !xp_mb_desc(desc);
}

/* Returns as many entries as possible up to max. 0 <= N <= max. */
static inline u32 xsk_buff_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max)
{
	return xp_alloc_batch(pool, xdp, max);
}

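/*
 * Example (sketch): a zero-copy RX refill loop. The batch variant
 * amortizes fill-ring accesses; each returned buffer is translated to
 * a DMA address for the hardware descriptor. "ring" and
 * "my_write_rx_desc" are hypothetical.
 *
 *	u32 i, nb;
 *
 *	nb = xsk_buff_alloc_batch(pool, ring->xdp_buffs, budget);
 *	for (i = 0; i < nb; i++) {
 *		dma_addr_t dma = xsk_buff_xdp_get_dma(ring->xdp_buffs[i]);
 *
 *		my_write_rx_desc(ring, i, dma);
 *	}
 */
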
static inline bool xsk_buff_can_alloc(struct xsk_buff_pool *pool, u32 count)
{
	return xp_can_alloc(pool, count);
}

static inline void xsk_buff_free(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
	struct list_head *xskb_list = &xskb->pool->xskb_list;
	struct xdp_buff_xsk *pos, *tmp;

	if (likely(!xdp_buff_has_frags(xdp)))
		goto out;

	list_for_each_entry_safe(pos, tmp, xskb_list, list_node) {
		list_del(&pos->list_node);
		xp_free(pos);
	}

	xdp_get_shared_info_from_buff(xdp)->nr_frags = 0;
out:
	xp_free(xskb);
}

static inline void xsk_buff_add_frag(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *frag = container_of(xdp, struct xdp_buff_xsk, xdp);

	list_add_tail(&frag->list_node, &frag->pool->xskb_list);
}

static inline struct xdp_buff *xsk_buff_get_frag(const struct xdp_buff *first)
{
	struct xdp_buff_xsk *xskb = container_of(first, struct xdp_buff_xsk, xdp);
	struct xdp_buff *ret = NULL;
	struct xdp_buff_xsk *frag;

	frag = list_first_entry_or_null(&xskb->pool->xskb_list,
					struct xdp_buff_xsk, list_node);
	if (frag) {
		list_del(&frag->list_node);
		ret = &frag->xdp;
	}

	return ret;
}

static inline void xsk_buff_del_tail(struct xdp_buff *tail)
{
	struct xdp_buff_xsk *xskb = container_of(tail, struct xdp_buff_xsk, xdp);

	list_del(&xskb->list_node);
}

static inline struct xdp_buff *xsk_buff_get_tail(struct xdp_buff *first)
{
	struct xdp_buff_xsk *xskb = container_of(first, struct xdp_buff_xsk, xdp);
	struct xdp_buff_xsk *frag;

	frag = list_last_entry(&xskb->pool->xskb_list, struct xdp_buff_xsk,
			       list_node);
	return &frag->xdp;
}

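/*
 * Example (sketch): for a frame that spans several chunks, a driver
 * keeps the first buffer as the head and queues every continuation
 * buffer with xsk_buff_add_frag(); the hardware's end-of-packet
 * indication tells it when the frame is complete. "first" and "eop"
 * are hypothetical driver state.
 *
 *	if (!first)
 *		first = xdp;
 *	else
 *		xsk_buff_add_frag(xdp);
 *
 *	if (!eop)
 *		return;
 *
 *	act = bpf_prog_run_xdp(prog, first);
 */
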
static inline void xsk_buff_set_size(struct xdp_buff *xdp, u32 size)
{
	xdp->data = xdp->data_hard_start + XDP_PACKET_HEADROOM;
	xdp->data_meta = xdp->data;
	xdp->data_end = xdp->data + size;
	xdp->flags = 0;
}

static inline dma_addr_t xsk_buff_raw_get_dma(struct xsk_buff_pool *pool,
					      u64 addr)
{
	return xp_raw_get_dma(pool, addr);
}

static inline void *xsk_buff_raw_get_data(struct xsk_buff_pool *pool, u64 addr)
{
	return xp_raw_get_data(pool, addr);
}

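/*
 * Example (sketch): a zero-copy TX loop. Descriptors are peeked from
 * the Tx ring, the payload address is translated to a DMA handle and
 * synced for the device before the hardware is told about it.
 * "my_xmit_desc", "ring" and "budget" are hypothetical.
 *
 *	struct xdp_desc desc;
 *
 *	while (budget-- && xsk_tx_peek_desc(pool, &desc)) {
 *		dma_addr_t dma = xsk_buff_raw_get_dma(pool, desc.addr);
 *
 *		xsk_buff_raw_dma_sync_for_device(pool, dma, desc.len);
 *		my_xmit_desc(ring, dma, desc.len);
 *	}
 *	xsk_tx_release(pool);
 */
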
#define XDP_TXMD_FLAGS_VALID ( \
		XDP_TXMD_FLAGS_TIMESTAMP | \
		XDP_TXMD_FLAGS_CHECKSUM | \
	0)

static inline bool
xsk_buff_valid_tx_metadata(const struct xsk_tx_metadata *meta)
{
	return !(meta->flags & ~XDP_TXMD_FLAGS_VALID);
}

static inline struct xsk_tx_metadata *xsk_buff_get_metadata(struct xsk_buff_pool *pool, u64 addr)
{
	struct xsk_tx_metadata *meta;

	if (!pool->tx_metadata_len)
		return NULL;

	meta = xp_raw_get_data(pool, addr) - pool->tx_metadata_len;
	if (unlikely(!xsk_buff_valid_tx_metadata(meta)))
		return NULL; /* no way to signal the error to the user */

	return meta;
}

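/*
 * Example (sketch): in its TX path, a driver with TX metadata support
 * looks the metadata up by descriptor address and hands it to the
 * generic request helper together with its own ops table.
 * "my_metadata_ops" and "priv" are hypothetical.
 *
 *	struct xsk_tx_metadata *meta;
 *
 *	meta = xsk_buff_get_metadata(pool, desc.addr);
 *	if (meta)
 *		xsk_tx_metadata_request(meta, &my_metadata_ops, priv);
 */
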
static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);

	xp_dma_sync_for_cpu(xskb);
}

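/*
 * Example (sketch): on RX completion the driver sizes the buffer from
 * the hardware descriptor and syncs it for the CPU before running the
 * XDP program. "len" comes from the hypothetical completion
 * descriptor.
 *
 *	xsk_buff_set_size(xdp, len);
 *	xsk_buff_dma_sync_for_cpu(xdp);
 *	act = bpf_prog_run_xdp(prog, xdp);
 */
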
static inline void xsk_buff_raw_dma_sync_for_device(struct xsk_buff_pool *pool,
						    dma_addr_t dma,
						    size_t size)
{
	xp_dma_sync_for_device(pool, dma, size);
}

#else /* !CONFIG_XDP_SOCKETS */

static inline void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries)
{
}

static inline bool xsk_tx_peek_desc(struct xsk_buff_pool *pool,
				    struct xdp_desc *desc)
{
	return false;
}

static inline u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 max)
{
	return 0;
}

static inline void xsk_tx_release(struct xsk_buff_pool *pool)
{
}

static inline struct xsk_buff_pool *
xsk_get_pool_from_qid(struct net_device *dev, u16 queue_id)
{
	return NULL;
}

static inline void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool)
{
}

static inline void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool)
{
}

static inline void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool)
{
}

static inline void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool)
{
}

static inline bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool)
{
	return false;
}

static inline u32 xsk_pool_get_headroom(struct xsk_buff_pool *pool)
{
	return 0;
}

static inline u32 xsk_pool_get_chunk_size(struct xsk_buff_pool *pool)
{
	return 0;
}

static inline u32 xsk_pool_get_rx_frame_size(struct xsk_buff_pool *pool)
{
	return 0;
}

static inline void xsk_pool_set_rxq_info(struct xsk_buff_pool *pool,
					 struct xdp_rxq_info *rxq)
{
}

static inline void xsk_pool_fill_cb(struct xsk_buff_pool *pool,
				    struct xsk_cb_desc *desc)
{
}

static inline unsigned int xsk_pool_get_napi_id(struct xsk_buff_pool *pool)
{
	return 0;
}

static inline void xsk_pool_dma_unmap(struct xsk_buff_pool *pool,
				      unsigned long attrs)
{
}

static inline int xsk_pool_dma_map(struct xsk_buff_pool *pool,
				   struct device *dev, unsigned long attrs)
{
	return 0;
}

static inline dma_addr_t xsk_buff_xdp_get_dma(struct xdp_buff *xdp)
{
	return 0;
}

static inline dma_addr_t xsk_buff_xdp_get_frame_dma(struct xdp_buff *xdp)
{
	return 0;
}

static inline struct xdp_buff *xsk_buff_alloc(struct xsk_buff_pool *pool)
{
	return NULL;
}

static inline bool xsk_is_eop_desc(const struct xdp_desc *desc)
{
	return false;
}

static inline u32 xsk_buff_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max)
{
	return 0;
}

static inline bool xsk_buff_can_alloc(struct xsk_buff_pool *pool, u32 count)
{
	return false;
}

static inline void xsk_buff_free(struct xdp_buff *xdp)
{
}

static inline void xsk_buff_add_frag(struct xdp_buff *xdp)
{
}

static inline struct xdp_buff *xsk_buff_get_frag(const struct xdp_buff *first)
{
	return NULL;
}

static inline void xsk_buff_del_tail(struct xdp_buff *tail)
{
}

static inline struct xdp_buff *xsk_buff_get_tail(struct xdp_buff *first)
{
	return NULL;
}

static inline void xsk_buff_set_size(struct xdp_buff *xdp, u32 size)
{
}

static inline dma_addr_t xsk_buff_raw_get_dma(struct xsk_buff_pool *pool,
					      u64 addr)
{
	return 0;
}

static inline void *xsk_buff_raw_get_data(struct xsk_buff_pool *pool, u64 addr)
{
	return NULL;
}

static inline bool xsk_buff_valid_tx_metadata(const struct xsk_tx_metadata *meta)
{
	return false;
}

static inline struct xsk_tx_metadata *xsk_buff_get_metadata(struct xsk_buff_pool *pool, u64 addr)
{
	return NULL;
}

static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp)
{
}

static inline void xsk_buff_raw_dma_sync_for_device(struct xsk_buff_pool *pool,
						    dma_addr_t dma,
						    size_t size)
{
}

#endif /* CONFIG_XDP_SOCKETS */

#endif /* _LINUX_XDP_SOCK_DRV_H */