/* SPDX-License-Identifier: GPL-2.0 */
/* Interface for implementing AF_XDP zero-copy support in drivers.
 * Copyright(c) 2020 Intel Corporation.
 */

#ifndef _LINUX_XDP_SOCK_DRV_H
#define _LINUX_XDP_SOCK_DRV_H

#include <net/xdp_sock.h>
#include <net/xsk_buff_pool.h>

#define XDP_UMEM_MIN_CHUNK_SHIFT 11
#define XDP_UMEM_MIN_CHUNK_SIZE (1 << XDP_UMEM_MIN_CHUNK_SHIFT)

struct xsk_cb_desc {
	void *src;
	u8 off;
	u8 bytes;
};

#ifdef CONFIG_XDP_SOCKETS

void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries);
bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc);
u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 max);
void xsk_tx_release(struct xsk_buff_pool *pool);
struct xsk_buff_pool *xsk_get_pool_from_qid(struct net_device *dev,
					    u16 queue_id);
void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool);
void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool);
void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool);
void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool);
bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool);

static inline u32 xsk_pool_get_headroom(struct xsk_buff_pool *pool)
{
	return XDP_PACKET_HEADROOM + pool->headroom;
}

static inline u32 xsk_pool_get_chunk_size(struct xsk_buff_pool *pool)
{
	return pool->chunk_size;
}

static inline u32 xsk_pool_get_rx_frame_size(struct xsk_buff_pool *pool)
{
	return xsk_pool_get_chunk_size(pool) - xsk_pool_get_headroom(pool);
}
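
/* Worked example (illustrative): with the common 4096-byte chunk size, zero
 * extra pool->headroom and the fixed 256-byte XDP_PACKET_HEADROOM, the
 * largest frame a chunk can receive is 4096 - 256 = 3840 bytes.
 */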

static inline void xsk_pool_set_rxq_info(struct xsk_buff_pool *pool,
					 struct xdp_rxq_info *rxq)
{
	xp_set_rxq_info(pool, rxq);
}

static inline void xsk_pool_fill_cb(struct xsk_buff_pool *pool,
				    struct xsk_cb_desc *desc)
{
	xp_fill_cb(pool, desc);
}

static inline void xsk_pool_dma_unmap(struct xsk_buff_pool *pool,
				      unsigned long attrs)
{
	xp_dma_unmap(pool, attrs);
}

static inline int xsk_pool_dma_map(struct xsk_buff_pool *pool,
				   struct device *dev, unsigned long attrs)
{
	struct xdp_umem *umem = pool->umem;

	return xp_dma_map(pool, dev, attrs, umem->pgs, umem->npgs);
}
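
/* Illustrative sketch, not part of this API: a driver typically maps the
 * pool once when the AF_XDP queue is enabled and unmaps it on teardown.
 * The mydrv_* names are hypothetical; DMA attrs of 0 are just a placeholder.
 *
 *	static int mydrv_xsk_pool_enable(struct net_device *dev, u16 qid)
 *	{
 *		struct xsk_buff_pool *pool = xsk_get_pool_from_qid(dev, qid);
 *
 *		if (!pool)
 *			return -EINVAL;
 *		return xsk_pool_dma_map(pool, dev->dev.parent, 0);
 *	}
 *
 *	static void mydrv_xsk_pool_disable(struct net_device *dev, u16 qid)
 *	{
 *		struct xsk_buff_pool *pool = xsk_get_pool_from_qid(dev, qid);
 *
 *		if (pool)
 *			xsk_pool_dma_unmap(pool, 0);
 *	}
 */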

static inline dma_addr_t xsk_buff_xdp_get_dma(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);

	return xp_get_dma(xskb);
}

static inline dma_addr_t xsk_buff_xdp_get_frame_dma(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);

	return xp_get_frame_dma(xskb);
}

static inline struct xdp_buff *xsk_buff_alloc(struct xsk_buff_pool *pool)
{
	return xp_alloc(pool);
}

static inline bool xsk_is_eop_desc(const struct xdp_desc *desc)
{
	return !xp_mb_desc(desc);
}

/* Returns as many entries as possible up to max. 0 <= N <= max. */
static inline u32 xsk_buff_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max)
{
	return xp_alloc_batch(pool, xdp, max);
}
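
/* Illustrative sketch, not part of this API: refilling an RX ring from the
 * pool with the batch allocator. The ring layout and mydrv_* helpers are
 * hypothetical; only the xsk_buff_* calls above are real.
 *
 *	static u32 mydrv_rx_refill(struct mydrv_ring *ring, u32 budget)
 *	{
 *		struct xdp_buff **bufs = ring->xsk_bufs;
 *		u32 i, n;
 *
 *		n = xsk_buff_alloc_batch(ring->pool, bufs, budget);
 *		for (i = 0; i < n; i++)
 *			mydrv_post_rx_desc(ring, xsk_buff_xdp_get_dma(bufs[i]));
 *
 *		// may return fewer than budget; retry on the next NAPI poll
 *		return n;
 *	}
 */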

static inline bool xsk_buff_can_alloc(struct xsk_buff_pool *pool, u32 count)
{
	return xp_can_alloc(pool, count);
}

static inline void xsk_buff_free(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
	struct list_head *xskb_list = &xskb->pool->xskb_list;
	struct xdp_buff_xsk *pos, *tmp;

	if (likely(!xdp_buff_has_frags(xdp)))
		goto out;

	/* Multi-buffer frame: return every fragment linked on the pool's
	 * xskb_list before freeing the head buffer itself.
	 */
	list_for_each_entry_safe(pos, tmp, xskb_list, list_node) {
		list_del(&pos->list_node);
		xp_free(pos);
	}

	xdp_get_shared_info_from_buff(xdp)->nr_frags = 0;
out:
	xp_free(xskb);
}

static inline bool xsk_buff_add_frag(struct xdp_buff *head,
				     struct xdp_buff *xdp)
{
	const void *data = xdp->data;
	struct xdp_buff_xsk *frag;

	if (!__xdp_buff_add_frag(head, virt_to_netmem(data),
				 offset_in_page(data), xdp->data_end - data,
				 xdp->frame_sz, false))
		return false;

	frag = container_of(xdp, struct xdp_buff_xsk, xdp);
	list_add_tail(&frag->list_node, &frag->pool->xskb_list);

	return true;
}

static inline struct xdp_buff *xsk_buff_get_frag(const struct xdp_buff *first)
{
	struct xdp_buff_xsk *xskb = container_of(first, struct xdp_buff_xsk, xdp);
	struct xdp_buff *ret = NULL;
	struct xdp_buff_xsk *frag;

	frag = list_first_entry_or_null(&xskb->pool->xskb_list,
					struct xdp_buff_xsk, list_node);
	if (frag) {
		list_del(&frag->list_node);
		ret = &frag->xdp;
	}

	return ret;
}

static inline void xsk_buff_del_tail(struct xdp_buff *tail)
{
	struct xdp_buff_xsk *xskb = container_of(tail, struct xdp_buff_xsk, xdp);

	list_del(&xskb->list_node);
}

static inline struct xdp_buff *xsk_buff_get_tail(struct xdp_buff *first)
{
	struct xdp_buff_xsk *xskb = container_of(first, struct xdp_buff_xsk, xdp);
	struct xdp_buff_xsk *frag;

	frag = list_last_entry(&xskb->pool->xskb_list, struct xdp_buff_xsk,
			       list_node);
	return &frag->xdp;
}
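
/* Illustrative sketch, not part of this API: linking a continuation buffer
 * onto the head of a multi-buffer frame on RX completion. Error handling is
 * simplified and the surrounding driver state (head, xdp, len) is
 * hypothetical.
 *
 *	xsk_buff_set_size(xdp, len);
 *	xsk_buff_dma_sync_for_cpu(xdp);
 *
 *	if (!xsk_buff_add_frag(head, xdp)) {
 *		// frag table exhausted: drop the whole frame
 *		xsk_buff_free(xdp);
 *		xsk_buff_free(head);	// also frees frags linked so far
 *	}
 */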

static inline void xsk_buff_set_size(struct xdp_buff *xdp, u32 size)
{
	xdp->data = xdp->data_hard_start + XDP_PACKET_HEADROOM;
	xdp->data_meta = xdp->data;
	xdp->data_end = xdp->data + size;
	xdp->flags = 0;
}

static inline dma_addr_t xsk_buff_raw_get_dma(struct xsk_buff_pool *pool,
					      u64 addr)
{
	return xp_raw_get_dma(pool, addr);
}

static inline void *xsk_buff_raw_get_data(struct xsk_buff_pool *pool, u64 addr)
{
	return xp_raw_get_data(pool, addr);
}

#define XDP_TXMD_FLAGS_VALID ( \
		XDP_TXMD_FLAGS_TIMESTAMP | \
		XDP_TXMD_FLAGS_CHECKSUM | \
	0)

static inline bool
xsk_buff_valid_tx_metadata(const struct xsk_tx_metadata *meta)
{
	return !(meta->flags & ~XDP_TXMD_FLAGS_VALID);
}

static inline struct xsk_tx_metadata *xsk_buff_get_metadata(struct xsk_buff_pool *pool, u64 addr)
{
	struct xsk_tx_metadata *meta;

	if (!pool->tx_metadata_len)
		return NULL;

	meta = xp_raw_get_data(pool, addr) - pool->tx_metadata_len;
	if (unlikely(!xsk_buff_valid_tx_metadata(meta)))
		return NULL; /* no way to signal the error to the user */

	return meta;
}
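
/* Illustrative sketch, not part of this API: a zero-copy TX loop. The driver
 * peeks descriptors from the TX ring, resolves their DMA addresses straight
 * from the umem, and hands any valid metadata to a hypothetical mydrv_*
 * helper.
 *
 *	struct xdp_desc desc;
 *
 *	while (xsk_tx_peek_desc(pool, &desc)) {
 *		dma_addr_t dma = xsk_buff_raw_get_dma(pool, desc.addr);
 *		struct xsk_tx_metadata *meta =
 *			xsk_buff_get_metadata(pool, desc.addr);
 *
 *		xsk_buff_raw_dma_sync_for_device(pool, dma, desc.len);
 *		mydrv_post_tx_desc(ring, dma, desc.len, meta);
 *	}
 *	xsk_tx_release(pool);
 */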

static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);

	xp_dma_sync_for_cpu(xskb);
}

static inline void xsk_buff_raw_dma_sync_for_device(struct xsk_buff_pool *pool,
						    dma_addr_t dma,
						    size_t size)
{
	xp_dma_sync_for_device(pool, dma, size);
}
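
/* Illustrative sketch, not part of this API: a single-buffer RX completion,
 * syncing for the CPU before the XDP program runs. The prog/dev/len variables
 * are hypothetical driver state, and verdict handling is simplified.
 *
 *	xsk_buff_set_size(xdp, len);
 *	xsk_buff_dma_sync_for_cpu(xdp);
 *
 *	switch (bpf_prog_run_xdp(prog, xdp)) {
 *	case XDP_REDIRECT:
 *		xdp_do_redirect(dev, xdp, prog);
 *		break;
 *	default:
 *		xsk_buff_free(xdp);
 *		break;
 *	}
 */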

#else

static inline void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries)
{
}

static inline bool xsk_tx_peek_desc(struct xsk_buff_pool *pool,
				    struct xdp_desc *desc)
{
	return false;
}

static inline u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 max)
{
	return 0;
}

static inline void xsk_tx_release(struct xsk_buff_pool *pool)
{
}

static inline struct xsk_buff_pool *
xsk_get_pool_from_qid(struct net_device *dev, u16 queue_id)
{
	return NULL;
}

static inline void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool)
{
}

static inline void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool)
{
}

static inline void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool)
{
}

static inline void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool)
{
}

static inline bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool)
{
	return false;
}

static inline u32 xsk_pool_get_headroom(struct xsk_buff_pool *pool)
{
	return 0;
}

static inline u32 xsk_pool_get_chunk_size(struct xsk_buff_pool *pool)
{
	return 0;
}

static inline u32 xsk_pool_get_rx_frame_size(struct xsk_buff_pool *pool)
{
	return 0;
}

static inline void xsk_pool_set_rxq_info(struct xsk_buff_pool *pool,
					 struct xdp_rxq_info *rxq)
{
}

static inline void xsk_pool_fill_cb(struct xsk_buff_pool *pool,
				    struct xsk_cb_desc *desc)
{
}

static inline void xsk_pool_dma_unmap(struct xsk_buff_pool *pool,
				      unsigned long attrs)
{
}

static inline int xsk_pool_dma_map(struct xsk_buff_pool *pool,
				   struct device *dev, unsigned long attrs)
{
	return 0;
}

static inline dma_addr_t xsk_buff_xdp_get_dma(struct xdp_buff *xdp)
{
	return 0;
}

static inline dma_addr_t xsk_buff_xdp_get_frame_dma(struct xdp_buff *xdp)
{
	return 0;
}

static inline struct xdp_buff *xsk_buff_alloc(struct xsk_buff_pool *pool)
{
	return NULL;
}

static inline bool xsk_is_eop_desc(const struct xdp_desc *desc)
{
	return false;
}

static inline u32 xsk_buff_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max)
{
	return 0;
}

static inline bool xsk_buff_can_alloc(struct xsk_buff_pool *pool, u32 count)
{
	return false;
}

static inline void xsk_buff_free(struct xdp_buff *xdp)
{
}

static inline bool xsk_buff_add_frag(struct xdp_buff *head,
				     struct xdp_buff *xdp)
{
	return false;
}

static inline struct xdp_buff *xsk_buff_get_frag(const struct xdp_buff *first)
{
	return NULL;
}

static inline void xsk_buff_del_tail(struct xdp_buff *tail)
{
}

static inline struct xdp_buff *xsk_buff_get_tail(struct xdp_buff *first)
{
	return NULL;
}

static inline void xsk_buff_set_size(struct xdp_buff *xdp, u32 size)
{
}

static inline dma_addr_t xsk_buff_raw_get_dma(struct xsk_buff_pool *pool,
					      u64 addr)
{
	return 0;
}

static inline void *xsk_buff_raw_get_data(struct xsk_buff_pool *pool, u64 addr)
{
	return NULL;
}

static inline bool xsk_buff_valid_tx_metadata(const struct xsk_tx_metadata *meta)
{
	return false;
}

static inline struct xsk_tx_metadata *xsk_buff_get_metadata(struct xsk_buff_pool *pool, u64 addr)
{
	return NULL;
}

static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp)
{
}

static inline void xsk_buff_raw_dma_sync_for_device(struct xsk_buff_pool *pool,
						    dma_addr_t dma,
						    size_t size)
{
}

#endif /* CONFIG_XDP_SOCKETS */

#endif /* _LINUX_XDP_SOCK_DRV_H */