/* SPDX-License-Identifier: GPL-2.0 */
/* AF_XDP internal functions
 * Copyright(c) 2018 Intel Corporation.
 */

#ifndef _LINUX_XDP_SOCK_H
#define _LINUX_XDP_SOCK_H

#include <linux/workqueue.h>
#include <linux/if_xdp.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <net/sock.h>

struct net_device;
struct xsk_queue;

/* Masks for xdp_umem_page flags.
 * The low 12 bits of the addr will be 0 since this is the page address, so we
 * can use them for flags.
 */
#define XSK_NEXT_PG_CONTIG_SHIFT 0
#define XSK_NEXT_PG_CONTIG_MASK (1ULL << XSK_NEXT_PG_CONTIG_SHIFT)
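/* Illustrative sketch (not part of this header): the umem setup code is
 * expected to set XSK_NEXT_PG_CONTIG_MASK in the low bits of pages[i].addr
 * when page i + 1 is mapped contiguously, and readers mask the flag off
 * before using the pointer. The helper name below is hypothetical.
 *
 *	static bool example_next_pg_contig(struct xdp_umem *umem, u32 i)
 *	{
 *		return (unsigned long)umem->pages[i].addr &
 *		       XSK_NEXT_PG_CONTIG_MASK;
 *	}
 */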

struct xdp_umem_page {
	void *addr;
	dma_addr_t dma;
};

struct xdp_umem_fq_reuse {
	u32 nentries;
	u32 length;
	u64 handles[];
};

/* Flags for the umem flags field.
 *
 * The NEED_WAKEUP flag is bit 1 because the flags field is reused for the
 * public UMEM flags. See include/uapi/linux/if_xdp.h.
 */
#define XDP_UMEM_USES_NEED_WAKEUP (1 << 1)
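/* Hedged usage sketch: a zero-copy driver would typically gate the wakeup
 * helpers declared further down in this header on the flag above, e.g. when
 * its fill queue runs dry. The condition name is illustrative only.
 *
 *	if (xsk_umem_uses_need_wakeup(umem)) {
 *		if (fill_queue_empty)
 *			xsk_set_rx_need_wakeup(umem);
 *		else
 *			xsk_clear_rx_need_wakeup(umem);
 *	}
 */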

struct xdp_umem {
	struct xsk_queue *fq;
	struct xsk_queue *cq;
	struct xdp_umem_page *pages;
	u64 chunk_mask;
	u64 size;
	u32 headroom;
	u32 chunk_size_nohr;
	struct user_struct *user;
	refcount_t users;
	struct work_struct work;
	struct page **pgs;
	u32 npgs;
	u16 queue_id;
	u8 need_wakeup;
	u8 flags;
	int id;
	struct net_device *dev;
	struct xdp_umem_fq_reuse *fq_reuse;
	bool zc;
	spinlock_t xsk_tx_list_lock;
	struct list_head xsk_tx_list;
};

/* Nodes are linked in the struct xdp_sock map_list field, and used to
 * track which maps a certain socket resides in.
 */

struct xsk_map {
	struct bpf_map map;
	spinlock_t lock; /* Synchronize map updates */
	struct xdp_sock *xsk_map[];
};

struct xsk_map_node {
	struct list_head node;
	struct xsk_map *map;
	struct xdp_sock **map_entry;
};
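/* Hedged sketch of how these nodes are meant to be used: on socket teardown,
 * the socket can walk its map_list and remove itself from every XSKMAP entry
 * it is installed in via xsk_map_try_sock_delete(), declared below, dropping
 * the map reference with xsk_map_put(). Locking is omitted here (the real
 * path must hold map_list_lock) and the locals (xs, n, tmp) are illustrative.
 *
 *	struct xsk_map_node *n, *tmp;
 *
 *	list_for_each_entry_safe(n, tmp, &xs->map_list, node)
 *		xsk_map_try_sock_delete(n->map, xs, n->map_entry);
 */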

struct xdp_sock {
	/* struct sock must be the first member of struct xdp_sock */
	struct sock sk;
	struct xsk_queue *rx;
	struct net_device *dev;
	struct xdp_umem *umem;
	struct list_head flush_node;
	u16 queue_id;
	bool zc;
	enum {
		XSK_READY = 0,
		XSK_BOUND,
		XSK_UNBOUND,
	} state;
	/* Protects multiple processes in the control path */
	struct mutex mutex;
	struct xsk_queue *tx ____cacheline_aligned_in_smp;
	struct list_head list;
	/* Mutual exclusion of NAPI TX thread and sendmsg error paths
	 * in the SKB destructor callback.
	 */
	spinlock_t tx_completion_lock;
	/* Protects generic receive. */
	spinlock_t rx_lock;
	u64 rx_dropped;
	struct list_head map_list;
	/* Protects map_list */
	spinlock_t map_list_lock;
};

struct xdp_buff;
#ifdef CONFIG_XDP_SOCKETS
int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs);
/* Used from netdev driver */
bool xsk_umem_has_addrs(struct xdp_umem *umem, u32 cnt);
u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr);
void xsk_umem_release_addr(struct xdp_umem *umem);
void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries);
bool xsk_umem_consume_tx(struct xdp_umem *umem, struct xdp_desc *desc);
void xsk_umem_consume_tx_done(struct xdp_umem *umem);
struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries);
struct xdp_umem_fq_reuse *xsk_reuseq_swap(struct xdp_umem *umem,
					  struct xdp_umem_fq_reuse *newq);
void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq);
struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev, u16 queue_id);
void xsk_set_rx_need_wakeup(struct xdp_umem *umem);
void xsk_set_tx_need_wakeup(struct xdp_umem *umem);
void xsk_clear_rx_need_wakeup(struct xdp_umem *umem);
void xsk_clear_tx_need_wakeup(struct xdp_umem *umem);
bool xsk_umem_uses_need_wakeup(struct xdp_umem *umem);

void xsk_map_try_sock_delete(struct xsk_map *map, struct xdp_sock *xs,
			     struct xdp_sock **map_entry);
int xsk_map_inc(struct xsk_map *map);
void xsk_map_put(struct xsk_map *map);
int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp);
void __xsk_map_flush(void);

static inline struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map,
						     u32 key)
{
	struct xsk_map *m = container_of(map, struct xsk_map, map);
	struct xdp_sock *xs;

	if (key >= map->max_entries)
		return NULL;

	xs = READ_ONCE(m->xsk_map[key]);
	return xs;
}

static inline u64 xsk_umem_extract_addr(u64 addr)
{
	return addr & XSK_UNALIGNED_BUF_ADDR_MASK;
}

static inline u64 xsk_umem_extract_offset(u64 addr)
{
	return addr >> XSK_UNALIGNED_BUF_OFFSET_SHIFT;
}

static inline u64 xsk_umem_add_offset_to_addr(u64 addr)
{
	return xsk_umem_extract_addr(addr) + xsk_umem_extract_offset(addr);
}
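/* Worked example (values are illustrative): in unaligned chunk mode a FILL
 * or TX address packs the base address in the low 48 bits and the offset in
 * the upper 16 bits. For
 * addr = (256ULL << XSK_UNALIGNED_BUF_OFFSET_SHIFT) | 0x3000:
 *
 *	xsk_umem_extract_addr(addr)       == 0x3000
 *	xsk_umem_extract_offset(addr)     == 256
 *	xsk_umem_add_offset_to_addr(addr) == 0x3100
 */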

static inline char *xdp_umem_get_data(struct xdp_umem *umem, u64 addr)
{
	unsigned long page_addr;

	addr = xsk_umem_add_offset_to_addr(addr);
	page_addr = (unsigned long)umem->pages[addr >> PAGE_SHIFT].addr;

	return (char *)(page_addr & PAGE_MASK) + (addr & ~PAGE_MASK);
}

static inline dma_addr_t xdp_umem_get_dma(struct xdp_umem *umem, u64 addr)
{
	addr = xsk_umem_add_offset_to_addr(addr);

	return umem->pages[addr >> PAGE_SHIFT].dma + (addr & ~PAGE_MASK);
}
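/* Hedged sketch: for the same umem address, xdp_umem_get_data() yields a CPU
 * pointer while xdp_umem_get_dma() yields the bus address a NIC descriptor
 * would carry. The descriptor layout below is hypothetical.
 *
 *	void *va = xdp_umem_get_data(umem, addr);
 *	dma_addr_t pa = xdp_umem_get_dma(umem, addr);
 *
 *	prefetch(va);
 *	rx_desc->buf_addr = cpu_to_le64(pa);
 */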

/* Reuse-queue aware version of FILL queue helpers */
static inline bool xsk_umem_has_addrs_rq(struct xdp_umem *umem, u32 cnt)
{
	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;

	if (rq->length >= cnt)
		return true;

	return xsk_umem_has_addrs(umem, cnt - rq->length);
}

static inline u64 *xsk_umem_peek_addr_rq(struct xdp_umem *umem, u64 *addr)
{
	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;

	if (!rq->length)
		return xsk_umem_peek_addr(umem, addr);

	*addr = rq->handles[rq->length - 1];
	return addr;
}

static inline void xsk_umem_release_addr_rq(struct xdp_umem *umem)
{
	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;

	if (!rq->length)
		xsk_umem_release_addr(umem);
	else
		rq->length--;
}

static inline void xsk_umem_fq_reuse(struct xdp_umem *umem, u64 addr)
{
	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;

	rq->handles[rq->length++] = addr;
}
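/* Hedged sketch of the intended driver flow when refilling RX buffers with
 * these reuse-queue aware helpers; error handling and ring bookkeeping are
 * omitted, and the local names (handle, buf) are illustrative.
 *
 *	u64 handle;
 *
 *	if (!xsk_umem_peek_addr_rq(umem, &handle))
 *		return false;		// FILL ring and reuse queue both empty
 *
 *	buf->dma = xdp_umem_get_dma(umem, handle);
 *	buf->va = xdp_umem_get_data(umem, handle);
 *	buf->handle = handle;
 *	xsk_umem_release_addr_rq(umem);
 *
 *	// On teardown, unconsumed addresses go back via xsk_umem_fq_reuse().
 */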

/* Handle the offset appropriately depending on aligned or unaligned mode.
 * For unaligned mode, we store the offset in the upper 16-bits of the address.
 * For aligned mode, we simply add the offset to the address.
 */
static inline u64 xsk_umem_adjust_offset(struct xdp_umem *umem, u64 address,
					 u64 offset)
{
	if (umem->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG)
		return address + (offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT);
	else
		return address + offset;
}
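/* Worked example (illustrative values): with address = 0x3000 and
 * offset = 0x100, aligned mode returns 0x3100 directly, while unaligned mode
 * returns 0x3000 | (0x100ULL << XSK_UNALIGNED_BUF_OFFSET_SHIFT), which
 * xsk_umem_add_offset_to_addr() above collapses back to 0x3100.
 */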

static inline u32 xsk_umem_xdp_frame_sz(struct xdp_umem *umem)
{
	return umem->chunk_size_nohr + umem->headroom;
}

#else
static inline int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	return -ENOTSUPP;
}

static inline bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs)
{
	return false;
}

static inline bool xsk_umem_has_addrs(struct xdp_umem *umem, u32 cnt)
{
	return false;
}

static inline u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr)
{
	return NULL;
}

static inline void xsk_umem_release_addr(struct xdp_umem *umem)
{
}

static inline void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries)
{
}

static inline bool xsk_umem_consume_tx(struct xdp_umem *umem,
				       struct xdp_desc *desc)
{
	return false;
}

static inline void xsk_umem_consume_tx_done(struct xdp_umem *umem)
{
}

static inline struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries)
{
	return NULL;
}

static inline struct xdp_umem_fq_reuse *xsk_reuseq_swap(
	struct xdp_umem *umem,
	struct xdp_umem_fq_reuse *newq)
{
	return NULL;
}

static inline void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq)
{
}

static inline struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev,
						     u16 queue_id)
{
	return NULL;
}

static inline u64 xsk_umem_extract_addr(u64 addr)
{
	return 0;
}

static inline u64 xsk_umem_extract_offset(u64 addr)
{
	return 0;
}

static inline u64 xsk_umem_add_offset_to_addr(u64 addr)
{
	return 0;
}

static inline char *xdp_umem_get_data(struct xdp_umem *umem, u64 addr)
{
	return NULL;
}

static inline dma_addr_t xdp_umem_get_dma(struct xdp_umem *umem, u64 addr)
{
	return 0;
}

static inline bool xsk_umem_has_addrs_rq(struct xdp_umem *umem, u32 cnt)
{
	return false;
}

static inline u64 *xsk_umem_peek_addr_rq(struct xdp_umem *umem, u64 *addr)
{
	return NULL;
}

static inline void xsk_umem_release_addr_rq(struct xdp_umem *umem)
{
}

static inline void xsk_umem_fq_reuse(struct xdp_umem *umem, u64 addr)
{
}

static inline void xsk_set_rx_need_wakeup(struct xdp_umem *umem)
{
}

static inline void xsk_set_tx_need_wakeup(struct xdp_umem *umem)
{
}

static inline void xsk_clear_rx_need_wakeup(struct xdp_umem *umem)
{
}

static inline void xsk_clear_tx_need_wakeup(struct xdp_umem *umem)
{
}

static inline bool xsk_umem_uses_need_wakeup(struct xdp_umem *umem)
{
	return false;
}

static inline u64 xsk_umem_adjust_offset(struct xdp_umem *umem, u64 handle,
					 u64 offset)
{
	return 0;
}

static inline u32 xsk_umem_xdp_frame_sz(struct xdp_umem *umem)
{
	return 0;
}

static inline int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	return -EOPNOTSUPP;
}

static inline void __xsk_map_flush(void)
{
}

static inline struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map,
						     u32 key)
{
	return NULL;
}
#endif /* CONFIG_XDP_SOCKETS */

#endif /* _LINUX_XDP_SOCK_H */