/* SPDX-License-Identifier: GPL-2.0 */
/* AF_XDP internal functions
 * Copyright(c) 2018 Intel Corporation.
 */

#ifndef _LINUX_XDP_SOCK_H
#define _LINUX_XDP_SOCK_H

#include <linux/workqueue.h>
#include <linux/if_xdp.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <net/sock.h>

struct net_device;
struct xsk_queue;

/* Per-page UMEM bookkeeping: the kernel virtual address of the page and,
 * in zero-copy mode, its DMA address.
 */
struct xdp_umem_page {
	void *addr;
	dma_addr_t dma;
};

/* LIFO stack of FILL queue addresses that were consumed but not used, so
 * that they can be handed out again before new entries are peeked.
 */
struct xdp_umem_fq_reuse {
	u32 nentries;
	u32 length;
	u64 handles[];
};

/* Flags for the umem flags field. */
#define XDP_UMEM_USES_NEED_WAKEUP (1 << 0)

struct xdp_umem {
	struct xsk_queue *fq;
	struct xsk_queue *cq;
	struct xdp_umem_page *pages;
	u64 chunk_mask;
	u64 size;
	u32 headroom;
	u32 chunk_size_nohr;
	struct user_struct *user;
	unsigned long address;
	refcount_t users;
	struct work_struct work;
	struct page **pgs;
	u32 npgs;
	u16 queue_id;
	u8 need_wakeup;
	u8 flags;
	int id;
	struct net_device *dev;
	struct xdp_umem_fq_reuse *fq_reuse;
	bool zc;
	spinlock_t xsk_list_lock;
	struct list_head xsk_list;
};

/* Nodes are linked in the struct xdp_sock map_list field, and used to
 * track which maps a certain socket resides in.
 */
struct xsk_map;
struct xsk_map_node {
	struct list_head node;
	struct xsk_map *map;
	struct xdp_sock **map_entry;
};

struct xdp_sock {
	/* struct sock must be the first member of struct xdp_sock */
	struct sock sk;
	struct xsk_queue *rx;
	struct net_device *dev;
	struct xdp_umem *umem;
	struct list_head flush_node;
	u16 queue_id;
	bool zc;
	enum {
		XSK_READY = 0,
		XSK_BOUND,
		XSK_UNBOUND,
	} state;
	/* Protects multiple processes in the control path */
	struct mutex mutex;
	struct xsk_queue *tx ____cacheline_aligned_in_smp;
	struct list_head list;
	/* Mutual exclusion of NAPI TX thread and sendmsg error paths
	 * in the SKB destructor callback.
	 */
	spinlock_t tx_completion_lock;
	/* Protects generic receive. */
	spinlock_t rx_lock;
	u64 rx_dropped;
	struct list_head map_list;
	/* Protects map_list */
	spinlock_t map_list_lock;
};

struct xdp_buff;
#ifdef CONFIG_XDP_SOCKETS
int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
void xsk_flush(struct xdp_sock *xs);
bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs);
/* Used from netdev driver */
bool xsk_umem_has_addrs(struct xdp_umem *umem, u32 cnt);
u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr);
void xsk_umem_discard_addr(struct xdp_umem *umem);
void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries);
bool xsk_umem_consume_tx(struct xdp_umem *umem, struct xdp_desc *desc);
void xsk_umem_consume_tx_done(struct xdp_umem *umem);
struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries);
struct xdp_umem_fq_reuse *xsk_reuseq_swap(struct xdp_umem *umem,
					  struct xdp_umem_fq_reuse *newq);
void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq);
struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev, u16 queue_id);
void xsk_set_rx_need_wakeup(struct xdp_umem *umem);
void xsk_set_tx_need_wakeup(struct xdp_umem *umem);
void xsk_clear_rx_need_wakeup(struct xdp_umem *umem);
void xsk_clear_tx_need_wakeup(struct xdp_umem *umem);
bool xsk_umem_uses_need_wakeup(struct xdp_umem *umem);
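
/* Illustrative sketch only: a zero-copy driver's TX path might combine
 * the TX and need_wakeup helpers above roughly as below. The mydrv_*()
 * names are hypothetical placeholders, not kernel APIs:
 *
 *	static void mydrv_xsk_xmit(struct mydrv_ring *ring,
 *				   struct xdp_umem *umem)
 *	{
 *		struct xdp_desc desc;
 *
 *		while (!mydrv_tx_ring_full(ring) &&
 *		       xsk_umem_consume_tx(umem, &desc)) {
 *			mydrv_xmit(ring, xdp_umem_get_dma(umem, desc.addr),
 *				   desc.len);
 *		}
 *		xsk_umem_consume_tx_done(umem);
 *
 *		if (xsk_umem_uses_need_wakeup(umem))
 *			xsk_set_tx_need_wakeup(umem);
 *	}
 *
 * Once the hardware reports completions, the driver would return the
 * descriptors to user space with xsk_umem_complete_tx().
 */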

void xsk_map_try_sock_delete(struct xsk_map *map, struct xdp_sock *xs,
			     struct xdp_sock **map_entry);
int xsk_map_inc(struct xsk_map *map);
void xsk_map_put(struct xsk_map *map);

/* Resolve a UMEM address to the kernel virtual address of the frame. */
static inline char *xdp_umem_get_data(struct xdp_umem *umem, u64 addr)
{
	return umem->pages[addr >> PAGE_SHIFT].addr + (addr & (PAGE_SIZE - 1));
}

/* Resolve a UMEM address to the DMA address of the frame. */
static inline dma_addr_t xdp_umem_get_dma(struct xdp_umem *umem, u64 addr)
{
	return umem->pages[addr >> PAGE_SHIFT].dma + (addr & (PAGE_SIZE - 1));
}
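
/* Illustrative sketch only: given a UMEM address "addr" taken from a
 * descriptor or FILL queue entry, the two helpers above resolve the
 * frame's kernel virtual address and its DMA address:
 *
 *	char *data = xdp_umem_get_data(umem, addr);
 *	dma_addr_t dma = xdp_umem_get_dma(umem, addr);
 *
 * Both index the per-page table with addr >> PAGE_SHIFT and add the
 * offset within the page, so addr must lie inside the registered UMEM.
 */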

/* Reuse-queue aware versions of the FILL queue helpers */
static inline bool xsk_umem_has_addrs_rq(struct xdp_umem *umem, u32 cnt)
{
	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;

	if (rq->length >= cnt)
		return true;

	return xsk_umem_has_addrs(umem, cnt - rq->length);
}

static inline u64 *xsk_umem_peek_addr_rq(struct xdp_umem *umem, u64 *addr)
{
	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;

	if (!rq->length)
		return xsk_umem_peek_addr(umem, addr);

	/* Peek at the top of the reuse stack without popping it;
	 * xsk_umem_discard_addr_rq() does the pop.
	 */
	*addr = rq->handles[rq->length - 1];
	return addr;
}

static inline void xsk_umem_discard_addr_rq(struct xdp_umem *umem)
{
	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;

	if (!rq->length)
		xsk_umem_discard_addr(umem);
	else
		rq->length--;
}

static inline void xsk_umem_fq_reuse(struct xdp_umem *umem, u64 addr)
{
	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;

	rq->handles[rq->length++] = addr;
}
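
/* Illustrative sketch only: an RX buffer allocation routine in a
 * zero-copy driver could prefer recycled addresses by using the
 * reuse-queue aware helpers above. mydrv_post_rx_buffer() is a
 * hypothetical placeholder, not a kernel API, and real drivers also
 * account for umem->headroom when posting the buffer:
 *
 *	static bool mydrv_alloc_rx_buf(struct mydrv_ring *ring,
 *				       struct xdp_umem *umem)
 *	{
 *		u64 addr;
 *
 *		if (!xsk_umem_peek_addr_rq(umem, &addr))
 *			return false;
 *
 *		mydrv_post_rx_buffer(ring, xdp_umem_get_dma(umem, addr));
 *		xsk_umem_discard_addr_rq(umem);
 *		return true;
 *	}
 *
 * Frames the driver drops can be pushed back onto the reuse stack with
 * xsk_umem_fq_reuse(umem, addr) instead of being lost.
 */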
#else
static inline int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	return -ENOTSUPP;
}

static inline int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	return -ENOTSUPP;
}

static inline void xsk_flush(struct xdp_sock *xs)
{
}

static inline bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs)
{
	return false;
}

static inline bool xsk_umem_has_addrs(struct xdp_umem *umem, u32 cnt)
{
	return false;
}

static inline u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr)
{
	return NULL;
}

static inline void xsk_umem_discard_addr(struct xdp_umem *umem)
{
}

static inline void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries)
{
}

static inline bool xsk_umem_consume_tx(struct xdp_umem *umem,
				       struct xdp_desc *desc)
{
	return false;
}

static inline void xsk_umem_consume_tx_done(struct xdp_umem *umem)
{
}

static inline struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries)
{
	return NULL;
}

static inline struct xdp_umem_fq_reuse *xsk_reuseq_swap(
	struct xdp_umem *umem,
	struct xdp_umem_fq_reuse *newq)
{
	return NULL;
}

static inline void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq)
{
}

static inline struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev,
						     u16 queue_id)
{
	return NULL;
}

static inline char *xdp_umem_get_data(struct xdp_umem *umem, u64 addr)
{
	return NULL;
}

static inline dma_addr_t xdp_umem_get_dma(struct xdp_umem *umem, u64 addr)
{
	return 0;
}

static inline bool xsk_umem_has_addrs_rq(struct xdp_umem *umem, u32 cnt)
{
	return false;
}

static inline u64 *xsk_umem_peek_addr_rq(struct xdp_umem *umem, u64 *addr)
{
	return NULL;
}

static inline void xsk_umem_discard_addr_rq(struct xdp_umem *umem)
{
}

static inline void xsk_umem_fq_reuse(struct xdp_umem *umem, u64 addr)
{
}

static inline void xsk_set_rx_need_wakeup(struct xdp_umem *umem)
{
}

static inline void xsk_set_tx_need_wakeup(struct xdp_umem *umem)
{
}

static inline void xsk_clear_rx_need_wakeup(struct xdp_umem *umem)
{
}

static inline void xsk_clear_tx_need_wakeup(struct xdp_umem *umem)
{
}

static inline bool xsk_umem_uses_need_wakeup(struct xdp_umem *umem)
{
	return false;
}

#endif /* CONFIG_XDP_SOCKETS */

#endif /* _LINUX_XDP_SOCK_H */