/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */

#ifndef _LINUX_SKMSG_H
#define _LINUX_SKMSG_H

#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/scatterlist.h>
#include <linux/skbuff.h>

#include <net/sock.h>
#include <net/tcp.h>
#include <net/strparser.h>

#define MAX_MSG_FRAGS			MAX_SKB_FRAGS
#define NR_MSG_FRAG_IDS			(MAX_MSG_FRAGS + 1)

enum __sk_action {
	__SK_DROP = 0,
	__SK_PASS,
	__SK_REDIRECT,
	__SK_NONE,
};

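/* start/curr/end are ring indices into data[]; they wrap at
 * NR_MSG_FRAG_IDS, so at most MAX_MSG_FRAGS elements are in use at
 * once. size is the total byte count of the message.
 */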
struct sk_msg_sg {
	u32				start;
	u32				curr;
	u32				end;
	u32				size;
	u32				copybreak;
	DECLARE_BITMAP(copy, MAX_MSG_FRAGS + 2);
	/* The extra two elements:
	 * 1) used for chaining the front and end sections when the list
	 *    becomes partitioned (e.g. end < start). The crypto APIs
	 *    require the chaining;
	 * 2) to chain trailer SG entries after the message.
	 */
	struct scatterlist		data[MAX_MSG_FRAGS + 2];
};

/* UAPI in filter.c depends on struct sk_msg_sg being first element. */
struct sk_msg {
	struct sk_msg_sg		sg;
	void				*data;
	void				*data_end;
	u32				apply_bytes;
	u32				cork_bytes;
	u32				flags;
	struct sk_buff			*skb;
	struct sock			*sk_redir;
	struct sock			*sk;
	struct list_head		list;
};

struct sk_psock_progs {
	struct bpf_prog			*msg_parser;
	struct bpf_prog			*stream_parser;
	struct bpf_prog			*stream_verdict;
	struct bpf_prog			*skb_verdict;
	struct bpf_link			*msg_parser_link;
	struct bpf_link			*stream_parser_link;
	struct bpf_link			*stream_verdict_link;
	struct bpf_link			*skb_verdict_link;
};

enum sk_psock_state_bits {
	SK_PSOCK_TX_ENABLED,
	SK_PSOCK_RX_STRP_ENABLED,
};

struct sk_psock_link {
	struct list_head		list;
	struct bpf_map			*map;
	void				*link_raw;
};

struct sk_psock_work_state {
	u32				len;
	u32				off;
};

struct sk_psock {
	struct sock			*sk;
	struct sock			*sk_redir;
	u32				apply_bytes;
	u32				cork_bytes;
	u32				eval;
	bool				redir_ingress; /* undefined if sk_redir is null */
	struct sk_msg			*cork;
	struct sk_psock_progs		progs;
#if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
	struct strparser		strp;
#endif
	struct sk_buff_head		ingress_skb;
	struct list_head		ingress_msg;
	spinlock_t			ingress_lock;
	unsigned long			state;
	struct list_head		link;
	spinlock_t			link_lock;
	refcount_t			refcnt;
	void (*saved_unhash)(struct sock *sk);
	void (*saved_destroy)(struct sock *sk);
	void (*saved_close)(struct sock *sk, long timeout);
	void (*saved_write_space)(struct sock *sk);
	void (*saved_data_ready)(struct sock *sk);
	/* psock_update_sk_prot may be called with restore=false many times
	 * so the handler must be safe for this case. It will be called
	 * exactly once with restore=true when the psock is being destroyed
	 * and psock refcnt is zero, but before an RCU grace period.
	 */
	int  (*psock_update_sk_prot)(struct sock *sk, struct sk_psock *psock,
				     bool restore);
	struct proto			*sk_proto;
	struct mutex			work_mutex;
	struct sk_psock_work_state	work_state;
	struct delayed_work		work;
	struct sock			*sk_pair;
	struct rcu_work			rwork;
};

int sk_msg_alloc(struct sock *sk, struct sk_msg *msg, int len,
		 int elem_first_coalesce);
int sk_msg_clone(struct sock *sk, struct sk_msg *dst, struct sk_msg *src,
		 u32 off, u32 len);
void sk_msg_trim(struct sock *sk, struct sk_msg *msg, int len);
int sk_msg_free(struct sock *sk, struct sk_msg *msg);
int sk_msg_free_nocharge(struct sock *sk, struct sk_msg *msg);
void sk_msg_free_partial(struct sock *sk, struct sk_msg *msg, u32 bytes);
void sk_msg_free_partial_nocharge(struct sock *sk, struct sk_msg *msg,
				  u32 bytes);

void sk_msg_return(struct sock *sk, struct sk_msg *msg, int bytes);
void sk_msg_return_zero(struct sock *sk, struct sk_msg *msg, int bytes);

int sk_msg_zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
			      struct sk_msg *msg, u32 bytes);
int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from,
			     struct sk_msg *msg, u32 bytes);
int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
		   int len, int flags);
bool sk_msg_is_readable(struct sock *sk);

static inline void sk_msg_check_to_free(struct sk_msg *msg, u32 i, u32 bytes)
{
	WARN_ON(i == msg->sg.end && bytes);
}

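/* Consume up to @bytes from the psock's apply_bytes budget, clamping at zero. */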
static inline void sk_msg_apply_bytes(struct sk_psock *psock, u32 bytes)
{
	if (psock->apply_bytes) {
		if (psock->apply_bytes < bytes)
			psock->apply_bytes = 0;
		else
			psock->apply_bytes -= bytes;
	}
}

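/* Number of ring slots between @start and @end, allowing for wrap-around. */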
static inline u32 sk_msg_iter_dist(u32 start, u32 end)
{
	return end >= start ? end - start : end + (NR_MSG_FRAG_IDS - start);
}

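/* Step a ring index backward or forward one slot, wrapping at NR_MSG_FRAG_IDS. */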
#define sk_msg_iter_var_prev(var)			\
	do {						\
		if (var == 0)				\
			var = NR_MSG_FRAG_IDS - 1;	\
		else					\
			var--;				\
	} while (0)

#define sk_msg_iter_var_next(var)			\
	do {						\
		var++;					\
		if (var == NR_MSG_FRAG_IDS)		\
			var = 0;			\
	} while (0)

#define sk_msg_iter_prev(msg, which)			\
	sk_msg_iter_var_prev(msg->sg.which)

#define sk_msg_iter_next(msg, which)			\
	sk_msg_iter_var_next(msg->sg.which)

static inline void sk_msg_init(struct sk_msg *msg)
{
	BUILD_BUG_ON(ARRAY_SIZE(msg->sg.data) - 1 != NR_MSG_FRAG_IDS);
	memset(msg, 0, sizeof(*msg));
	sg_init_marker(msg->sg.data, NR_MSG_FRAG_IDS);
}

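/* Move @size bytes of element @which from @src to @dst, adjusting the byte
 * accounting on both messages and the source element's offset/length.
 */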
static inline void sk_msg_xfer(struct sk_msg *dst, struct sk_msg *src,
			       int which, u32 size)
{
	dst->sg.data[which] = src->sg.data[which];
	dst->sg.data[which].length  = size;
	dst->sg.size		   += size;
	src->sg.size		   -= size;
	src->sg.data[which].length -= size;
	src->sg.data[which].offset += size;
}

static inline void sk_msg_xfer_full(struct sk_msg *dst, struct sk_msg *src)
{
	memcpy(dst, src, sizeof(*src));
	sk_msg_init(src);
}

static inline bool sk_msg_full(const struct sk_msg *msg)
{
	return sk_msg_iter_dist(msg->sg.start, msg->sg.end) == MAX_MSG_FRAGS;
}

static inline u32 sk_msg_elem_used(const struct sk_msg *msg)
{
	return sk_msg_iter_dist(msg->sg.start, msg->sg.end);
}

static inline struct scatterlist *sk_msg_elem(struct sk_msg *msg, int which)
{
	return &msg->sg.data[which];
}

static inline struct scatterlist sk_msg_elem_cpy(struct sk_msg *msg, int which)
{
	return msg->sg.data[which];
}

static inline struct page *sk_msg_page(struct sk_msg *msg, int which)
{
	return sg_page(sk_msg_elem(msg, which));
}

static inline bool sk_msg_to_ingress(const struct sk_msg *msg)
{
	return msg->flags & BPF_F_INGRESS;
}

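/* Point msg->data/data_end at the first element for direct access, or clear
 * them when that element is marked in the copy bitmap.
 */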
static inline void sk_msg_compute_data_pointers(struct sk_msg *msg)
{
	struct scatterlist *sge = sk_msg_elem(msg, msg->sg.start);

	if (test_bit(msg->sg.start, msg->sg.copy)) {
		msg->data = NULL;
		msg->data_end = NULL;
	} else {
		msg->data = sg_virt(sge);
		msg->data_end = msg->data + sge->length;
	}
}

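/* Append a page fragment as a new ring element at sg.end: take a page
 * reference, mark the element in the copy bitmap and advance the end index.
 */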
static inline void sk_msg_page_add(struct sk_msg *msg, struct page *page,
				   u32 len, u32 offset)
{
	struct scatterlist *sge;

	get_page(page);
	sge = sk_msg_elem(msg, msg->sg.end);
	sg_set_page(sge, page, len, offset);
	sg_unmark_end(sge);

	__set_bit(msg->sg.end, msg->sg.copy);
	msg->sg.size += len;
	sk_msg_iter_next(msg, end);
}

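/* Set or clear the copy bit on elements starting at @i, stopping when the
 * ring index reaches sg.end.
 */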
static inline void sk_msg_sg_copy(struct sk_msg *msg, u32 i, bool copy_state)
{
	do {
		if (copy_state)
			__set_bit(i, msg->sg.copy);
		else
			__clear_bit(i, msg->sg.copy);
		sk_msg_iter_var_next(i);
		if (i == msg->sg.end)
			break;
	} while (1);
}

static inline void sk_msg_sg_copy_set(struct sk_msg *msg, u32 start)
{
	sk_msg_sg_copy(msg, start, true);
}

static inline void sk_msg_sg_copy_clear(struct sk_msg *msg, u32 start)
{
	sk_msg_sg_copy(msg, start, false);
}

static inline struct sk_psock *sk_psock(const struct sock *sk)
{
	return __rcu_dereference_sk_user_data_with_flags(sk,
							 SK_USER_DATA_PSOCK);
}

static inline void sk_psock_set_state(struct sk_psock *psock,
				      enum sk_psock_state_bits bit)
{
	set_bit(bit, &psock->state);
}

static inline void sk_psock_clear_state(struct sk_psock *psock,
					enum sk_psock_state_bits bit)
{
	clear_bit(bit, &psock->state);
}

static inline bool sk_psock_test_state(const struct sk_psock *psock,
				       enum sk_psock_state_bits bit)
{
	return test_bit(bit, &psock->state);
}

static inline void sock_drop(struct sock *sk, struct sk_buff *skb)
{
	sk_drops_add(sk, skb);
	kfree_skb(skb);
}

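/* Queue @msg on the psock's ingress list; if the psock is no longer accepting
 * traffic (SK_PSOCK_TX_ENABLED cleared), the message is freed instead.
 */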
static inline void sk_psock_queue_msg(struct sk_psock *psock,
				      struct sk_msg *msg)
{
	spin_lock_bh(&psock->ingress_lock);
	if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
		list_add_tail(&msg->list, &psock->ingress_msg);
	else {
		sk_msg_free(psock->sk, msg);
		kfree(msg);
	}
	spin_unlock_bh(&psock->ingress_lock);
}

static inline struct sk_msg *sk_psock_dequeue_msg(struct sk_psock *psock)
{
	struct sk_msg *msg;

	spin_lock_bh(&psock->ingress_lock);
	msg = list_first_entry_or_null(&psock->ingress_msg, struct sk_msg, list);
	if (msg)
		list_del(&msg->list);
	spin_unlock_bh(&psock->ingress_lock);
	return msg;
}

static inline struct sk_msg *sk_psock_peek_msg(struct sk_psock *psock)
{
	struct sk_msg *msg;

	spin_lock_bh(&psock->ingress_lock);
	msg = list_first_entry_or_null(&psock->ingress_msg, struct sk_msg, list);
	spin_unlock_bh(&psock->ingress_lock);
	return msg;
}

static inline struct sk_msg *sk_psock_next_msg(struct sk_psock *psock,
					       struct sk_msg *msg)
{
	struct sk_msg *ret;

	spin_lock_bh(&psock->ingress_lock);
	if (list_is_last(&msg->list, &psock->ingress_msg))
		ret = NULL;
	else
		ret = list_next_entry(msg, list);
	spin_unlock_bh(&psock->ingress_lock);
	return ret;
}

static inline bool sk_psock_queue_empty(const struct sk_psock *psock)
{
	return psock ? list_empty(&psock->ingress_msg) : true;
}

static inline void kfree_sk_msg(struct sk_msg *msg)
{
	if (msg->skb)
		consume_skb(msg->skb);
	kfree(msg);
}

static inline void sk_psock_report_error(struct sk_psock *psock, int err)
{
	struct sock *sk = psock->sk;

	sk->sk_err = err;
	sk_error_report(sk);
}

struct sk_psock *sk_psock_init(struct sock *sk, int node);
void sk_psock_stop(struct sk_psock *psock);

#if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock);
void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock);
void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock);
#else
static inline int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock)
{
	return -EOPNOTSUPP;
}

static inline void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock)
{
}

static inline void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock)
{
}
#endif

void sk_psock_start_verdict(struct sock *sk, struct sk_psock *psock);
void sk_psock_stop_verdict(struct sock *sk, struct sk_psock *psock);

int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock,
			 struct sk_msg *msg);

/*
 * This specialized allocator has to be a macro for its allocations to be
 * accounted separately (to have a separate alloc_tag). The typecast is
 * intentional to enforce typesafety.
 */
#define sk_psock_init_link()	\
		((struct sk_psock_link *)kzalloc(sizeof(struct sk_psock_link),	\
						 GFP_ATOMIC | __GFP_NOWARN))

static inline void sk_psock_free_link(struct sk_psock_link *link)
{
	kfree(link);
}

struct sk_psock_link *sk_psock_link_pop(struct sk_psock *psock);

static inline void sk_psock_cork_free(struct sk_psock *psock)
{
	if (psock->cork) {
		sk_msg_free(psock->sk, psock->cork);
		kfree(psock->cork);
		psock->cork = NULL;
	}
}

static inline void sk_psock_restore_proto(struct sock *sk,
					  struct sk_psock *psock)
{
	if (psock->psock_update_sk_prot)
		psock->psock_update_sk_prot(sk, psock, true);
}

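/* Look up the socket's psock under RCU and take a reference; returns NULL if
 * no psock is attached or its refcount has already dropped to zero.
 */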
static inline struct sk_psock *sk_psock_get(struct sock *sk)
{
	struct sk_psock *psock;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (psock && !refcount_inc_not_zero(&psock->refcnt))
		psock = NULL;
	rcu_read_unlock();
	return psock;
}

void sk_psock_drop(struct sock *sk, struct sk_psock *psock);

static inline void sk_psock_put(struct sock *sk, struct sk_psock *psock)
{
	if (refcount_dec_and_test(&psock->refcnt))
		sk_psock_drop(sk, psock);
}

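/* Run the original ->sk_data_ready() saved by the psock, or the socket's
 * current callback if none was saved, under the socket callback lock.
 */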
static inline void sk_psock_data_ready(struct sock *sk, struct sk_psock *psock)
{
	read_lock_bh(&sk->sk_callback_lock);
	if (psock->saved_data_ready)
		psock->saved_data_ready(sk);
	else
		sk->sk_data_ready(sk);
	read_unlock_bh(&sk->sk_callback_lock);
}

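/* Atomically install @prog (which may be NULL) and drop the reference on
 * whichever program it replaces.
 */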
static inline void psock_set_prog(struct bpf_prog **pprog,
				  struct bpf_prog *prog)
{
	prog = xchg(pprog, prog);
	if (prog)
		bpf_prog_put(prog);
}

static inline int psock_replace_prog(struct bpf_prog **pprog,
				     struct bpf_prog *prog,
				     struct bpf_prog *old)
{
	if (cmpxchg(pprog, old, prog) != old)
		return -ENOENT;

	if (old)
		bpf_prog_put(old);

	return 0;
}

static inline void psock_progs_drop(struct sk_psock_progs *progs)
{
	psock_set_prog(&progs->msg_parser, NULL);
	psock_set_prog(&progs->stream_parser, NULL);
	psock_set_prog(&progs->stream_verdict, NULL);
	psock_set_prog(&progs->skb_verdict, NULL);
}

int sk_psock_tls_strp_read(struct sk_psock *psock, struct sk_buff *skb);

static inline bool sk_psock_strp_enabled(struct sk_psock *psock)
{
	if (!psock)
		return false;
	return !!psock->saved_data_ready;
}

#if IS_ENABLED(CONFIG_NET_SOCK_MSG)

#define BPF_F_STRPARSER	(1UL << 1)

/* We only have two bits so far. */
#define BPF_F_PTR_MASK ~(BPF_F_INGRESS | BPF_F_STRPARSER)

static inline bool skb_bpf_strparser(const struct sk_buff *skb)
{
	unsigned long sk_redir = skb->_sk_redir;

	return sk_redir & BPF_F_STRPARSER;
}

static inline void skb_bpf_set_strparser(struct sk_buff *skb)
{
	skb->_sk_redir |= BPF_F_STRPARSER;
}

static inline bool skb_bpf_ingress(const struct sk_buff *skb)
{
	unsigned long sk_redir = skb->_sk_redir;

	return sk_redir & BPF_F_INGRESS;
}

static inline void skb_bpf_set_ingress(struct sk_buff *skb)
{
	skb->_sk_redir |= BPF_F_INGRESS;
}

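/* The redirect target socket is stashed in skb->_sk_redir with the
 * BPF_F_INGRESS/BPF_F_STRPARSER flags encoded in the low bits of the pointer
 * value; skb_bpf_redirect_fetch() masks them off again.
 */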
static inline void skb_bpf_set_redir(struct sk_buff *skb, struct sock *sk_redir,
				     bool ingress)
{
	skb->_sk_redir = (unsigned long)sk_redir;
	if (ingress)
		skb->_sk_redir |= BPF_F_INGRESS;
}

static inline struct sock *skb_bpf_redirect_fetch(const struct sk_buff *skb)
{
	unsigned long sk_redir = skb->_sk_redir;

	return (struct sock *)(sk_redir & BPF_F_PTR_MASK);
}

static inline void skb_bpf_redirect_clear(struct sk_buff *skb)
{
	skb->_sk_redir = 0;
}
#endif /* CONFIG_NET_SOCK_MSG */
#endif /* _LINUX_SKMSG_H */