xref: /linux/net/core/datagram.c (revision 9410645520e9b820069761f3450ef6661418e279)
// SPDX-License-Identifier: GPL-2.0
/*
 *	SUCS NET3:
 *
 *	Generic datagram handling routines. These are generic for all
 *	protocols. Possibly a generic IP version on top of these would
 *	make sense. Not tonight however 8-).
 *	This is used because UDP, RAW, PACKET, DDP, IPX, AX.25 and
 *	NetROM layer all have identical poll code and mostly
 *	identical recvmsg() code. So we share it here. The poll was
 *	shared before but buried in udp.c so I moved it.
 *
 *	Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>. (datagram_poll() from old
 *						     udp.c code)
 *
 *	Fixes:
 *		Alan Cox	:	NULL return from skb_peek_copy()
 *					understood
 *		Alan Cox	:	Rewrote skb_read_datagram to avoid the
 *					skb_peek_copy stuff.
 *		Alan Cox	:	Added support for SOCK_SEQPACKET.
 *					IPX can no longer use the SO_TYPE hack
 *					but AX.25 now works right, and SPX is
 *					feasible.
 *		Alan Cox	:	Fixed write poll of non IP protocol
 *					crash.
 *		Florian  La Roche:	Changed for my new skbuff handling.
 *		Darryl Miles	:	Fixed non-blocking SOCK_SEQPACKET.
 *		Linus Torvalds	:	BSD semantic fixes.
 *		Alan Cox	:	Datagram iovec handling
 *		Darryl Miles	:	Fixed non-blocking SOCK_STREAM.
 *		Alan Cox	:	POSIXisms
 *		Pete Wyckoff    :       Unconnected accept() fix.
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/poll.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/iov_iter.h>
#include <linux/indirect_call_wrapper.h>

#include <net/protocol.h>
#include <linux/skbuff.h>

#include <net/checksum.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <trace/events/skb.h>
#include <net/busy_poll.h>
#include <crypto/hash.h>

/*
 *	Is a socket 'connection oriented'?
 */
static inline int connection_based(struct sock *sk)
{
	return sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM;
}

static int receiver_wake_function(wait_queue_entry_t *wait, unsigned int mode, int sync,
				  void *key)
{
	/*
	 * Avoid a wakeup if the event is not interesting to us
	 */
	if (key && !(key_to_poll(key) & (EPOLLIN | EPOLLERR)))
		return 0;
	return autoremove_wake_function(wait, mode, sync, key);
}
/*
 * Wait for the last received packet to be different from skb
 */
int __skb_wait_for_more_packets(struct sock *sk, struct sk_buff_head *queue,
				int *err, long *timeo_p,
				const struct sk_buff *skb)
{
	int error;
	DEFINE_WAIT_FUNC(wait, receiver_wake_function);

	prepare_to_wait_exclusive(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);

	/* Socket errors? */
	error = sock_error(sk);
	if (error)
		goto out_err;

	if (READ_ONCE(queue->prev) != skb)
		goto out;

	/* Socket shut down? */
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		goto out_noerr;

	/* Sequenced packets can arrive disconnected.
	 * If so, we report the problem.
	 */
	error = -ENOTCONN;
	if (connection_based(sk) &&
	    !(sk->sk_state == TCP_ESTABLISHED || sk->sk_state == TCP_LISTEN))
		goto out_err;

	/* handle signals */
	if (signal_pending(current))
		goto interrupted;

	error = 0;
	*timeo_p = schedule_timeout(*timeo_p);
out:
	finish_wait(sk_sleep(sk), &wait);
	return error;
interrupted:
	error = sock_intr_errno(*timeo_p);
out_err:
	*err = error;
	goto out;
out_noerr:
	*err = 0;
	error = 1;
	goto out;
}
EXPORT_SYMBOL(__skb_wait_for_more_packets);

static struct sk_buff *skb_set_peeked(struct sk_buff *skb)
{
	struct sk_buff *nskb;

	if (skb->peeked)
		return skb;

	/* We have to unshare an skb before modifying it. */
	if (!skb_shared(skb))
		goto done;

	nskb = skb_clone(skb, GFP_ATOMIC);
	if (!nskb)
		return ERR_PTR(-ENOMEM);

	skb->prev->next = nskb;
	skb->next->prev = nskb;
	nskb->prev = skb->prev;
	nskb->next = skb->next;

	consume_skb(skb);
	skb = nskb;

done:
	skb->peeked = 1;

	return skb;
}

struct sk_buff *__skb_try_recv_from_queue(struct sock *sk,
					  struct sk_buff_head *queue,
					  unsigned int flags,
					  int *off, int *err,
					  struct sk_buff **last)
{
	bool peek_at_off = false;
	struct sk_buff *skb;
	int _off = 0;

	if (unlikely(flags & MSG_PEEK && *off >= 0)) {
		peek_at_off = true;
		_off = *off;
	}

	*last = queue->prev;
	skb_queue_walk(queue, skb) {
		if (flags & MSG_PEEK) {
			if (peek_at_off && _off >= skb->len &&
			    (_off || skb->peeked)) {
				_off -= skb->len;
				continue;
			}
			if (!skb->len) {
				skb = skb_set_peeked(skb);
				if (IS_ERR(skb)) {
					*err = PTR_ERR(skb);
					return NULL;
				}
			}
			refcount_inc(&skb->users);
		} else {
			__skb_unlink(skb, queue);
		}
		*off = _off;
		return skb;
	}
	return NULL;
}
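
/* Illustrative usage sketch (hypothetical caller context, not taken from
 * this file): callers peek at a byte offset by passing MSG_PEEK with
 * *off >= 0, typically derived from SO_PEEK_OFF via sk_peek_offset().
 * The queue lock must be held, as in __skb_try_recv_datagram() below:
 *
 *	int off = sk_peek_offset(sk, MSG_PEEK);
 *	struct sk_buff *skb, *last;
 *	unsigned long cpu_flags;
 *	int err = 0;
 *
 *	spin_lock_irqsave(&queue->lock, cpu_flags);
 *	skb = __skb_try_recv_from_queue(sk, queue, MSG_PEEK, &off, &err,
 *					&last);
 *	spin_unlock_irqrestore(&queue->lock, cpu_flags);
 *	// on success, off is now the data offset inside the returned skb
 */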

/**
 *	__skb_try_recv_datagram - Receive a datagram skbuff
 *	@sk: socket
 *	@queue: socket queue from which to receive
 *	@flags: MSG\_ flags
 *	@off: an offset in bytes to peek skb from. Returns an offset
 *	      within an skb where data actually starts
 *	@err: error code returned
 *	@last: set to last peeked message to inform the wait function
 *	       what to look for when peeking
 *
 *	Get a datagram skbuff, understands the peeking, nonblocking wakeups
 *	and possible races. This replaces identical code in packet, raw and
 *	udp, as well as IPX, AX.25 and Appletalk. It also finally fixes
 *	the long-standing peek and read race for datagram sockets. If you
 *	alter this routine remember it must be re-entrant.
 *
 *	This function will lock the socket if a skb is returned, so
 *	the caller needs to unlock the socket in that case (usually by
 *	calling skb_free_datagram). Returns NULL with @err set to
 *	-EAGAIN if no data was available or to some other value if an
 *	error was detected.
 *
 *	* It does not lock socket since today. This function is
 *	* free of race conditions. This measure should/can improve
 *	* significantly datagram socket latencies at high loads,
 *	* when data copying to user space takes lots of time.
 *	* (BTW I've just killed the last cli() in IP/IPv6/core/netlink/packet
 *	*  8) Great win.)
 *	*			                    --ANK (980729)
 *
 *	The order of the tests when we find no data waiting is specified
 *	quite explicitly by POSIX 1003.1g, don't change them without having
 *	the standard around please.
 */
struct sk_buff *__skb_try_recv_datagram(struct sock *sk,
					struct sk_buff_head *queue,
					unsigned int flags, int *off, int *err,
					struct sk_buff **last)
{
	struct sk_buff *skb;
	unsigned long cpu_flags;
	/*
	 * Caller is allowed not to check sk->sk_err before skb_recv_datagram()
	 */
	int error = sock_error(sk);

	if (error)
		goto no_packet;

	do {
		/* Again only user level code calls this function, so nothing
		 * at interrupt level will suddenly eat the receive_queue.
		 *
		 * Look at current nfs client by the way...
		 * However, this function was correct in any case. 8)
		 */
		spin_lock_irqsave(&queue->lock, cpu_flags);
		skb = __skb_try_recv_from_queue(sk, queue, flags, off, &error,
						last);
		spin_unlock_irqrestore(&queue->lock, cpu_flags);
		if (error)
			goto no_packet;
		if (skb)
			return skb;

		if (!sk_can_busy_loop(sk))
			break;

		sk_busy_loop(sk, flags & MSG_DONTWAIT);
	} while (READ_ONCE(queue->prev) != *last);

	error = -EAGAIN;

no_packet:
	*err = error;
	return NULL;
}
EXPORT_SYMBOL(__skb_try_recv_datagram);

struct sk_buff *__skb_recv_datagram(struct sock *sk,
				    struct sk_buff_head *sk_queue,
				    unsigned int flags, int *off, int *err)
{
	struct sk_buff *skb, *last;
	long timeo;

	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	do {
		skb = __skb_try_recv_datagram(sk, sk_queue, flags, off, err,
					      &last);
		if (skb)
			return skb;

		if (*err != -EAGAIN)
			break;
	} while (timeo &&
		 !__skb_wait_for_more_packets(sk, sk_queue, err,
					      &timeo, last));

	return NULL;
}
EXPORT_SYMBOL(__skb_recv_datagram);

struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned int flags,
				  int *err)
{
	int off = 0;

	return __skb_recv_datagram(sk, &sk->sk_receive_queue, flags,
				   &off, err);
}
EXPORT_SYMBOL(skb_recv_datagram);
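
/* Illustrative usage sketch: a typical datagram recvmsg() implementation
 * pairs skb_recv_datagram() with skb_copy_datagram_msg() and
 * skb_free_datagram(); "msg" and "len" stand in for the recvmsg()
 * arguments of the hypothetical caller:
 *
 *	struct sk_buff *skb;
 *	int err, copied;
 *
 *	skb = skb_recv_datagram(sk, flags, &err);
 *	if (!skb)
 *		return err;
 *	copied = skb->len;
 *	if (copied > len) {
 *		copied = len;
 *		msg->msg_flags |= MSG_TRUNC;
 *	}
 *	err = skb_copy_datagram_msg(skb, 0, msg, copied);
 *	skb_free_datagram(sk, skb);
 *	return err ? err : copied;
 */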

void skb_free_datagram(struct sock *sk, struct sk_buff *skb)
{
	consume_skb(skb);
}
EXPORT_SYMBOL(skb_free_datagram);

int __sk_queue_drop_skb(struct sock *sk, struct sk_buff_head *sk_queue,
			struct sk_buff *skb, unsigned int flags,
			void (*destructor)(struct sock *sk,
					   struct sk_buff *skb))
{
	int err = 0;

	if (flags & MSG_PEEK) {
		err = -ENOENT;
		spin_lock_bh(&sk_queue->lock);
		if (skb->next) {
			__skb_unlink(skb, sk_queue);
			refcount_dec(&skb->users);
			if (destructor)
				destructor(sk, skb);
			err = 0;
		}
		spin_unlock_bh(&sk_queue->lock);
	}

	atomic_inc(&sk->sk_drops);
	return err;
}
EXPORT_SYMBOL(__sk_queue_drop_skb);

/**
 *	skb_kill_datagram - Free a datagram skbuff forcibly
 *	@sk: socket
 *	@skb: datagram skbuff
 *	@flags: MSG\_ flags
 *
 *	This function frees a datagram skbuff that was received by
 *	skb_recv_datagram.  The flags argument must match the one
 *	used for skb_recv_datagram.
 *
 *	If the MSG_PEEK flag is set, and the packet is still on the
 *	receive queue of the socket, it will be taken off the queue
 *	before it is freed.
 *
 *	This function currently only disables BH when acquiring the
 *	sk_receive_queue lock.  Therefore it must not be used in a
 *	context where that lock is acquired in an IRQ context.
 *
 *	It returns 0 if the packet was removed by us.
 */

int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
{
	int err = __sk_queue_drop_skb(sk, &sk->sk_receive_queue, skb, flags,
				      NULL);

	kfree_skb(skb);
	return err;
}
EXPORT_SYMBOL(skb_kill_datagram);
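
/* Illustrative usage sketch: receive paths that validate a datagram after
 * peeking it discard it with skb_kill_datagram(); with MSG_PEEK set, the
 * skb is also unlinked from the receive queue before it is freed and
 * sk_drops is bumped:
 *
 *	if (checksum_failed) {		// hypothetical validation result
 *		skb_kill_datagram(sk, skb, flags);
 *		return -EAGAIN;		// callers commonly retry
 *	}
 */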

INDIRECT_CALLABLE_DECLARE(static size_t simple_copy_to_iter(const void *addr,
						size_t bytes,
						void *data __always_unused,
						struct iov_iter *i));

static int __skb_datagram_iter(const struct sk_buff *skb, int offset,
			       struct iov_iter *to, int len, bool fault_short,
			       size_t (*cb)(const void *, size_t, void *,
					    struct iov_iter *), void *data)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset, start_off = offset, n;
	struct sk_buff *frag_iter;

	/* Copy header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		n = INDIRECT_CALL_1(cb, simple_copy_to_iter,
				    skb->data + offset, copy, data, to);
		offset += n;
		if (n != copy)
			goto short_copy;
		if ((len -= copy) == 0)
			return 0;
	}

	if (!skb_frags_readable(skb))
		goto short_copy;

	/* Copy paged appendix. Hmm... why does this look so complicated? */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(frag);
		if ((copy = end - offset) > 0) {
			u32 p_off, p_len, copied;
			struct page *p;
			u8 *vaddr;

			if (copy > len)
				copy = len;

			n = 0;
			skb_frag_foreach_page(frag,
					      skb_frag_off(frag) + offset - start,
					      copy, p, p_off, p_len, copied) {
				vaddr = kmap_local_page(p);
				n += INDIRECT_CALL_1(cb, simple_copy_to_iter,
					vaddr + p_off, p_len, data, to);
				kunmap_local(vaddr);
			}

			offset += n;
			if (n != copy)
				goto short_copy;
			if (!(len -= copy))
				return 0;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			if (__skb_datagram_iter(frag_iter, offset - start,
						to, copy, fault_short, cb, data))
				goto fault;
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
		}
		start = end;
	}
	if (!len)
		return 0;

	/* This is not really a user copy fault, but rather someone
	 * gave us a bogus length on the skb.  We should probably
	 * print a warning here as it may indicate a kernel bug.
	 */

fault:
	iov_iter_revert(to, offset - start_off);
	return -EFAULT;

short_copy:
	if (fault_short || iov_iter_count(to))
		goto fault;

	return 0;
}

static size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
				    struct iov_iter *i)
{
#ifdef CONFIG_CRYPTO_HASH
	struct ahash_request *hash = hashp;
	struct scatterlist sg;
	size_t copied;

	copied = copy_to_iter(addr, bytes, i);
	sg_init_one(&sg, addr, copied);
	ahash_request_set_crypt(hash, &sg, NULL, copied);
	crypto_ahash_update(hash);
	return copied;
#else
	return 0;
#endif
}

/**
 *	skb_copy_and_hash_datagram_iter - Copy datagram to an iovec iterator
 *          and update a hash.
 *	@skb: buffer to copy
 *	@offset: offset in the buffer to start copying from
 *	@to: iovec iterator to copy to
 *	@len: amount of data to copy from buffer to iovec
 *      @hash: hash request to update
 */
int skb_copy_and_hash_datagram_iter(const struct sk_buff *skb, int offset,
			   struct iov_iter *to, int len,
			   struct ahash_request *hash)
{
	return __skb_datagram_iter(skb, offset, to, len, true,
			hash_and_copy_to_iter, hash);
}
EXPORT_SYMBOL(skb_copy_and_hash_datagram_iter);
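
/* Illustrative usage sketch (assumes an async "crc32c" hash is available;
 * allocation and error handling omitted): copy to user space while folding
 * the payload into a digest, in the style of storage-over-TCP users:
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("crc32c", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *
 *	ahash_request_set_callback(req, 0, NULL, NULL);
 *	crypto_ahash_init(req);
 *	err = skb_copy_and_hash_datagram_iter(skb, offset, &msg->msg_iter,
 *					      len, req);
 *	ahash_request_set_crypt(req, NULL, digest, 0);
 *	crypto_ahash_final(req);	// digest now covers the copied bytes
 */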

static size_t simple_copy_to_iter(const void *addr, size_t bytes,
		void *data __always_unused, struct iov_iter *i)
{
	return copy_to_iter(addr, bytes, i);
}

/**
 *	skb_copy_datagram_iter - Copy a datagram to an iovec iterator.
 *	@skb: buffer to copy
 *	@offset: offset in the buffer to start copying from
 *	@to: iovec iterator to copy to
 *	@len: amount of data to copy from buffer to iovec
 */
int skb_copy_datagram_iter(const struct sk_buff *skb, int offset,
			   struct iov_iter *to, int len)
{
	trace_skb_copy_datagram_iovec(skb, len);
	return __skb_datagram_iter(skb, offset, to, len, false,
			simple_copy_to_iter, NULL);
}
EXPORT_SYMBOL(skb_copy_datagram_iter);
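
/* Most protocols reach this through the skb_copy_datagram_msg() wrapper in
 * <linux/skbuff.h>, which simply passes the msghdr's iterator:
 *
 *	err = skb_copy_datagram_msg(skb, 0, msg, copied);
 *	// is equivalent to:
 *	err = skb_copy_datagram_iter(skb, 0, &msg->msg_iter, copied);
 */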

/**
 *	skb_copy_datagram_from_iter - Copy a datagram from an iov_iter.
 *	@skb: buffer to copy
 *	@offset: offset in the buffer to start copying to
 *	@from: the copy source
 *	@len: amount of data to copy to buffer from iovec
 *
 *	Returns 0 or -EFAULT.
 */
int skb_copy_datagram_from_iter(struct sk_buff *skb, int offset,
				 struct iov_iter *from,
				 int len)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	struct sk_buff *frag_iter;

	/* Copy header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		if (copy_from_iter(skb->data + offset, copy, from) != copy)
			goto fault;
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
	}

	/* Copy paged appendix. Hmm... why does this look so complicated? */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(frag);
		if ((copy = end - offset) > 0) {
			size_t copied;

			if (copy > len)
				copy = len;
			copied = copy_page_from_iter(skb_frag_page(frag),
					  skb_frag_off(frag) + offset - start,
					  copy, from);
			if (copied != copy)
				goto fault;

			if (!(len -= copy))
				return 0;
			offset += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			if (skb_copy_datagram_from_iter(frag_iter,
							offset - start,
							from, copy))
				goto fault;
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
		}
		start = end;
	}
	if (!len)
		return 0;

fault:
	return -EFAULT;
}
EXPORT_SYMBOL(skb_copy_datagram_from_iter);
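
/* Illustrative usage sketch: a hypothetical sendmsg() path filling a
 * freshly allocated skb from the caller's iterator ("len" is the payload
 * size requested by the caller):
 *
 *	skb = sock_alloc_send_skb(sk, len, msg->msg_flags & MSG_DONTWAIT,
 *				  &err);
 *	if (!skb)
 *		return err;
 *	skb_put(skb, len);
 *	err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, len);
 *	if (err) {
 *		kfree_skb(skb);
 *		return err;
 *	}
 */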

int zerocopy_fill_skb_from_iter(struct sk_buff *skb,
				struct iov_iter *from, size_t length)
{
	int frag = skb_shinfo(skb)->nr_frags;

	if (!skb_frags_readable(skb))
		return -EFAULT;

	while (length && iov_iter_count(from)) {
		struct page *head, *last_head = NULL;
		struct page *pages[MAX_SKB_FRAGS];
		int refs, order, n = 0;
		size_t start;
		ssize_t copied;

		if (frag == MAX_SKB_FRAGS)
			return -EMSGSIZE;

		copied = iov_iter_get_pages2(from, pages, length,
					    MAX_SKB_FRAGS - frag, &start);
		if (copied < 0)
			return -EFAULT;

		length -= copied;

		skb->data_len += copied;
		skb->len += copied;
		skb->truesize += PAGE_ALIGN(copied + start);

		head = compound_head(pages[n]);
		order = compound_order(head);

		for (refs = 0; copied != 0; start = 0) {
			int size = min_t(int, copied, PAGE_SIZE - start);

			if (pages[n] - head > (1UL << order) - 1) {
				head = compound_head(pages[n]);
				order = compound_order(head);
			}

			start += (pages[n] - head) << PAGE_SHIFT;
			copied -= size;
			n++;
			if (frag) {
				skb_frag_t *last = &skb_shinfo(skb)->frags[frag - 1];

				if (head == skb_frag_page(last) &&
				    start == skb_frag_off(last) + skb_frag_size(last)) {
					skb_frag_size_add(last, size);
					/* We combined this page, we need to release
					 * a reference. Since compound pages refcount
					 * is shared among many pages, batch the refcount
					 * adjustments to limit false sharing.
					 */
					last_head = head;
					refs++;
					continue;
				}
			}
			if (refs) {
				page_ref_sub(last_head, refs);
				refs = 0;
			}
			skb_fill_page_desc_noacc(skb, frag++, head, start, size);
		}
		if (refs)
			page_ref_sub(last_head, refs);
	}
	return 0;
}

int __zerocopy_sg_from_iter(struct msghdr *msg, struct sock *sk,
			    struct sk_buff *skb, struct iov_iter *from,
			    size_t length)
{
	unsigned long orig_size = skb->truesize;
	unsigned long truesize;
	int ret;

	if (msg && msg->msg_ubuf && msg->sg_from_iter)
		ret = msg->sg_from_iter(skb, from, length);
	else
		ret = zerocopy_fill_skb_from_iter(skb, from, length);

	truesize = skb->truesize - orig_size;
	if (sk && sk->sk_type == SOCK_STREAM) {
		sk_wmem_queued_add(sk, truesize);
		if (!skb_zcopy_pure(skb))
			sk_mem_charge(sk, truesize);
	} else {
		refcount_add(truesize, &skb->sk->sk_wmem_alloc);
	}
	return ret;
}
EXPORT_SYMBOL(__zerocopy_sg_from_iter);

/**
 *	zerocopy_sg_from_iter - Build a zerocopy datagram from an iov_iter
 *	@skb: buffer to copy
 *	@from: the source to copy from
 *
 *	The function will first copy up to headlen, and then pin the userspace
 *	pages and build frags through them.
 *
 *	Returns 0, -EFAULT or -EMSGSIZE.
 */
int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *from)
{
	int copy = min_t(int, skb_headlen(skb), iov_iter_count(from));

	/* copy up to skb headlen */
	if (skb_copy_datagram_from_iter(skb, 0, from, copy))
		return -EFAULT;

	return __zerocopy_sg_from_iter(NULL, NULL, skb, from, ~0U);
}
EXPORT_SYMBOL(zerocopy_sg_from_iter);
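
/* Illustrative usage sketch: zerocopy transmit paths (tun/tap-style)
 * reserve the linear part for the bytes that must really be copied, then
 * let the remaining user pages become page frags ("linear" is a
 * hypothetical copy-length chosen by the caller):
 *
 *	skb_put(skb, linear);			// headlen to copy
 *	err = zerocopy_sg_from_iter(skb, from);	// rest is pinned as frags
 *	if (err) {
 *		kfree_skb(skb);
 *		return err;
 *	}
 */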

static __always_inline
size_t copy_to_user_iter_csum(void __user *iter_to, size_t progress,
			      size_t len, void *from, void *priv2)
{
	__wsum next, *csum = priv2;

	next = csum_and_copy_to_user(from + progress, iter_to, len);
	*csum = csum_block_add(*csum, next, progress);
	return next ? 0 : len;
}

static __always_inline
size_t memcpy_to_iter_csum(void *iter_to, size_t progress,
			   size_t len, void *from, void *priv2)
{
	__wsum *csum = priv2;
	__wsum next = csum_partial_copy_nocheck(from + progress, iter_to, len);

	*csum = csum_block_add(*csum, next, progress);
	return 0;
}

struct csum_state {
	__wsum csum;
	size_t off;
};

static size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *_csstate,
				    struct iov_iter *i)
{
	struct csum_state *csstate = _csstate;
	__wsum sum;

	if (WARN_ON_ONCE(i->data_source))
		return 0;
	if (unlikely(iov_iter_is_discard(i))) {
		// can't use csum_memcpy() for that one - data is not copied
		csstate->csum = csum_block_add(csstate->csum,
					       csum_partial(addr, bytes, 0),
					       csstate->off);
		csstate->off += bytes;
		return bytes;
	}

	sum = csum_shift(csstate->csum, csstate->off);

	bytes = iterate_and_advance2(i, bytes, (void *)addr, &sum,
				     copy_to_user_iter_csum,
				     memcpy_to_iter_csum);
	csstate->csum = csum_shift(sum, csstate->off);
	csstate->off += bytes;
	return bytes;
}

/**
 *	skb_copy_and_csum_datagram - Copy datagram to an iovec iterator
 *          and update a checksum.
 *	@skb: buffer to copy
 *	@offset: offset in the buffer to start copying from
 *	@to: iovec iterator to copy to
 *	@len: amount of data to copy from buffer to iovec
 *      @csump: checksum pointer
 */
static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
				      struct iov_iter *to, int len,
				      __wsum *csump)
{
	struct csum_state csdata = { .csum = *csump };
	int ret;

	ret = __skb_datagram_iter(skb, offset, to, len, true,
				  csum_and_copy_to_iter, &csdata);
	if (ret)
		return ret;

	*csump = csdata.csum;
	return 0;
}

/**
 *	skb_copy_and_csum_datagram_msg - Copy and checksum skb to user iovec.
 *	@skb: skbuff
 *	@hlen: hardware length
 *	@msg: destination
 *
 *	Caller _must_ check that skb will fit to this iovec.
 *
 *	Returns: 0       - success.
 *		 -EINVAL - checksum failure.
 *		 -EFAULT - fault during copy.
 */
int skb_copy_and_csum_datagram_msg(struct sk_buff *skb,
				   int hlen, struct msghdr *msg)
{
	__wsum csum;
	int chunk = skb->len - hlen;

	if (!chunk)
		return 0;

	if (msg_data_left(msg) < chunk) {
		if (__skb_checksum_complete(skb))
			return -EINVAL;
		if (skb_copy_datagram_msg(skb, hlen, msg, chunk))
			goto fault;
	} else {
		csum = csum_partial(skb->data, hlen, skb->csum);
		if (skb_copy_and_csum_datagram(skb, hlen, &msg->msg_iter,
					       chunk, &csum))
			goto fault;

		if (csum_fold(csum)) {
			iov_iter_revert(&msg->msg_iter, chunk);
			return -EINVAL;
		}

		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
		    !skb->csum_complete_sw)
			netdev_rx_csum_fault(NULL, skb);
	}
	return 0;
fault:
	return -EFAULT;
}
EXPORT_SYMBOL(skb_copy_and_csum_datagram_msg);
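
/* Illustrative usage sketch: a UDP-style recvmsg() defers checksum
 * verification to copy time when the device did not already verify it;
 * "hlen" is the transport header length already consumed:
 *
 *	if (skb_csum_unnecessary(skb))
 *		err = skb_copy_datagram_msg(skb, hlen, msg, copied);
 *	else
 *		err = skb_copy_and_csum_datagram_msg(skb, hlen, msg);
 *	if (err == -EINVAL) {			// checksum failed
 *		skb_kill_datagram(sk, skb, flags);
 *		return -EAGAIN;			// pretend nothing arrived
 *	}
 */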

/**
 * 	datagram_poll - generic datagram poll
 *	@file: file struct
 *	@sock: socket
 *	@wait: poll table
 *
 *	Datagram poll: Again totally generic. This also handles
 *	sequenced packet sockets providing the socket receive queue
 *	is only ever holding data ready to receive.
 *
 *	Note: when you *don't* use this routine for this protocol,
 *	and you use a different write policy from sock_writeable()
 *	then please supply your own write_space callback.
 */
__poll_t datagram_poll(struct file *file, struct socket *sock,
			   poll_table *wait)
{
	struct sock *sk = sock->sk;
	__poll_t mask;
	u8 shutdown;

	sock_poll_wait(file, sock, wait);
	mask = 0;

	/* exceptional events? */
	if (READ_ONCE(sk->sk_err) ||
	    !skb_queue_empty_lockless(&sk->sk_error_queue))
		mask |= EPOLLERR |
			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);

	shutdown = READ_ONCE(sk->sk_shutdown);
	if (shutdown & RCV_SHUTDOWN)
		mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
	if (shutdown == SHUTDOWN_MASK)
		mask |= EPOLLHUP;

	/* readable? */
	if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
		mask |= EPOLLIN | EPOLLRDNORM;

	/* Connection-based need to check for termination and startup */
	if (connection_based(sk)) {
		int state = READ_ONCE(sk->sk_state);

		if (state == TCP_CLOSE)
			mask |= EPOLLHUP;
		/* connection hasn't started yet? */
		if (state == TCP_SYN_SENT)
			return mask;
	}

	/* writable? */
	if (sock_writeable(sk))
		mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
	else
		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	return mask;
}
EXPORT_SYMBOL(datagram_poll);
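
/* Illustrative usage sketch: datagram protocols normally just plug this
 * routine into their proto_ops (a hypothetical PF_EXAMPLE family is shown;
 * example_recvmsg and friends stand in for the protocol's own handlers):
 *
 *	static const struct proto_ops example_dgram_ops = {
 *		.family		= PF_EXAMPLE,
 *		.poll		= datagram_poll,
 *		.recvmsg	= example_recvmsg,	// and so on
 *	};
 */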
928