xref: /linux/net/core/sock.c (revision 15ecd83dc06277385ad71dc7ea26911d9a79acaf)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * INET		An implementation of the TCP/IP protocol suite for the LINUX
4  *		operating system.  INET is implemented using the  BSD Socket
5  *		interface as the means of communication with the user level.
6  *
7  *		Generic socket support routines. Memory allocators, socket lock/release
8  *		handler for protocols to use and generic option handler.
9  *
10  * Authors:	Ross Biro
11  *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12  *		Florian La Roche, <flla@stud.uni-sb.de>
13  *		Alan Cox, <A.Cox@swansea.ac.uk>
14  *
15  * Fixes:
16  *		Alan Cox	: 	Numerous verify_area() problems
17  *		Alan Cox	:	Connecting on a connecting socket
18  *					now returns an error for tcp.
19  *		Alan Cox	:	sock->protocol is set correctly.
20  *					and is not sometimes left as 0.
21  *		Alan Cox	:	connect handles icmp errors on a
22  *					connect properly. Unfortunately there
23  *					is a restart syscall nasty there. I
24  *					can't match BSD without hacking the C
25  *					library. Ideas urgently sought!
26  *		Alan Cox	:	Disallow bind() to addresses that are
27  *					not ours - especially broadcast ones!!
28  *		Alan Cox	:	Socket 1024 _IS_ ok for users. (fencepost)
29  *		Alan Cox	:	sock_wfree/sock_rfree don't destroy sockets,
30  *					instead they leave that for the DESTROY timer.
31  *		Alan Cox	:	Clean up error flag in accept
32  *		Alan Cox	:	TCP ack handling is buggy, the DESTROY timer
33  *					was buggy. Put a remove_sock() in the handler
34  *					for memory when we hit 0. Also altered the timer
35  *					code. The ACK stuff can wait and needs major
36  *					TCP layer surgery.
37  *		Alan Cox	:	Fixed TCP ack bug, removed remove sock
38  *					and fixed timer/inet_bh race.
39  *		Alan Cox	:	Added zapped flag for TCP
40  *		Alan Cox	:	Move kfree_skb into skbuff.c and tidied up surplus code
41  *		Alan Cox	:	for new sk_buff allocations wmalloc/rmalloc now call alloc_skb
42  *		Alan Cox	:	kfree_s calls now are kfree_skbmem so we can track skb resources
43  *		Alan Cox	:	Supports socket option broadcast now as does udp. Packet and raw need fixing.
44  *		Alan Cox	:	Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so...
45  *		Rick Sladkey	:	Relaxed UDP rules for matching packets.
46  *		C.E.Hawkins	:	IFF_PROMISC/SIOCGHWADDR support
47  *	Pauline Middelink	:	identd support
48  *		Alan Cox	:	Fixed connect() taking signals I think.
49  *		Alan Cox	:	SO_LINGER supported
50  *		Alan Cox	:	Error reporting fixes
51  *		Anonymous	:	inet_create tidied up (sk->reuse setting)
52  *		Alan Cox	:	inet sockets don't set sk->type!
53  *		Alan Cox	:	Split socket option code
54  *		Alan Cox	:	Callbacks
55  *		Alan Cox	:	Nagle flag for Charles & Johannes stuff
56  *		Alex		:	Removed restriction on inet fioctl
57  *		Alan Cox	:	Splitting INET from NET core
58  *		Alan Cox	:	Fixed bogus SO_TYPE handling in getsockopt()
59  *		Adam Caldwell	:	Missing return in SO_DONTROUTE/SO_DEBUG code
60  *		Alan Cox	:	Split IP from generic code
61  *		Alan Cox	:	New kfree_skbmem()
62  *		Alan Cox	:	Make SO_DEBUG superuser only.
63  *		Alan Cox	:	Allow anyone to clear SO_DEBUG
64  *					(compatibility fix)
65  *		Alan Cox	:	Added optimistic memory grabbing for AF_UNIX throughput.
66  *		Alan Cox	:	Allocator for a socket is settable.
67  *		Alan Cox	:	SO_ERROR includes soft errors.
68  *		Alan Cox	:	Allow NULL arguments on some SO_ opts
69  *		Alan Cox	: 	Generic socket allocation to make hooks
70  *					easier (suggested by Craig Metz).
71  *		Michael Pall	:	SO_ERROR returns positive errno again
72  *              Steve Whitehouse:       Added default destructor to free
73  *                                      protocol private data.
74  *              Steve Whitehouse:       Added various other default routines
75  *                                      common to several socket families.
76  *              Chris Evans     :       Call suser() check last on F_SETOWN
77  *		Jay Schulist	:	Added SO_ATTACH_FILTER and SO_DETACH_FILTER.
78  *		Andi Kleen	:	Add sock_kmalloc()/sock_kfree_s()
79  *		Andi Kleen	:	Fix write_space callback
80  *		Chris Evans	:	Security fixes - signedness again
81  *		Arnaldo C. Melo :       cleanups, use skb_queue_purge
82  *
83  * To Fix:
84  */
85 
86 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
87 
88 #include <linux/unaligned.h>
89 #include <linux/capability.h>
90 #include <linux/errno.h>
91 #include <linux/errqueue.h>
92 #include <linux/types.h>
93 #include <linux/socket.h>
94 #include <linux/in.h>
95 #include <linux/kernel.h>
96 #include <linux/module.h>
97 #include <linux/proc_fs.h>
98 #include <linux/seq_file.h>
99 #include <linux/sched.h>
100 #include <linux/sched/mm.h>
101 #include <linux/timer.h>
102 #include <linux/string.h>
103 #include <linux/sockios.h>
104 #include <linux/net.h>
105 #include <linux/mm.h>
106 #include <linux/slab.h>
107 #include <linux/interrupt.h>
108 #include <linux/poll.h>
109 #include <linux/tcp.h>
110 #include <linux/udp.h>
111 #include <linux/init.h>
112 #include <linux/highmem.h>
113 #include <linux/user_namespace.h>
114 #include <linux/static_key.h>
115 #include <linux/memcontrol.h>
116 #include <linux/prefetch.h>
117 #include <linux/compat.h>
118 #include <linux/mroute.h>
119 #include <linux/mroute6.h>
120 #include <linux/icmpv6.h>
121 
122 #include <linux/uaccess.h>
123 
124 #include <linux/netdevice.h>
125 #include <net/protocol.h>
126 #include <linux/skbuff.h>
127 #include <linux/skbuff_ref.h>
128 #include <net/net_namespace.h>
129 #include <net/request_sock.h>
130 #include <net/sock.h>
131 #include <net/proto_memory.h>
132 #include <linux/net_tstamp.h>
133 #include <net/xfrm.h>
134 #include <linux/ipsec.h>
135 #include <net/cls_cgroup.h>
136 #include <net/netprio_cgroup.h>
137 #include <linux/sock_diag.h>
138 
139 #include <linux/filter.h>
140 #include <net/sock_reuseport.h>
141 #include <net/bpf_sk_storage.h>
142 
143 #include <trace/events/sock.h>
144 
145 #include <net/tcp.h>
146 #include <net/busy_poll.h>
147 #include <net/phonet/phonet.h>
148 
149 #include <linux/ethtool.h>
150 
151 #include <uapi/linux/pidfd.h>
152 
153 #include "dev.h"
154 
155 static DEFINE_MUTEX(proto_list_mutex);
156 static LIST_HEAD(proto_list);
157 
158 static void sock_def_write_space_wfree(struct sock *sk);
159 static void sock_def_write_space(struct sock *sk);
160 
161 /**
162  * sk_ns_capable - General socket capability test
163  * @sk: Socket to use a capability on or through
164  * @user_ns: The user namespace of the capability to use
165  * @cap: The capability to use
166  *
167  * Test to see if the opener of the socket had the capability @cap in the
168  * user namespace @user_ns when the socket was created and if the current
169  * process has it as well.
170  */
171 bool sk_ns_capable(const struct sock *sk,
172 		   struct user_namespace *user_ns, int cap)
173 {
174 	return file_ns_capable(sk->sk_socket->file, user_ns, cap) &&
175 		ns_capable(user_ns, cap);
176 }
177 EXPORT_SYMBOL(sk_ns_capable);
178 
179 /**
180  * sk_capable - Socket global capability test
181  * @sk: Socket to use a capability on or through
182  * @cap: The global capability to use
183  *
184  * Test to see if the opener of the socket had the capability @cap in all
185  * user namespaces when the socket was created and if the current process
186  * has it as well.
187  */
188 bool sk_capable(const struct sock *sk, int cap)
189 {
190 	return sk_ns_capable(sk, &init_user_ns, cap);
191 }
192 EXPORT_SYMBOL(sk_capable);
193 
194 /**
195  * sk_net_capable - Network namespace socket capability test
196  * @sk: Socket to use a capability on or through
197  * @cap: The capability to use
198  *
199  * Test to see if the opener of the socket had the capability @cap over the
200  * network namespace the socket is a member of when the socket was created
201  * and if the current process has it as well.
202  */
203 bool sk_net_capable(const struct sock *sk, int cap)
204 {
205 	return sk_ns_capable(sk, sock_net(sk)->user_ns, cap);
206 }
207 EXPORT_SYMBOL(sk_net_capable);
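
/*
 * Usage sketch (illustrative only): a protocol that wants to gate a
 * privileged socket option on the opener's credentials would typically do
 * something like
 *
 *	if (!sk_net_capable(sk, CAP_NET_ADMIN))
 *		return -EPERM;
 *
 * sk_capable() performs the same test against the initial user namespace,
 * and sk_ns_capable() against an arbitrary one.
 */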
208 
209 /*
210  * Each address family might have different locking rules, so we have
211  * one slock key per address family and separate keys for internal and
212  * userspace sockets.
213  */
214 static struct lock_class_key af_family_keys[AF_MAX];
215 static struct lock_class_key af_family_kern_keys[AF_MAX];
216 static struct lock_class_key af_family_slock_keys[AF_MAX];
217 static struct lock_class_key af_family_kern_slock_keys[AF_MAX];
218 
219 /*
220  * Make lock validator output more readable. (we pre-construct these
221  * strings at build time, so that runtime initialization of socket
222  * locks is fast):
223  */
224 
225 #define _sock_locks(x)						  \
226   x "AF_UNSPEC",	x "AF_UNIX"     ,	x "AF_INET"     , \
227   x "AF_AX25"  ,	x "AF_IPX"      ,	x "AF_APPLETALK", \
228   x "AF_NETROM",	x "AF_BRIDGE"   ,	x "AF_ATMPVC"   , \
229   x "AF_X25"   ,	x "AF_INET6"    ,	x "AF_ROSE"     , \
230   x "AF_DECnet",	x "AF_NETBEUI"  ,	x "AF_SECURITY" , \
231   x "AF_KEY"   ,	x "AF_NETLINK"  ,	x "AF_PACKET"   , \
232   x "AF_ASH"   ,	x "AF_ECONET"   ,	x "AF_ATMSVC"   , \
233   x "AF_RDS"   ,	x "AF_SNA"      ,	x "AF_IRDA"     , \
234   x "AF_PPPOX" ,	x "AF_WANPIPE"  ,	x "AF_LLC"      , \
235   x "27"       ,	x "28"          ,	x "AF_CAN"      , \
236   x "AF_TIPC"  ,	x "AF_BLUETOOTH",	x "IUCV"        , \
237   x "AF_RXRPC" ,	x "AF_ISDN"     ,	x "AF_PHONET"   , \
238   x "AF_IEEE802154",	x "AF_CAIF"	,	x "AF_ALG"      , \
239   x "AF_NFC"   ,	x "AF_VSOCK"    ,	x "AF_KCM"      , \
240   x "AF_QIPCRTR",	x "AF_SMC"	,	x "AF_XDP"	, \
241   x "AF_MCTP"  , \
242   x "AF_MAX"
243 
244 static const char *const af_family_key_strings[AF_MAX+1] = {
245 	_sock_locks("sk_lock-")
246 };
247 static const char *const af_family_slock_key_strings[AF_MAX+1] = {
248 	_sock_locks("slock-")
249 };
250 static const char *const af_family_clock_key_strings[AF_MAX+1] = {
251 	_sock_locks("clock-")
252 };
253 
254 static const char *const af_family_kern_key_strings[AF_MAX+1] = {
255 	_sock_locks("k-sk_lock-")
256 };
257 static const char *const af_family_kern_slock_key_strings[AF_MAX+1] = {
258 	_sock_locks("k-slock-")
259 };
260 static const char *const af_family_kern_clock_key_strings[AF_MAX+1] = {
261 	_sock_locks("k-clock-")
262 };
263 static const char *const af_family_rlock_key_strings[AF_MAX+1] = {
264 	_sock_locks("rlock-")
265 };
266 static const char *const af_family_wlock_key_strings[AF_MAX+1] = {
267 	_sock_locks("wlock-")
268 };
269 static const char *const af_family_elock_key_strings[AF_MAX+1] = {
270 	_sock_locks("elock-")
271 };
272 
273 /*
274  * sk_callback_lock and sk queues locking rules are per-address-family,
275  * so split the lock classes by using a per-AF key:
276  */
277 static struct lock_class_key af_callback_keys[AF_MAX];
278 static struct lock_class_key af_rlock_keys[AF_MAX];
279 static struct lock_class_key af_wlock_keys[AF_MAX];
280 static struct lock_class_key af_elock_keys[AF_MAX];
281 static struct lock_class_key af_kern_callback_keys[AF_MAX];
282 
283 /* Run time adjustable parameters. */
284 __u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX;
285 EXPORT_SYMBOL(sysctl_wmem_max);
286 __u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
287 EXPORT_SYMBOL(sysctl_rmem_max);
288 __u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
289 __u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;
290 
291 DEFINE_STATIC_KEY_FALSE(memalloc_socks_key);
292 EXPORT_SYMBOL_GPL(memalloc_socks_key);
293 
294 /**
295  * sk_set_memalloc - sets %SOCK_MEMALLOC
296  * @sk: socket to set it on
297  *
298  * Set %SOCK_MEMALLOC on a socket for access to emergency reserves.
299  * It's the responsibility of the admin to adjust min_free_kbytes
300  * to meet the requirements.
301  */
302 void sk_set_memalloc(struct sock *sk)
303 {
304 	sock_set_flag(sk, SOCK_MEMALLOC);
305 	sk->sk_allocation |= __GFP_MEMALLOC;
306 	static_branch_inc(&memalloc_socks_key);
307 }
308 EXPORT_SYMBOL_GPL(sk_set_memalloc);
309 
310 void sk_clear_memalloc(struct sock *sk)
311 {
312 	sock_reset_flag(sk, SOCK_MEMALLOC);
313 	sk->sk_allocation &= ~__GFP_MEMALLOC;
314 	static_branch_dec(&memalloc_socks_key);
315 
316 	/*
317 	 * SOCK_MEMALLOC is allowed to ignore rmem limits to ensure forward
318 	 * progress of swapping. SOCK_MEMALLOC may be cleared while
319 	 * it has rmem allocations due to the last swapfile being deactivated
320 	 * but there is a risk that the socket is unusable due to exceeding
321 	 * the rmem limits. Reclaim the reserves and obey rmem limits again.
322 	 */
323 	sk_mem_reclaim(sk);
324 }
325 EXPORT_SYMBOL_GPL(sk_clear_memalloc);
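
/*
 * Usage sketch (illustrative only): swap-over-network transports (e.g. the
 * kernel socket backing an NBD or NFS swapfile) call
 *
 *	sk_set_memalloc(sock->sk);
 *
 * so that writeback under memory pressure may dip into the emergency
 * reserves, and sk_clear_memalloc() once the swapfile is deactivated.
 */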
326 
327 int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
328 {
329 	int ret;
330 	unsigned int noreclaim_flag;
331 
332 	/* these should have been dropped before queueing */
333 	BUG_ON(!sock_flag(sk, SOCK_MEMALLOC));
334 
335 	noreclaim_flag = memalloc_noreclaim_save();
336 	ret = INDIRECT_CALL_INET(sk->sk_backlog_rcv,
337 				 tcp_v6_do_rcv,
338 				 tcp_v4_do_rcv,
339 				 sk, skb);
340 	memalloc_noreclaim_restore(noreclaim_flag);
341 
342 	return ret;
343 }
344 EXPORT_SYMBOL(__sk_backlog_rcv);
345 
346 void sk_error_report(struct sock *sk)
347 {
348 	sk->sk_error_report(sk);
349 
350 	switch (sk->sk_family) {
351 	case AF_INET:
352 		fallthrough;
353 	case AF_INET6:
354 		trace_inet_sk_error_report(sk);
355 		break;
356 	default:
357 		break;
358 	}
359 }
360 EXPORT_SYMBOL(sk_error_report);
361 
362 int sock_get_timeout(long timeo, void *optval, bool old_timeval)
363 {
364 	struct __kernel_sock_timeval tv;
365 
366 	if (timeo == MAX_SCHEDULE_TIMEOUT) {
367 		tv.tv_sec = 0;
368 		tv.tv_usec = 0;
369 	} else {
370 		tv.tv_sec = timeo / HZ;
371 		tv.tv_usec = ((timeo % HZ) * USEC_PER_SEC) / HZ;
372 	}
373 
374 	if (old_timeval && in_compat_syscall() && !COMPAT_USE_64BIT_TIME) {
375 		struct old_timeval32 tv32 = { tv.tv_sec, tv.tv_usec };
376 		*(struct old_timeval32 *)optval = tv32;
377 		return sizeof(tv32);
378 	}
379 
380 	if (old_timeval) {
381 		struct __kernel_old_timeval old_tv;
382 		old_tv.tv_sec = tv.tv_sec;
383 		old_tv.tv_usec = tv.tv_usec;
384 		*(struct __kernel_old_timeval *)optval = old_tv;
385 		return sizeof(old_tv);
386 	}
387 
388 	*(struct __kernel_sock_timeval *)optval = tv;
389 	return sizeof(tv);
390 }
391 EXPORT_SYMBOL(sock_get_timeout);
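
/*
 * Worked example (assuming HZ == 250): a timeout of 625 jiffies is reported
 * as tv_sec = 625 / 250 = 2 and tv_usec = ((625 % 250) * USEC_PER_SEC) / 250
 * = 500000, i.e. 2.5 seconds.  MAX_SCHEDULE_TIMEOUT is reported as {0, 0},
 * which getsockopt(SO_RCVTIMEO/SO_SNDTIMEO) callers interpret as "no timeout".
 */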
392 
393 int sock_copy_user_timeval(struct __kernel_sock_timeval *tv,
394 			   sockptr_t optval, int optlen, bool old_timeval)
395 {
396 	if (old_timeval && in_compat_syscall() && !COMPAT_USE_64BIT_TIME) {
397 		struct old_timeval32 tv32;
398 
399 		if (optlen < sizeof(tv32))
400 			return -EINVAL;
401 
402 		if (copy_from_sockptr(&tv32, optval, sizeof(tv32)))
403 			return -EFAULT;
404 		tv->tv_sec = tv32.tv_sec;
405 		tv->tv_usec = tv32.tv_usec;
406 	} else if (old_timeval) {
407 		struct __kernel_old_timeval old_tv;
408 
409 		if (optlen < sizeof(old_tv))
410 			return -EINVAL;
411 		if (copy_from_sockptr(&old_tv, optval, sizeof(old_tv)))
412 			return -EFAULT;
413 		tv->tv_sec = old_tv.tv_sec;
414 		tv->tv_usec = old_tv.tv_usec;
415 	} else {
416 		if (optlen < sizeof(*tv))
417 			return -EINVAL;
418 		if (copy_from_sockptr(tv, optval, sizeof(*tv)))
419 			return -EFAULT;
420 	}
421 
422 	return 0;
423 }
424 EXPORT_SYMBOL(sock_copy_user_timeval);
425 
426 static int sock_set_timeout(long *timeo_p, sockptr_t optval, int optlen,
427 			    bool old_timeval)
428 {
429 	struct __kernel_sock_timeval tv;
430 	int err = sock_copy_user_timeval(&tv, optval, optlen, old_timeval);
431 	long val;
432 
433 	if (err)
434 		return err;
435 
436 	if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC)
437 		return -EDOM;
438 
439 	if (tv.tv_sec < 0) {
440 		static int warned __read_mostly;
441 
442 		WRITE_ONCE(*timeo_p, 0);
443 		if (warned < 10 && net_ratelimit()) {
444 			warned++;
445 			pr_info("%s: `%s' (pid %d) tries to set negative timeout\n",
446 				__func__, current->comm, task_pid_nr(current));
447 		}
448 		return 0;
449 	}
450 	val = MAX_SCHEDULE_TIMEOUT;
451 	if ((tv.tv_sec || tv.tv_usec) &&
452 	    (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT / HZ - 1)))
453 		val = tv.tv_sec * HZ + DIV_ROUND_UP((unsigned long)tv.tv_usec,
454 						    USEC_PER_SEC / HZ);
455 	WRITE_ONCE(*timeo_p, val);
456 	return 0;
457 }
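
/*
 * Behaviour sketch for the helper above: { .tv_sec = 2, .tv_usec = 500000 }
 * becomes a 2.5 second timeout in jiffies, a tv_usec outside
 * [0, USEC_PER_SEC) is rejected with -EDOM, and a negative tv_sec is turned
 * into an immediate timeout (0) with a rate-limited warning.
 */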
458 
459 static bool sk_set_prio_allowed(const struct sock *sk, int val)
460 {
461 	return ((val >= TC_PRIO_BESTEFFORT && val <= TC_PRIO_INTERACTIVE) ||
462 		sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_RAW) ||
463 		sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN));
464 }
465 
466 static bool sock_needs_netstamp(const struct sock *sk)
467 {
468 	switch (sk->sk_family) {
469 	case AF_UNSPEC:
470 	case AF_UNIX:
471 		return false;
472 	default:
473 		return true;
474 	}
475 }
476 
477 static void sock_disable_timestamp(struct sock *sk, unsigned long flags)
478 {
479 	if (sk->sk_flags & flags) {
480 		sk->sk_flags &= ~flags;
481 		if (sock_needs_netstamp(sk) &&
482 		    !(sk->sk_flags & SK_FLAGS_TIMESTAMP))
483 			net_disable_timestamp();
484 	}
485 }
486 
487 
488 int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
489 {
490 	unsigned long flags;
491 	struct sk_buff_head *list = &sk->sk_receive_queue;
492 
493 	if (atomic_read(&sk->sk_rmem_alloc) >= READ_ONCE(sk->sk_rcvbuf)) {
494 		atomic_inc(&sk->sk_drops);
495 		trace_sock_rcvqueue_full(sk, skb);
496 		return -ENOMEM;
497 	}
498 
499 	if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
500 		atomic_inc(&sk->sk_drops);
501 		return -ENOBUFS;
502 	}
503 
504 	skb->dev = NULL;
505 	skb_set_owner_r(skb, sk);
506 
507 	/* we escape from the RCU-protected region, make sure we don't leak
508 	 * a non-refcounted dst
509 	 */
510 	skb_dst_force(skb);
511 
512 	spin_lock_irqsave(&list->lock, flags);
513 	sock_skb_set_dropcount(sk, skb);
514 	__skb_queue_tail(list, skb);
515 	spin_unlock_irqrestore(&list->lock, flags);
516 
517 	if (!sock_flag(sk, SOCK_DEAD))
518 		sk->sk_data_ready(sk);
519 	return 0;
520 }
521 EXPORT_SYMBOL(__sock_queue_rcv_skb);
522 
523 int sock_queue_rcv_skb_reason(struct sock *sk, struct sk_buff *skb,
524 			      enum skb_drop_reason *reason)
525 {
526 	enum skb_drop_reason drop_reason;
527 	int err;
528 
529 	err = sk_filter(sk, skb);
530 	if (err) {
531 		drop_reason = SKB_DROP_REASON_SOCKET_FILTER;
532 		goto out;
533 	}
534 	err = __sock_queue_rcv_skb(sk, skb);
535 	switch (err) {
536 	case -ENOMEM:
537 		drop_reason = SKB_DROP_REASON_SOCKET_RCVBUFF;
538 		break;
539 	case -ENOBUFS:
540 		drop_reason = SKB_DROP_REASON_PROTO_MEM;
541 		break;
542 	default:
543 		drop_reason = SKB_NOT_DROPPED_YET;
544 		break;
545 	}
546 out:
547 	if (reason)
548 		*reason = drop_reason;
549 	return err;
550 }
551 EXPORT_SYMBOL(sock_queue_rcv_skb_reason);
552 
553 int __sk_receive_skb(struct sock *sk, struct sk_buff *skb,
554 		     const int nested, unsigned int trim_cap, bool refcounted)
555 {
556 	int rc = NET_RX_SUCCESS;
557 
558 	if (sk_filter_trim_cap(sk, skb, trim_cap))
559 		goto discard_and_relse;
560 
561 	skb->dev = NULL;
562 
563 	if (sk_rcvqueues_full(sk, READ_ONCE(sk->sk_rcvbuf))) {
564 		atomic_inc(&sk->sk_drops);
565 		goto discard_and_relse;
566 	}
567 	if (nested)
568 		bh_lock_sock_nested(sk);
569 	else
570 		bh_lock_sock(sk);
571 	if (!sock_owned_by_user(sk)) {
572 		/*
573 		 * trylock + unlock semantics:
574 		 */
575 		mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);
576 
577 		rc = sk_backlog_rcv(sk, skb);
578 
579 		mutex_release(&sk->sk_lock.dep_map, _RET_IP_);
580 	} else if (sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf))) {
581 		bh_unlock_sock(sk);
582 		atomic_inc(&sk->sk_drops);
583 		goto discard_and_relse;
584 	}
585 
586 	bh_unlock_sock(sk);
587 out:
588 	if (refcounted)
589 		sock_put(sk);
590 	return rc;
591 discard_and_relse:
592 	kfree_skb(skb);
593 	goto out;
594 }
595 EXPORT_SYMBOL(__sk_receive_skb);
596 
597 INDIRECT_CALLABLE_DECLARE(struct dst_entry *ip6_dst_check(struct dst_entry *,
598 							  u32));
599 INDIRECT_CALLABLE_DECLARE(struct dst_entry *ipv4_dst_check(struct dst_entry *,
600 							   u32));
601 struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
602 {
603 	struct dst_entry *dst = __sk_dst_get(sk);
604 
605 	if (dst && dst->obsolete &&
606 	    INDIRECT_CALL_INET(dst->ops->check, ip6_dst_check, ipv4_dst_check,
607 			       dst, cookie) == NULL) {
608 		sk_tx_queue_clear(sk);
609 		WRITE_ONCE(sk->sk_dst_pending_confirm, 0);
610 		RCU_INIT_POINTER(sk->sk_dst_cache, NULL);
611 		dst_release(dst);
612 		return NULL;
613 	}
614 
615 	return dst;
616 }
617 EXPORT_SYMBOL(__sk_dst_check);
618 
619 struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
620 {
621 	struct dst_entry *dst = sk_dst_get(sk);
622 
623 	if (dst && dst->obsolete &&
624 	    INDIRECT_CALL_INET(dst->ops->check, ip6_dst_check, ipv4_dst_check,
625 			       dst, cookie) == NULL) {
626 		sk_dst_reset(sk);
627 		dst_release(dst);
628 		return NULL;
629 	}
630 
631 	return dst;
632 }
633 EXPORT_SYMBOL(sk_dst_check);
634 
635 static int sock_bindtoindex_locked(struct sock *sk, int ifindex)
636 {
637 	int ret = -ENOPROTOOPT;
638 #ifdef CONFIG_NETDEVICES
639 	struct net *net = sock_net(sk);
640 
641 	/* Sorry... */
642 	ret = -EPERM;
643 	if (sk->sk_bound_dev_if && !ns_capable(net->user_ns, CAP_NET_RAW))
644 		goto out;
645 
646 	ret = -EINVAL;
647 	if (ifindex < 0)
648 		goto out;
649 
650 	/* Paired with all READ_ONCE() done locklessly. */
651 	WRITE_ONCE(sk->sk_bound_dev_if, ifindex);
652 
653 	if (sk->sk_prot->rehash)
654 		sk->sk_prot->rehash(sk);
655 	sk_dst_reset(sk);
656 
657 	ret = 0;
658 
659 out:
660 #endif
661 
662 	return ret;
663 }
664 
665 int sock_bindtoindex(struct sock *sk, int ifindex, bool lock_sk)
666 {
667 	int ret;
668 
669 	if (lock_sk)
670 		lock_sock(sk);
671 	ret = sock_bindtoindex_locked(sk, ifindex);
672 	if (lock_sk)
673 		release_sock(sk);
674 
675 	return ret;
676 }
677 EXPORT_SYMBOL(sock_bindtoindex);
678 
679 static int sock_setbindtodevice(struct sock *sk, sockptr_t optval, int optlen)
680 {
681 	int ret = -ENOPROTOOPT;
682 #ifdef CONFIG_NETDEVICES
683 	struct net *net = sock_net(sk);
684 	char devname[IFNAMSIZ];
685 	int index;
686 
687 	ret = -EINVAL;
688 	if (optlen < 0)
689 		goto out;
690 
691 	/* Bind this socket to a particular device like "eth0",
692 	 * as specified in the passed interface name. If the
693 	 * name is "" or the option length is zero the socket
694 	 * is not bound.
695 	 */
696 	if (optlen > IFNAMSIZ - 1)
697 		optlen = IFNAMSIZ - 1;
698 	memset(devname, 0, sizeof(devname));
699 
700 	ret = -EFAULT;
701 	if (copy_from_sockptr(devname, optval, optlen))
702 		goto out;
703 
704 	index = 0;
705 	if (devname[0] != '\0') {
706 		struct net_device *dev;
707 
708 		rcu_read_lock();
709 		dev = dev_get_by_name_rcu(net, devname);
710 		if (dev)
711 			index = dev->ifindex;
712 		rcu_read_unlock();
713 		ret = -ENODEV;
714 		if (!dev)
715 			goto out;
716 	}
717 
718 	sockopt_lock_sock(sk);
719 	ret = sock_bindtoindex_locked(sk, index);
720 	sockopt_release_sock(sk);
721 out:
722 #endif
723 
724 	return ret;
725 }
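
/*
 * Userspace sketch (illustrative, fd is a hypothetical socket descriptor):
 *
 *	setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE, "eth0", strlen("eth0") + 1);
 *
 * binds the socket to eth0, while an empty name or a zero option length
 * removes the binding again; per the check in sock_bindtoindex_locked(),
 * CAP_NET_RAW over the namespace is needed to change an existing binding.
 */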
726 
727 static int sock_getbindtodevice(struct sock *sk, sockptr_t optval,
728 				sockptr_t optlen, int len)
729 {
730 	int ret = -ENOPROTOOPT;
731 #ifdef CONFIG_NETDEVICES
732 	int bound_dev_if = READ_ONCE(sk->sk_bound_dev_if);
733 	struct net *net = sock_net(sk);
734 	char devname[IFNAMSIZ];
735 
736 	if (bound_dev_if == 0) {
737 		len = 0;
738 		goto zero;
739 	}
740 
741 	ret = -EINVAL;
742 	if (len < IFNAMSIZ)
743 		goto out;
744 
745 	ret = netdev_get_name(net, devname, bound_dev_if);
746 	if (ret)
747 		goto out;
748 
749 	len = strlen(devname) + 1;
750 
751 	ret = -EFAULT;
752 	if (copy_to_sockptr(optval, devname, len))
753 		goto out;
754 
755 zero:
756 	ret = -EFAULT;
757 	if (copy_to_sockptr(optlen, &len, sizeof(int)))
758 		goto out;
759 
760 	ret = 0;
761 
762 out:
763 #endif
764 
765 	return ret;
766 }
767 
768 bool sk_mc_loop(const struct sock *sk)
769 {
770 	if (dev_recursion_level())
771 		return false;
772 	if (!sk)
773 		return true;
774 	/* IPV6_ADDRFORM can change sk->sk_family under us. */
775 	switch (READ_ONCE(sk->sk_family)) {
776 	case AF_INET:
777 		return inet_test_bit(MC_LOOP, sk);
778 #if IS_ENABLED(CONFIG_IPV6)
779 	case AF_INET6:
780 		return inet6_test_bit(MC6_LOOP, sk);
781 #endif
782 	}
783 	WARN_ON_ONCE(1);
784 	return true;
785 }
786 EXPORT_SYMBOL(sk_mc_loop);
787 
788 void sock_set_reuseaddr(struct sock *sk)
789 {
790 	lock_sock(sk);
791 	sk->sk_reuse = SK_CAN_REUSE;
792 	release_sock(sk);
793 }
794 EXPORT_SYMBOL(sock_set_reuseaddr);
795 
796 void sock_set_reuseport(struct sock *sk)
797 {
798 	lock_sock(sk);
799 	sk->sk_reuseport = true;
800 	release_sock(sk);
801 }
802 EXPORT_SYMBOL(sock_set_reuseport);
803 
804 void sock_no_linger(struct sock *sk)
805 {
806 	lock_sock(sk);
807 	WRITE_ONCE(sk->sk_lingertime, 0);
808 	sock_set_flag(sk, SOCK_LINGER);
809 	release_sock(sk);
810 }
811 EXPORT_SYMBOL(sock_no_linger);
812 
813 void sock_set_priority(struct sock *sk, u32 priority)
814 {
815 	WRITE_ONCE(sk->sk_priority, priority);
816 }
817 EXPORT_SYMBOL(sock_set_priority);
818 
819 void sock_set_sndtimeo(struct sock *sk, s64 secs)
820 {
821 	lock_sock(sk);
822 	if (secs && secs < MAX_SCHEDULE_TIMEOUT / HZ - 1)
823 		WRITE_ONCE(sk->sk_sndtimeo, secs * HZ);
824 	else
825 		WRITE_ONCE(sk->sk_sndtimeo, MAX_SCHEDULE_TIMEOUT);
826 	release_sock(sk);
827 }
828 EXPORT_SYMBOL(sock_set_sndtimeo);
829 
830 static void __sock_set_timestamps(struct sock *sk, bool val, bool new, bool ns)
831 {
832 	sock_valbool_flag(sk, SOCK_RCVTSTAMP, val);
833 	sock_valbool_flag(sk, SOCK_RCVTSTAMPNS, val && ns);
834 	if (val)  {
835 		sock_valbool_flag(sk, SOCK_TSTAMP_NEW, new);
836 		sock_enable_timestamp(sk, SOCK_TIMESTAMP);
837 	}
838 }
839 
840 void sock_enable_timestamps(struct sock *sk)
841 {
842 	lock_sock(sk);
843 	__sock_set_timestamps(sk, true, false, true);
844 	release_sock(sk);
845 }
846 EXPORT_SYMBOL(sock_enable_timestamps);
847 
848 void sock_set_timestamp(struct sock *sk, int optname, bool valbool)
849 {
850 	switch (optname) {
851 	case SO_TIMESTAMP_OLD:
852 		__sock_set_timestamps(sk, valbool, false, false);
853 		break;
854 	case SO_TIMESTAMP_NEW:
855 		__sock_set_timestamps(sk, valbool, true, false);
856 		break;
857 	case SO_TIMESTAMPNS_OLD:
858 		__sock_set_timestamps(sk, valbool, false, true);
859 		break;
860 	case SO_TIMESTAMPNS_NEW:
861 		__sock_set_timestamps(sk, valbool, true, true);
862 		break;
863 	}
864 }
865 
866 static int sock_timestamping_bind_phc(struct sock *sk, int phc_index)
867 {
868 	struct net *net = sock_net(sk);
869 	struct net_device *dev = NULL;
870 	bool match = false;
871 	int *vclock_index;
872 	int i, num;
873 
874 	if (sk->sk_bound_dev_if)
875 		dev = dev_get_by_index(net, sk->sk_bound_dev_if);
876 
877 	if (!dev) {
878 		pr_err("%s: socket is not bound to a device\n", __func__);
879 		return -EOPNOTSUPP;
880 	}
881 
882 	num = ethtool_get_phc_vclocks(dev, &vclock_index);
883 	dev_put(dev);
884 
885 	for (i = 0; i < num; i++) {
886 		if (*(vclock_index + i) == phc_index) {
887 			match = true;
888 			break;
889 		}
890 	}
891 
892 	if (num > 0)
893 		kfree(vclock_index);
894 
895 	if (!match)
896 		return -EINVAL;
897 
898 	WRITE_ONCE(sk->sk_bind_phc, phc_index);
899 
900 	return 0;
901 }
902 
903 int sock_set_timestamping(struct sock *sk, int optname,
904 			  struct so_timestamping timestamping)
905 {
906 	int val = timestamping.flags;
907 	int ret;
908 
909 	if (val & ~SOF_TIMESTAMPING_MASK)
910 		return -EINVAL;
911 
912 	if (val & SOF_TIMESTAMPING_OPT_ID_TCP &&
913 	    !(val & SOF_TIMESTAMPING_OPT_ID))
914 		return -EINVAL;
915 
916 	if (val & SOF_TIMESTAMPING_OPT_ID &&
917 	    !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)) {
918 		if (sk_is_tcp(sk)) {
919 			if ((1 << sk->sk_state) &
920 			    (TCPF_CLOSE | TCPF_LISTEN))
921 				return -EINVAL;
922 			if (val & SOF_TIMESTAMPING_OPT_ID_TCP)
923 				atomic_set(&sk->sk_tskey, tcp_sk(sk)->write_seq);
924 			else
925 				atomic_set(&sk->sk_tskey, tcp_sk(sk)->snd_una);
926 		} else {
927 			atomic_set(&sk->sk_tskey, 0);
928 		}
929 	}
930 
931 	if (val & SOF_TIMESTAMPING_OPT_STATS &&
932 	    !(val & SOF_TIMESTAMPING_OPT_TSONLY))
933 		return -EINVAL;
934 
935 	if (val & SOF_TIMESTAMPING_BIND_PHC) {
936 		ret = sock_timestamping_bind_phc(sk, timestamping.bind_phc);
937 		if (ret)
938 			return ret;
939 	}
940 
941 	WRITE_ONCE(sk->sk_tsflags, val);
942 	sock_valbool_flag(sk, SOCK_TSTAMP_NEW, optname == SO_TIMESTAMPING_NEW);
943 	sock_valbool_flag(sk, SOCK_TIMESTAMPING_ANY, !!(val & TSFLAGS_ANY));
944 
945 	if (val & SOF_TIMESTAMPING_RX_SOFTWARE)
946 		sock_enable_timestamp(sk,
947 				      SOCK_TIMESTAMPING_RX_SOFTWARE);
948 	else
949 		sock_disable_timestamp(sk,
950 				       (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE));
951 	return 0;
952 }
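
/*
 * Userspace sketch (illustrative, fd is a hypothetical connected TCP socket):
 *
 *	struct so_timestamping ts = {
 *		.flags = SOF_TIMESTAMPING_TX_SOFTWARE |
 *			 SOF_TIMESTAMPING_SOFTWARE |
 *			 SOF_TIMESTAMPING_OPT_ID |
 *			 SOF_TIMESTAMPING_OPT_TSONLY,
 *	};
 *	setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &ts, sizeof(ts));
 *
 * With OPT_ID on TCP the timestamp key follows the byte stream (write_seq /
 * snd_una above), and OPT_STATS would additionally require OPT_TSONLY, as
 * enforced by the checks in sock_set_timestamping().
 */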
953 
954 #if defined(CONFIG_CGROUP_BPF)
955 void bpf_skops_tx_timestamping(struct sock *sk, struct sk_buff *skb, int op)
956 {
957 	struct bpf_sock_ops_kern sock_ops;
958 
959 	memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp));
960 	sock_ops.op = op;
961 	sock_ops.is_fullsock = 1;
962 	sock_ops.sk = sk;
963 	bpf_skops_init_skb(&sock_ops, skb, 0);
964 	__cgroup_bpf_run_filter_sock_ops(sk, &sock_ops, CGROUP_SOCK_OPS);
965 }
966 #endif
967 
968 void sock_set_keepalive(struct sock *sk)
969 {
970 	lock_sock(sk);
971 	if (sk->sk_prot->keepalive)
972 		sk->sk_prot->keepalive(sk, true);
973 	sock_valbool_flag(sk, SOCK_KEEPOPEN, true);
974 	release_sock(sk);
975 }
976 EXPORT_SYMBOL(sock_set_keepalive);
977 
978 static void __sock_set_rcvbuf(struct sock *sk, int val)
979 {
980 	/* Ensure val * 2 fits into an int, to prevent max_t() from treating it
981 	 * as a negative value.
982 	 */
983 	val = min_t(int, val, INT_MAX / 2);
984 	sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
985 
986 	/* We double it on the way in to account for "struct sk_buff" etc.
987 	 * overhead.   Applications assume that the SO_RCVBUF setting they make
988 	 * will allow that much actual data to be received on that socket.
989 	 *
990 	 * Applications are unaware that "struct sk_buff" and other overheads
991 	 * allocate from the receive buffer during socket buffer allocation.
992 	 *
993 	 * And after considering the possible alternatives, returning the value
994 	 * we actually used in getsockopt is the most desirable behavior.
995 	 */
996 	WRITE_ONCE(sk->sk_rcvbuf, max_t(int, val * 2, SOCK_MIN_RCVBUF));
997 }
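
/*
 * Illustrative consequence for userspace (assuming the request is below
 * sysctl_rmem_max): setting SO_RCVBUF to 65536 stores 131072 in sk_rcvbuf,
 * and a subsequent getsockopt(SO_RCVBUF) reports that doubled value, i.e.
 * the value actually used, as explained above.
 */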
998 
999 void sock_set_rcvbuf(struct sock *sk, int val)
1000 {
1001 	lock_sock(sk);
1002 	__sock_set_rcvbuf(sk, val);
1003 	release_sock(sk);
1004 }
1005 EXPORT_SYMBOL(sock_set_rcvbuf);
1006 
1007 static void __sock_set_mark(struct sock *sk, u32 val)
1008 {
1009 	if (val != sk->sk_mark) {
1010 		WRITE_ONCE(sk->sk_mark, val);
1011 		sk_dst_reset(sk);
1012 	}
1013 }
1014 
1015 void sock_set_mark(struct sock *sk, u32 val)
1016 {
1017 	lock_sock(sk);
1018 	__sock_set_mark(sk, val);
1019 	release_sock(sk);
1020 }
1021 EXPORT_SYMBOL(sock_set_mark);
1022 
1023 static void sock_release_reserved_memory(struct sock *sk, int bytes)
1024 {
1025 	/* Round down bytes to multiple of pages */
1026 	bytes = round_down(bytes, PAGE_SIZE);
1027 
1028 	WARN_ON(bytes > sk->sk_reserved_mem);
1029 	WRITE_ONCE(sk->sk_reserved_mem, sk->sk_reserved_mem - bytes);
1030 	sk_mem_reclaim(sk);
1031 }
1032 
1033 static int sock_reserve_memory(struct sock *sk, int bytes)
1034 {
1035 	long allocated;
1036 	bool charged;
1037 	int pages;
1038 
1039 	if (!mem_cgroup_sockets_enabled || !sk->sk_memcg || !sk_has_account(sk))
1040 		return -EOPNOTSUPP;
1041 
1042 	if (!bytes)
1043 		return 0;
1044 
1045 	pages = sk_mem_pages(bytes);
1046 
1047 	/* pre-charge to memcg */
1048 	charged = mem_cgroup_charge_skmem(sk->sk_memcg, pages,
1049 					  GFP_KERNEL | __GFP_RETRY_MAYFAIL);
1050 	if (!charged)
1051 		return -ENOMEM;
1052 
1053 	/* pre-charge to forward_alloc */
1054 	sk_memory_allocated_add(sk, pages);
1055 	allocated = sk_memory_allocated(sk);
1056 	/* If the system goes into memory pressure with this
1057 	 * precharge, give up and return error.
1058 	 */
1059 	if (allocated > sk_prot_mem_limits(sk, 1)) {
1060 		sk_memory_allocated_sub(sk, pages);
1061 		mem_cgroup_uncharge_skmem(sk->sk_memcg, pages);
1062 		return -ENOMEM;
1063 	}
1064 	sk_forward_alloc_add(sk, pages << PAGE_SHIFT);
1065 
1066 	WRITE_ONCE(sk->sk_reserved_mem,
1067 		   sk->sk_reserved_mem + (pages << PAGE_SHIFT));
1068 
1069 	return 0;
1070 }
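
/*
 * Userspace sketch (illustrative, fd is a hypothetical socket in a memcg
 * with socket accounting enabled):
 *
 *	int reserve = 1 << 20;
 *	setsockopt(fd, SOL_SOCKET, SO_RESERVE_MEM, &reserve, sizeof(reserve));
 *
 * pre-charges roughly 1 MiB (in whole pages) to the memcg and to the
 * protocol memory accounting and credits it to sk_forward_alloc; passing a
 * smaller value later releases the difference, rounded down to whole pages.
 */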
1071 
1072 #ifdef CONFIG_PAGE_POOL
1073 
1074 /* This is the number of tokens and frags that the user can pass to
1075  * SO_DEVMEM_DONTNEED in one syscall. The limit bounds the amount of memory
1076  * the kernel allocates to copy these tokens and prevents looping over the
1077  * frags for too long.
1078  */
1079 #define MAX_DONTNEED_TOKENS 128
1080 #define MAX_DONTNEED_FRAGS 1024
1081 
1082 static noinline_for_stack int
1083 sock_devmem_dontneed(struct sock *sk, sockptr_t optval, unsigned int optlen)
1084 {
1085 	unsigned int num_tokens, i, j, k, netmem_num = 0;
1086 	struct dmabuf_token *tokens;
1087 	int ret = 0, num_frags = 0;
1088 	netmem_ref netmems[16];
1089 
1090 	if (!sk_is_tcp(sk))
1091 		return -EBADF;
1092 
1093 	if (optlen % sizeof(*tokens) ||
1094 	    optlen > sizeof(*tokens) * MAX_DONTNEED_TOKENS)
1095 		return -EINVAL;
1096 
1097 	num_tokens = optlen / sizeof(*tokens);
1098 	tokens = kvmalloc_array(num_tokens, sizeof(*tokens), GFP_KERNEL);
1099 	if (!tokens)
1100 		return -ENOMEM;
1101 
1102 	if (copy_from_sockptr(tokens, optval, optlen)) {
1103 		kvfree(tokens);
1104 		return -EFAULT;
1105 	}
1106 
1107 	xa_lock_bh(&sk->sk_user_frags);
1108 	for (i = 0; i < num_tokens; i++) {
1109 		for (j = 0; j < tokens[i].token_count; j++) {
1110 			if (++num_frags > MAX_DONTNEED_FRAGS)
1111 				goto frag_limit_reached;
1112 
1113 			netmem_ref netmem = (__force netmem_ref)__xa_erase(
1114 				&sk->sk_user_frags, tokens[i].token_start + j);
1115 
1116 			if (!netmem || WARN_ON_ONCE(!netmem_is_net_iov(netmem)))
1117 				continue;
1118 
1119 			netmems[netmem_num++] = netmem;
1120 			if (netmem_num == ARRAY_SIZE(netmems)) {
1121 				xa_unlock_bh(&sk->sk_user_frags);
1122 				for (k = 0; k < netmem_num; k++)
1123 					WARN_ON_ONCE(!napi_pp_put_page(netmems[k]));
1124 				netmem_num = 0;
1125 				xa_lock_bh(&sk->sk_user_frags);
1126 			}
1127 			ret++;
1128 		}
1129 	}
1130 
1131 frag_limit_reached:
1132 	xa_unlock_bh(&sk->sk_user_frags);
1133 	for (k = 0; k < netmem_num; k++)
1134 		WARN_ON_ONCE(!napi_pp_put_page(netmems[k]));
1135 
1136 	kvfree(tokens);
1137 	return ret;
1138 }
1139 #endif
1140 
1141 void sockopt_lock_sock(struct sock *sk)
1142 {
1143 	/* When current->bpf_ctx is set, the setsockopt is called from
1144 	 * a bpf prog.  bpf has ensured the sk lock has been
1145 	 * acquired before calling setsockopt().
1146 	 */
1147 	if (has_current_bpf_ctx())
1148 		return;
1149 
1150 	lock_sock(sk);
1151 }
1152 EXPORT_SYMBOL(sockopt_lock_sock);
1153 
1154 void sockopt_release_sock(struct sock *sk)
1155 {
1156 	if (has_current_bpf_ctx())
1157 		return;
1158 
1159 	release_sock(sk);
1160 }
1161 EXPORT_SYMBOL(sockopt_release_sock);
1162 
1163 bool sockopt_ns_capable(struct user_namespace *ns, int cap)
1164 {
1165 	return has_current_bpf_ctx() || ns_capable(ns, cap);
1166 }
1167 EXPORT_SYMBOL(sockopt_ns_capable);
1168 
1169 bool sockopt_capable(int cap)
1170 {
1171 	return has_current_bpf_ctx() || capable(cap);
1172 }
1173 EXPORT_SYMBOL(sockopt_capable);
1174 
1175 static int sockopt_validate_clockid(__kernel_clockid_t value)
1176 {
1177 	switch (value) {
1178 	case CLOCK_REALTIME:
1179 	case CLOCK_MONOTONIC:
1180 	case CLOCK_TAI:
1181 		return 0;
1182 	}
1183 	return -EINVAL;
1184 }
1185 
1186 /*
1187  *	This is meant for all protocols to use and covers goings on
1188  *	at the socket level. Everything here is generic.
1189  */
1190 
1191 int sk_setsockopt(struct sock *sk, int level, int optname,
1192 		  sockptr_t optval, unsigned int optlen)
1193 {
1194 	struct so_timestamping timestamping;
1195 	struct socket *sock = sk->sk_socket;
1196 	struct sock_txtime sk_txtime;
1197 	int val;
1198 	int valbool;
1199 	struct linger ling;
1200 	int ret = 0;
1201 
1202 	/*
1203 	 *	Options without arguments
1204 	 */
1205 
1206 	if (optname == SO_BINDTODEVICE)
1207 		return sock_setbindtodevice(sk, optval, optlen);
1208 
1209 	if (optlen < sizeof(int))
1210 		return -EINVAL;
1211 
1212 	if (copy_from_sockptr(&val, optval, sizeof(val)))
1213 		return -EFAULT;
1214 
1215 	valbool = val ? 1 : 0;
1216 
1217 	/* handle options which do not require locking the socket. */
1218 	switch (optname) {
1219 	case SO_PRIORITY:
1220 		if (sk_set_prio_allowed(sk, val)) {
1221 			sock_set_priority(sk, val);
1222 			return 0;
1223 		}
1224 		return -EPERM;
1225 	case SO_PASSSEC:
1226 		assign_bit(SOCK_PASSSEC, &sock->flags, valbool);
1227 		return 0;
1228 	case SO_PASSCRED:
1229 		assign_bit(SOCK_PASSCRED, &sock->flags, valbool);
1230 		return 0;
1231 	case SO_PASSPIDFD:
1232 		assign_bit(SOCK_PASSPIDFD, &sock->flags, valbool);
1233 		return 0;
1234 	case SO_TYPE:
1235 	case SO_PROTOCOL:
1236 	case SO_DOMAIN:
1237 	case SO_ERROR:
1238 		return -ENOPROTOOPT;
1239 #ifdef CONFIG_NET_RX_BUSY_POLL
1240 	case SO_BUSY_POLL:
1241 		if (val < 0)
1242 			return -EINVAL;
1243 		WRITE_ONCE(sk->sk_ll_usec, val);
1244 		return 0;
1245 	case SO_PREFER_BUSY_POLL:
1246 		if (valbool && !sockopt_capable(CAP_NET_ADMIN))
1247 			return -EPERM;
1248 		WRITE_ONCE(sk->sk_prefer_busy_poll, valbool);
1249 		return 0;
1250 	case SO_BUSY_POLL_BUDGET:
1251 		if (val > READ_ONCE(sk->sk_busy_poll_budget) &&
1252 		    !sockopt_capable(CAP_NET_ADMIN))
1253 			return -EPERM;
1254 		if (val < 0 || val > U16_MAX)
1255 			return -EINVAL;
1256 		WRITE_ONCE(sk->sk_busy_poll_budget, val);
1257 		return 0;
1258 #endif
1259 	case SO_MAX_PACING_RATE:
1260 		{
1261 		unsigned long ulval = (val == ~0U) ? ~0UL : (unsigned int)val;
1262 		unsigned long pacing_rate;
1263 
1264 		if (sizeof(ulval) != sizeof(val) &&
1265 		    optlen >= sizeof(ulval) &&
1266 		    copy_from_sockptr(&ulval, optval, sizeof(ulval))) {
1267 			return -EFAULT;
1268 		}
1269 		if (ulval != ~0UL)
1270 			cmpxchg(&sk->sk_pacing_status,
1271 				SK_PACING_NONE,
1272 				SK_PACING_NEEDED);
1273 		/* Pairs with READ_ONCE() from sk_getsockopt() */
1274 		WRITE_ONCE(sk->sk_max_pacing_rate, ulval);
1275 		pacing_rate = READ_ONCE(sk->sk_pacing_rate);
1276 		if (ulval < pacing_rate)
1277 			WRITE_ONCE(sk->sk_pacing_rate, ulval);
1278 		return 0;
1279 		}
1280 	case SO_TXREHASH:
1281 		if (val < -1 || val > 1)
1282 			return -EINVAL;
1283 		if ((u8)val == SOCK_TXREHASH_DEFAULT)
1284 			val = READ_ONCE(sock_net(sk)->core.sysctl_txrehash);
1285 		/* Paired with READ_ONCE() in tcp_rtx_synack()
1286 		 * and sk_getsockopt().
1287 		 */
1288 		WRITE_ONCE(sk->sk_txrehash, (u8)val);
1289 		return 0;
1290 	case SO_PEEK_OFF:
1291 		{
1292 		int (*set_peek_off)(struct sock *sk, int val);
1293 
1294 		set_peek_off = READ_ONCE(sock->ops)->set_peek_off;
1295 		if (set_peek_off)
1296 			ret = set_peek_off(sk, val);
1297 		else
1298 			ret = -EOPNOTSUPP;
1299 		return ret;
1300 		}
1301 #ifdef CONFIG_PAGE_POOL
1302 	case SO_DEVMEM_DONTNEED:
1303 		return sock_devmem_dontneed(sk, optval, optlen);
1304 #endif
1305 	}
1306 
1307 	sockopt_lock_sock(sk);
1308 
1309 	switch (optname) {
1310 	case SO_DEBUG:
1311 		if (val && !sockopt_capable(CAP_NET_ADMIN))
1312 			ret = -EACCES;
1313 		else
1314 			sock_valbool_flag(sk, SOCK_DBG, valbool);
1315 		break;
1316 	case SO_REUSEADDR:
1317 		sk->sk_reuse = (valbool ? SK_CAN_REUSE : SK_NO_REUSE);
1318 		break;
1319 	case SO_REUSEPORT:
1320 		if (valbool && !sk_is_inet(sk))
1321 			ret = -EOPNOTSUPP;
1322 		else
1323 			sk->sk_reuseport = valbool;
1324 		break;
1325 	case SO_DONTROUTE:
1326 		sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
1327 		sk_dst_reset(sk);
1328 		break;
1329 	case SO_BROADCAST:
1330 		sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
1331 		break;
1332 	case SO_SNDBUF:
1333 		/* Don't return an error on this; BSD doesn't, and if you
1334 		 * think about it this is right. Otherwise apps have to
1335 		 * play 'guess the biggest size' games. RCVBUF/SNDBUF
1336 		 * are treated in BSD as hints.
1337 		 */
1338 		val = min_t(u32, val, READ_ONCE(sysctl_wmem_max));
1339 set_sndbuf:
1340 		/* Ensure val * 2 fits into an int, to prevent max_t()
1341 		 * from treating it as a negative value.
1342 		 */
1343 		val = min_t(int, val, INT_MAX / 2);
1344 		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
1345 		WRITE_ONCE(sk->sk_sndbuf,
1346 			   max_t(int, val * 2, SOCK_MIN_SNDBUF));
1347 		/* Wake up sending tasks if we upped the value. */
1348 		sk->sk_write_space(sk);
1349 		break;
1350 
1351 	case SO_SNDBUFFORCE:
1352 		if (!sockopt_capable(CAP_NET_ADMIN)) {
1353 			ret = -EPERM;
1354 			break;
1355 		}
1356 
1357 		/* No negative values (to prevent underflow, as val will be
1358 		 * multiplied by 2).
1359 		 */
1360 		if (val < 0)
1361 			val = 0;
1362 		goto set_sndbuf;
1363 
1364 	case SO_RCVBUF:
1365 		/* Don't return an error on this; BSD doesn't, and if you
1366 		 * think about it this is right. Otherwise apps have to
1367 		 * play 'guess the biggest size' games. RCVBUF/SNDBUF
1368 		 * are treated in BSD as hints.
1369 		 */
1370 		__sock_set_rcvbuf(sk, min_t(u32, val, READ_ONCE(sysctl_rmem_max)));
1371 		break;
1372 
1373 	case SO_RCVBUFFORCE:
1374 		if (!sockopt_capable(CAP_NET_ADMIN)) {
1375 			ret = -EPERM;
1376 			break;
1377 		}
1378 
1379 		/* No negative values (to prevent underflow, as val will be
1380 		 * multiplied by 2).
1381 		 */
1382 		__sock_set_rcvbuf(sk, max(val, 0));
1383 		break;
1384 
1385 	case SO_KEEPALIVE:
1386 		if (sk->sk_prot->keepalive)
1387 			sk->sk_prot->keepalive(sk, valbool);
1388 		sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
1389 		break;
1390 
1391 	case SO_OOBINLINE:
1392 		sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
1393 		break;
1394 
1395 	case SO_NO_CHECK:
1396 		sk->sk_no_check_tx = valbool;
1397 		break;
1398 
1399 	case SO_LINGER:
1400 		if (optlen < sizeof(ling)) {
1401 			ret = -EINVAL;	/* 1003.1g */
1402 			break;
1403 		}
1404 		if (copy_from_sockptr(&ling, optval, sizeof(ling))) {
1405 			ret = -EFAULT;
1406 			break;
1407 		}
1408 		if (!ling.l_onoff) {
1409 			sock_reset_flag(sk, SOCK_LINGER);
1410 		} else {
1411 			unsigned long t_sec = ling.l_linger;
1412 
1413 			if (t_sec >= MAX_SCHEDULE_TIMEOUT / HZ)
1414 				WRITE_ONCE(sk->sk_lingertime, MAX_SCHEDULE_TIMEOUT);
1415 			else
1416 				WRITE_ONCE(sk->sk_lingertime, t_sec * HZ);
1417 			sock_set_flag(sk, SOCK_LINGER);
1418 		}
1419 		break;
1420 
1421 	case SO_BSDCOMPAT:
1422 		break;
1423 
1424 	case SO_TIMESTAMP_OLD:
1425 	case SO_TIMESTAMP_NEW:
1426 	case SO_TIMESTAMPNS_OLD:
1427 	case SO_TIMESTAMPNS_NEW:
1428 		sock_set_timestamp(sk, optname, valbool);
1429 		break;
1430 
1431 	case SO_TIMESTAMPING_NEW:
1432 	case SO_TIMESTAMPING_OLD:
1433 		if (optlen == sizeof(timestamping)) {
1434 			if (copy_from_sockptr(&timestamping, optval,
1435 					      sizeof(timestamping))) {
1436 				ret = -EFAULT;
1437 				break;
1438 			}
1439 		} else {
1440 			memset(&timestamping, 0, sizeof(timestamping));
1441 			timestamping.flags = val;
1442 		}
1443 		ret = sock_set_timestamping(sk, optname, timestamping);
1444 		break;
1445 
1446 	case SO_RCVLOWAT:
1447 		{
1448 		int (*set_rcvlowat)(struct sock *sk, int val) = NULL;
1449 
1450 		if (val < 0)
1451 			val = INT_MAX;
1452 		if (sock)
1453 			set_rcvlowat = READ_ONCE(sock->ops)->set_rcvlowat;
1454 		if (set_rcvlowat)
1455 			ret = set_rcvlowat(sk, val);
1456 		else
1457 			WRITE_ONCE(sk->sk_rcvlowat, val ? : 1);
1458 		break;
1459 		}
1460 	case SO_RCVTIMEO_OLD:
1461 	case SO_RCVTIMEO_NEW:
1462 		ret = sock_set_timeout(&sk->sk_rcvtimeo, optval,
1463 				       optlen, optname == SO_RCVTIMEO_OLD);
1464 		break;
1465 
1466 	case SO_SNDTIMEO_OLD:
1467 	case SO_SNDTIMEO_NEW:
1468 		ret = sock_set_timeout(&sk->sk_sndtimeo, optval,
1469 				       optlen, optname == SO_SNDTIMEO_OLD);
1470 		break;
1471 
1472 	case SO_ATTACH_FILTER: {
1473 		struct sock_fprog fprog;
1474 
1475 		ret = copy_bpf_fprog_from_user(&fprog, optval, optlen);
1476 		if (!ret)
1477 			ret = sk_attach_filter(&fprog, sk);
1478 		break;
1479 	}
1480 	case SO_ATTACH_BPF:
1481 		ret = -EINVAL;
1482 		if (optlen == sizeof(u32)) {
1483 			u32 ufd;
1484 
1485 			ret = -EFAULT;
1486 			if (copy_from_sockptr(&ufd, optval, sizeof(ufd)))
1487 				break;
1488 
1489 			ret = sk_attach_bpf(ufd, sk);
1490 		}
1491 		break;
1492 
1493 	case SO_ATTACH_REUSEPORT_CBPF: {
1494 		struct sock_fprog fprog;
1495 
1496 		ret = copy_bpf_fprog_from_user(&fprog, optval, optlen);
1497 		if (!ret)
1498 			ret = sk_reuseport_attach_filter(&fprog, sk);
1499 		break;
1500 	}
1501 	case SO_ATTACH_REUSEPORT_EBPF:
1502 		ret = -EINVAL;
1503 		if (optlen == sizeof(u32)) {
1504 			u32 ufd;
1505 
1506 			ret = -EFAULT;
1507 			if (copy_from_sockptr(&ufd, optval, sizeof(ufd)))
1508 				break;
1509 
1510 			ret = sk_reuseport_attach_bpf(ufd, sk);
1511 		}
1512 		break;
1513 
1514 	case SO_DETACH_REUSEPORT_BPF:
1515 		ret = reuseport_detach_prog(sk);
1516 		break;
1517 
1518 	case SO_DETACH_FILTER:
1519 		ret = sk_detach_filter(sk);
1520 		break;
1521 
1522 	case SO_LOCK_FILTER:
1523 		if (sock_flag(sk, SOCK_FILTER_LOCKED) && !valbool)
1524 			ret = -EPERM;
1525 		else
1526 			sock_valbool_flag(sk, SOCK_FILTER_LOCKED, valbool);
1527 		break;
1528 
1529 	case SO_MARK:
1530 		if (!sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_RAW) &&
1531 		    !sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) {
1532 			ret = -EPERM;
1533 			break;
1534 		}
1535 
1536 		__sock_set_mark(sk, val);
1537 		break;
1538 	case SO_RCVMARK:
1539 		sock_valbool_flag(sk, SOCK_RCVMARK, valbool);
1540 		break;
1541 
1542 	case SO_RCVPRIORITY:
1543 		sock_valbool_flag(sk, SOCK_RCVPRIORITY, valbool);
1544 		break;
1545 
1546 	case SO_RXQ_OVFL:
1547 		sock_valbool_flag(sk, SOCK_RXQ_OVFL, valbool);
1548 		break;
1549 
1550 	case SO_WIFI_STATUS:
1551 		sock_valbool_flag(sk, SOCK_WIFI_STATUS, valbool);
1552 		break;
1553 
1554 	case SO_NOFCS:
1555 		sock_valbool_flag(sk, SOCK_NOFCS, valbool);
1556 		break;
1557 
1558 	case SO_SELECT_ERR_QUEUE:
1559 		sock_valbool_flag(sk, SOCK_SELECT_ERR_QUEUE, valbool);
1560 		break;
1561 
1562 
1563 	case SO_INCOMING_CPU:
1564 		reuseport_update_incoming_cpu(sk, val);
1565 		break;
1566 
1567 	case SO_CNX_ADVICE:
1568 		if (val == 1)
1569 			dst_negative_advice(sk);
1570 		break;
1571 
1572 	case SO_ZEROCOPY:
1573 		if (sk->sk_family == PF_INET || sk->sk_family == PF_INET6) {
1574 			if (!(sk_is_tcp(sk) ||
1575 			      (sk->sk_type == SOCK_DGRAM &&
1576 			       sk->sk_protocol == IPPROTO_UDP)))
1577 				ret = -EOPNOTSUPP;
1578 		} else if (sk->sk_family != PF_RDS) {
1579 			ret = -EOPNOTSUPP;
1580 		}
1581 		if (!ret) {
1582 			if (val < 0 || val > 1)
1583 				ret = -EINVAL;
1584 			else
1585 				sock_valbool_flag(sk, SOCK_ZEROCOPY, valbool);
1586 		}
1587 		break;
1588 
1589 	case SO_TXTIME:
1590 		if (optlen != sizeof(struct sock_txtime)) {
1591 			ret = -EINVAL;
1592 			break;
1593 		} else if (copy_from_sockptr(&sk_txtime, optval,
1594 			   sizeof(struct sock_txtime))) {
1595 			ret = -EFAULT;
1596 			break;
1597 		} else if (sk_txtime.flags & ~SOF_TXTIME_FLAGS_MASK) {
1598 			ret = -EINVAL;
1599 			break;
1600 		}
1601 		/* CLOCK_MONOTONIC is only used by sch_fq, and this packet
1602 		 * scheduler has enough safeguards.
1603 		 */
1604 		if (sk_txtime.clockid != CLOCK_MONOTONIC &&
1605 		    !sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) {
1606 			ret = -EPERM;
1607 			break;
1608 		}
1609 
1610 		ret = sockopt_validate_clockid(sk_txtime.clockid);
1611 		if (ret)
1612 			break;
1613 
1614 		sock_valbool_flag(sk, SOCK_TXTIME, true);
1615 		sk->sk_clockid = sk_txtime.clockid;
1616 		sk->sk_txtime_deadline_mode =
1617 			!!(sk_txtime.flags & SOF_TXTIME_DEADLINE_MODE);
1618 		sk->sk_txtime_report_errors =
1619 			!!(sk_txtime.flags & SOF_TXTIME_REPORT_ERRORS);
1620 		break;
1621 
1622 	case SO_BINDTOIFINDEX:
1623 		ret = sock_bindtoindex_locked(sk, val);
1624 		break;
1625 
1626 	case SO_BUF_LOCK:
1627 		if (val & ~SOCK_BUF_LOCK_MASK) {
1628 			ret = -EINVAL;
1629 			break;
1630 		}
1631 		sk->sk_userlocks = val | (sk->sk_userlocks &
1632 					  ~SOCK_BUF_LOCK_MASK);
1633 		break;
1634 
1635 	case SO_RESERVE_MEM:
1636 	{
1637 		int delta;
1638 
1639 		if (val < 0) {
1640 			ret = -EINVAL;
1641 			break;
1642 		}
1643 
1644 		delta = val - sk->sk_reserved_mem;
1645 		if (delta < 0)
1646 			sock_release_reserved_memory(sk, -delta);
1647 		else
1648 			ret = sock_reserve_memory(sk, delta);
1649 		break;
1650 	}
1651 
1652 	default:
1653 		ret = -ENOPROTOOPT;
1654 		break;
1655 	}
1656 	sockopt_release_sock(sk);
1657 	return ret;
1658 }
1659 
1660 int sock_setsockopt(struct socket *sock, int level, int optname,
1661 		    sockptr_t optval, unsigned int optlen)
1662 {
1663 	return sk_setsockopt(sock->sk, level, optname,
1664 			     optval, optlen);
1665 }
1666 EXPORT_SYMBOL(sock_setsockopt);
1667 
1668 static const struct cred *sk_get_peer_cred(struct sock *sk)
1669 {
1670 	const struct cred *cred;
1671 
1672 	spin_lock(&sk->sk_peer_lock);
1673 	cred = get_cred(sk->sk_peer_cred);
1674 	spin_unlock(&sk->sk_peer_lock);
1675 
1676 	return cred;
1677 }
1678 
1679 static void cred_to_ucred(struct pid *pid, const struct cred *cred,
1680 			  struct ucred *ucred)
1681 {
1682 	ucred->pid = pid_vnr(pid);
1683 	ucred->uid = ucred->gid = -1;
1684 	if (cred) {
1685 		struct user_namespace *current_ns = current_user_ns();
1686 
1687 		ucred->uid = from_kuid_munged(current_ns, cred->euid);
1688 		ucred->gid = from_kgid_munged(current_ns, cred->egid);
1689 	}
1690 }
1691 
1692 static int groups_to_user(sockptr_t dst, const struct group_info *src)
1693 {
1694 	struct user_namespace *user_ns = current_user_ns();
1695 	int i;
1696 
1697 	for (i = 0; i < src->ngroups; i++) {
1698 		gid_t gid = from_kgid_munged(user_ns, src->gid[i]);
1699 
1700 		if (copy_to_sockptr_offset(dst, i * sizeof(gid), &gid, sizeof(gid)))
1701 			return -EFAULT;
1702 	}
1703 
1704 	return 0;
1705 }
1706 
1707 int sk_getsockopt(struct sock *sk, int level, int optname,
1708 		  sockptr_t optval, sockptr_t optlen)
1709 {
1710 	struct socket *sock = sk->sk_socket;
1711 
1712 	union {
1713 		int val;
1714 		u64 val64;
1715 		unsigned long ulval;
1716 		struct linger ling;
1717 		struct old_timeval32 tm32;
1718 		struct __kernel_old_timeval tm;
1719 		struct  __kernel_sock_timeval stm;
1720 		struct sock_txtime txtime;
1721 		struct so_timestamping timestamping;
1722 	} v;
1723 
1724 	int lv = sizeof(int);
1725 	int len;
1726 
1727 	if (copy_from_sockptr(&len, optlen, sizeof(int)))
1728 		return -EFAULT;
1729 	if (len < 0)
1730 		return -EINVAL;
1731 
1732 	memset(&v, 0, sizeof(v));
1733 
1734 	switch (optname) {
1735 	case SO_DEBUG:
1736 		v.val = sock_flag(sk, SOCK_DBG);
1737 		break;
1738 
1739 	case SO_DONTROUTE:
1740 		v.val = sock_flag(sk, SOCK_LOCALROUTE);
1741 		break;
1742 
1743 	case SO_BROADCAST:
1744 		v.val = sock_flag(sk, SOCK_BROADCAST);
1745 		break;
1746 
1747 	case SO_SNDBUF:
1748 		v.val = READ_ONCE(sk->sk_sndbuf);
1749 		break;
1750 
1751 	case SO_RCVBUF:
1752 		v.val = READ_ONCE(sk->sk_rcvbuf);
1753 		break;
1754 
1755 	case SO_REUSEADDR:
1756 		v.val = sk->sk_reuse;
1757 		break;
1758 
1759 	case SO_REUSEPORT:
1760 		v.val = sk->sk_reuseport;
1761 		break;
1762 
1763 	case SO_KEEPALIVE:
1764 		v.val = sock_flag(sk, SOCK_KEEPOPEN);
1765 		break;
1766 
1767 	case SO_TYPE:
1768 		v.val = sk->sk_type;
1769 		break;
1770 
1771 	case SO_PROTOCOL:
1772 		v.val = sk->sk_protocol;
1773 		break;
1774 
1775 	case SO_DOMAIN:
1776 		v.val = sk->sk_family;
1777 		break;
1778 
1779 	case SO_ERROR:
1780 		v.val = -sock_error(sk);
1781 		if (v.val == 0)
1782 			v.val = xchg(&sk->sk_err_soft, 0);
1783 		break;
1784 
1785 	case SO_OOBINLINE:
1786 		v.val = sock_flag(sk, SOCK_URGINLINE);
1787 		break;
1788 
1789 	case SO_NO_CHECK:
1790 		v.val = sk->sk_no_check_tx;
1791 		break;
1792 
1793 	case SO_PRIORITY:
1794 		v.val = READ_ONCE(sk->sk_priority);
1795 		break;
1796 
1797 	case SO_LINGER:
1798 		lv		= sizeof(v.ling);
1799 		v.ling.l_onoff	= sock_flag(sk, SOCK_LINGER);
1800 		v.ling.l_linger	= READ_ONCE(sk->sk_lingertime) / HZ;
1801 		break;
1802 
1803 	case SO_BSDCOMPAT:
1804 		break;
1805 
1806 	case SO_TIMESTAMP_OLD:
1807 		v.val = sock_flag(sk, SOCK_RCVTSTAMP) &&
1808 				!sock_flag(sk, SOCK_TSTAMP_NEW) &&
1809 				!sock_flag(sk, SOCK_RCVTSTAMPNS);
1810 		break;
1811 
1812 	case SO_TIMESTAMPNS_OLD:
1813 		v.val = sock_flag(sk, SOCK_RCVTSTAMPNS) && !sock_flag(sk, SOCK_TSTAMP_NEW);
1814 		break;
1815 
1816 	case SO_TIMESTAMP_NEW:
1817 		v.val = sock_flag(sk, SOCK_RCVTSTAMP) && sock_flag(sk, SOCK_TSTAMP_NEW);
1818 		break;
1819 
1820 	case SO_TIMESTAMPNS_NEW:
1821 		v.val = sock_flag(sk, SOCK_RCVTSTAMPNS) && sock_flag(sk, SOCK_TSTAMP_NEW);
1822 		break;
1823 
1824 	case SO_TIMESTAMPING_OLD:
1825 	case SO_TIMESTAMPING_NEW:
1826 		lv = sizeof(v.timestamping);
1827 		/* For the later-added case SO_TIMESTAMPING_NEW: Be strict about only
1828 		 * returning the flags when they were set through the same option.
1829 		 * Don't change the behaviour for the old case SO_TIMESTAMPING_OLD.
1830 		 */
1831 		if (optname == SO_TIMESTAMPING_OLD || sock_flag(sk, SOCK_TSTAMP_NEW)) {
1832 			v.timestamping.flags = READ_ONCE(sk->sk_tsflags);
1833 			v.timestamping.bind_phc = READ_ONCE(sk->sk_bind_phc);
1834 		}
1835 		break;
1836 
1837 	case SO_RCVTIMEO_OLD:
1838 	case SO_RCVTIMEO_NEW:
1839 		lv = sock_get_timeout(READ_ONCE(sk->sk_rcvtimeo), &v,
1840 				      SO_RCVTIMEO_OLD == optname);
1841 		break;
1842 
1843 	case SO_SNDTIMEO_OLD:
1844 	case SO_SNDTIMEO_NEW:
1845 		lv = sock_get_timeout(READ_ONCE(sk->sk_sndtimeo), &v,
1846 				      SO_SNDTIMEO_OLD == optname);
1847 		break;
1848 
1849 	case SO_RCVLOWAT:
1850 		v.val = READ_ONCE(sk->sk_rcvlowat);
1851 		break;
1852 
1853 	case SO_SNDLOWAT:
1854 		v.val = 1;
1855 		break;
1856 
1857 	case SO_PASSCRED:
1858 		v.val = !!test_bit(SOCK_PASSCRED, &sock->flags);
1859 		break;
1860 
1861 	case SO_PASSPIDFD:
1862 		v.val = !!test_bit(SOCK_PASSPIDFD, &sock->flags);
1863 		break;
1864 
1865 	case SO_PEERCRED:
1866 	{
1867 		struct ucred peercred;
1868 		if (len > sizeof(peercred))
1869 			len = sizeof(peercred);
1870 
1871 		spin_lock(&sk->sk_peer_lock);
1872 		cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
1873 		spin_unlock(&sk->sk_peer_lock);
1874 
1875 		if (copy_to_sockptr(optval, &peercred, len))
1876 			return -EFAULT;
1877 		goto lenout;
1878 	}
1879 
1880 	case SO_PEERPIDFD:
1881 	{
1882 		struct pid *peer_pid;
1883 		struct file *pidfd_file = NULL;
1884 		unsigned int flags = 0;
1885 		int pidfd;
1886 
1887 		if (len > sizeof(pidfd))
1888 			len = sizeof(pidfd);
1889 
1890 		spin_lock(&sk->sk_peer_lock);
1891 		peer_pid = get_pid(sk->sk_peer_pid);
1892 		spin_unlock(&sk->sk_peer_lock);
1893 
1894 		if (!peer_pid)
1895 			return -ENODATA;
1896 
1897 		/* The use of PIDFD_STALE requires stashing of struct pid
1898 		 * on pidfs with pidfs_register_pid(), and only AF_UNIX
1899 		 * has been prepared for this.
1900 		 */
1901 		if (sk->sk_family == AF_UNIX)
1902 			flags = PIDFD_STALE;
1903 
1904 		pidfd = pidfd_prepare(peer_pid, flags, &pidfd_file);
1905 		put_pid(peer_pid);
1906 		if (pidfd < 0)
1907 			return pidfd;
1908 
1909 		if (copy_to_sockptr(optval, &pidfd, len) ||
1910 		    copy_to_sockptr(optlen, &len, sizeof(int))) {
1911 			put_unused_fd(pidfd);
1912 			fput(pidfd_file);
1913 
1914 			return -EFAULT;
1915 		}
1916 
1917 		fd_install(pidfd, pidfd_file);
1918 		return 0;
1919 	}
1920 
1921 	case SO_PEERGROUPS:
1922 	{
1923 		const struct cred *cred;
1924 		int ret, n;
1925 
1926 		cred = sk_get_peer_cred(sk);
1927 		if (!cred)
1928 			return -ENODATA;
1929 
1930 		n = cred->group_info->ngroups;
1931 		if (len < n * sizeof(gid_t)) {
1932 			len = n * sizeof(gid_t);
1933 			put_cred(cred);
1934 			return copy_to_sockptr(optlen, &len, sizeof(int)) ? -EFAULT : -ERANGE;
1935 		}
1936 		len = n * sizeof(gid_t);
1937 
1938 		ret = groups_to_user(optval, cred->group_info);
1939 		put_cred(cred);
1940 		if (ret)
1941 			return ret;
1942 		goto lenout;
1943 	}
1944 
1945 	case SO_PEERNAME:
1946 	{
1947 		struct sockaddr_storage address;
1948 
1949 		lv = READ_ONCE(sock->ops)->getname(sock, (struct sockaddr *)&address, 2);
1950 		if (lv < 0)
1951 			return -ENOTCONN;
1952 		if (lv < len)
1953 			return -EINVAL;
1954 		if (copy_to_sockptr(optval, &address, len))
1955 			return -EFAULT;
1956 		goto lenout;
1957 	}
1958 
1959 	/* Dubious BSD thing... Probably nobody even uses it, but
1960 	 * the UNIX standard wants it for whatever reason... -DaveM
1961 	 */
1962 	case SO_ACCEPTCONN:
1963 		v.val = sk->sk_state == TCP_LISTEN;
1964 		break;
1965 
1966 	case SO_PASSSEC:
1967 		v.val = !!test_bit(SOCK_PASSSEC, &sock->flags);
1968 		break;
1969 
1970 	case SO_PEERSEC:
1971 		return security_socket_getpeersec_stream(sock,
1972 							 optval, optlen, len);
1973 
1974 	case SO_MARK:
1975 		v.val = READ_ONCE(sk->sk_mark);
1976 		break;
1977 
1978 	case SO_RCVMARK:
1979 		v.val = sock_flag(sk, SOCK_RCVMARK);
1980 		break;
1981 
1982 	case SO_RCVPRIORITY:
1983 		v.val = sock_flag(sk, SOCK_RCVPRIORITY);
1984 		break;
1985 
1986 	case SO_RXQ_OVFL:
1987 		v.val = sock_flag(sk, SOCK_RXQ_OVFL);
1988 		break;
1989 
1990 	case SO_WIFI_STATUS:
1991 		v.val = sock_flag(sk, SOCK_WIFI_STATUS);
1992 		break;
1993 
1994 	case SO_PEEK_OFF:
1995 		if (!READ_ONCE(sock->ops)->set_peek_off)
1996 			return -EOPNOTSUPP;
1997 
1998 		v.val = READ_ONCE(sk->sk_peek_off);
1999 		break;
2000 	case SO_NOFCS:
2001 		v.val = sock_flag(sk, SOCK_NOFCS);
2002 		break;
2003 
2004 	case SO_BINDTODEVICE:
2005 		return sock_getbindtodevice(sk, optval, optlen, len);
2006 
2007 	case SO_GET_FILTER:
2008 		len = sk_get_filter(sk, optval, len);
2009 		if (len < 0)
2010 			return len;
2011 
2012 		goto lenout;
2013 
2014 	case SO_LOCK_FILTER:
2015 		v.val = sock_flag(sk, SOCK_FILTER_LOCKED);
2016 		break;
2017 
2018 	case SO_BPF_EXTENSIONS:
2019 		v.val = bpf_tell_extensions();
2020 		break;
2021 
2022 	case SO_SELECT_ERR_QUEUE:
2023 		v.val = sock_flag(sk, SOCK_SELECT_ERR_QUEUE);
2024 		break;
2025 
2026 #ifdef CONFIG_NET_RX_BUSY_POLL
2027 	case SO_BUSY_POLL:
2028 		v.val = READ_ONCE(sk->sk_ll_usec);
2029 		break;
2030 	case SO_PREFER_BUSY_POLL:
2031 		v.val = READ_ONCE(sk->sk_prefer_busy_poll);
2032 		break;
2033 #endif
2034 
2035 	case SO_MAX_PACING_RATE:
2036 		/* The READ_ONCE() pairs with the WRITE_ONCE() in sk_setsockopt() */
2037 		if (sizeof(v.ulval) != sizeof(v.val) && len >= sizeof(v.ulval)) {
2038 			lv = sizeof(v.ulval);
2039 			v.ulval = READ_ONCE(sk->sk_max_pacing_rate);
2040 		} else {
2041 			/* 32bit version */
2042 			v.val = min_t(unsigned long, ~0U,
2043 				      READ_ONCE(sk->sk_max_pacing_rate));
2044 		}
2045 		break;
2046 
2047 	case SO_INCOMING_CPU:
2048 		v.val = READ_ONCE(sk->sk_incoming_cpu);
2049 		break;
2050 
2051 	case SO_MEMINFO:
2052 	{
2053 		u32 meminfo[SK_MEMINFO_VARS];
2054 
2055 		sk_get_meminfo(sk, meminfo);
2056 
2057 		len = min_t(unsigned int, len, sizeof(meminfo));
2058 		if (copy_to_sockptr(optval, &meminfo, len))
2059 			return -EFAULT;
2060 
2061 		goto lenout;
2062 	}
2063 
2064 #ifdef CONFIG_NET_RX_BUSY_POLL
2065 	case SO_INCOMING_NAPI_ID:
2066 		v.val = READ_ONCE(sk->sk_napi_id);
2067 
2068 		/* aggregate non-NAPI IDs down to 0 */
2069 		if (!napi_id_valid(v.val))
2070 			v.val = 0;
2071 
2072 		break;
2073 #endif
2074 
2075 	case SO_COOKIE:
2076 		lv = sizeof(u64);
2077 		if (len < lv)
2078 			return -EINVAL;
2079 		v.val64 = sock_gen_cookie(sk);
2080 		break;
2081 
2082 	case SO_ZEROCOPY:
2083 		v.val = sock_flag(sk, SOCK_ZEROCOPY);
2084 		break;
2085 
2086 	case SO_TXTIME:
2087 		lv = sizeof(v.txtime);
2088 		v.txtime.clockid = sk->sk_clockid;
2089 		v.txtime.flags |= sk->sk_txtime_deadline_mode ?
2090 				  SOF_TXTIME_DEADLINE_MODE : 0;
2091 		v.txtime.flags |= sk->sk_txtime_report_errors ?
2092 				  SOF_TXTIME_REPORT_ERRORS : 0;
2093 		break;
2094 
2095 	case SO_BINDTOIFINDEX:
2096 		v.val = READ_ONCE(sk->sk_bound_dev_if);
2097 		break;
2098 
2099 	case SO_NETNS_COOKIE:
2100 		lv = sizeof(u64);
2101 		if (len != lv)
2102 			return -EINVAL;
2103 		v.val64 = sock_net(sk)->net_cookie;
2104 		break;
2105 
2106 	case SO_BUF_LOCK:
2107 		v.val = sk->sk_userlocks & SOCK_BUF_LOCK_MASK;
2108 		break;
2109 
2110 	case SO_RESERVE_MEM:
2111 		v.val = READ_ONCE(sk->sk_reserved_mem);
2112 		break;
2113 
2114 	case SO_TXREHASH:
2115 		/* Paired with WRITE_ONCE() in sk_setsockopt() */
2116 		v.val = READ_ONCE(sk->sk_txrehash);
2117 		break;
2118 
2119 	default:
2120 		/* We implement SO_SNDLOWAT etc. to not be settable
2121 		 * (1003.1g 7).
2122 		 */
2123 		return -ENOPROTOOPT;
2124 	}
2125 
2126 	if (len > lv)
2127 		len = lv;
2128 	if (copy_to_sockptr(optval, &v, len))
2129 		return -EFAULT;
2130 lenout:
2131 	if (copy_to_sockptr(optlen, &len, sizeof(int)))
2132 		return -EFAULT;
2133 	return 0;
2134 }
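
/* Editor's illustrative sketch (not part of the kernel sources): from user
 * space, the options handled above are read with getsockopt(2). For example,
 * given a connected AF_UNIX socket fd, SO_PEERCRED could be queried as:
 *
 *	struct ucred cr;
 *	socklen_t len = sizeof(cr);
 *
 *	if (getsockopt(fd, SOL_SOCKET, SO_PEERCRED, &cr, &len) == 0)
 *		printf("peer pid=%d uid=%u gid=%u\n", cr.pid, cr.uid, cr.gid);
 */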
2135 
2136 /*
2137  * Initialize an sk_lock.
2138  *
2139  * (We also register the sk_lock with the lock validator.)
2140  */
2141 static inline void sock_lock_init(struct sock *sk)
2142 {
2143 	sk_owner_clear(sk);
2144 
2145 	if (sk->sk_kern_sock)
2146 		sock_lock_init_class_and_name(
2147 			sk,
2148 			af_family_kern_slock_key_strings[sk->sk_family],
2149 			af_family_kern_slock_keys + sk->sk_family,
2150 			af_family_kern_key_strings[sk->sk_family],
2151 			af_family_kern_keys + sk->sk_family);
2152 	else
2153 		sock_lock_init_class_and_name(
2154 			sk,
2155 			af_family_slock_key_strings[sk->sk_family],
2156 			af_family_slock_keys + sk->sk_family,
2157 			af_family_key_strings[sk->sk_family],
2158 			af_family_keys + sk->sk_family);
2159 }
2160 
2161 /*
2162  * Copy all fields from osk to nsk, but nsk->sk_refcnt must not change yet,
2163  * even temporarily, because of RCU lookups. sk_node should also be left as is.
2164  * We must not copy fields between sk_dontcopy_begin and sk_dontcopy_end.
2165  */
2166 static void sock_copy(struct sock *nsk, const struct sock *osk)
2167 {
2168 	const struct proto *prot = READ_ONCE(osk->sk_prot);
2169 #ifdef CONFIG_SECURITY_NETWORK
2170 	void *sptr = nsk->sk_security;
2171 #endif
2172 
2173 	/* If we move sk_tx_queue_mapping out of the private section,
2174 	 * we must check if sk_tx_queue_clear() is called after
2175 	 * sock_copy() in sk_clone_lock().
2176 	 */
2177 	BUILD_BUG_ON(offsetof(struct sock, sk_tx_queue_mapping) <
2178 		     offsetof(struct sock, sk_dontcopy_begin) ||
2179 		     offsetof(struct sock, sk_tx_queue_mapping) >=
2180 		     offsetof(struct sock, sk_dontcopy_end));
2181 
2182 	memcpy(nsk, osk, offsetof(struct sock, sk_dontcopy_begin));
2183 
2184 	unsafe_memcpy(&nsk->sk_dontcopy_end, &osk->sk_dontcopy_end,
2185 		      prot->obj_size - offsetof(struct sock, sk_dontcopy_end),
2186 		      /* alloc is larger than struct, see sk_prot_alloc() */);
2187 
2188 #ifdef CONFIG_SECURITY_NETWORK
2189 	nsk->sk_security = sptr;
2190 	security_sk_clone(osk, nsk);
2191 #endif
2192 }
2193 
2194 static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
2195 		int family)
2196 {
2197 	struct sock *sk;
2198 	struct kmem_cache *slab;
2199 
2200 	slab = prot->slab;
2201 	if (slab != NULL) {
2202 		sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO);
2203 		if (!sk)
2204 			return sk;
2205 		if (want_init_on_alloc(priority))
2206 			sk_prot_clear_nulls(sk, prot->obj_size);
2207 	} else
2208 		sk = kmalloc(prot->obj_size, priority);
2209 
2210 	if (sk != NULL) {
2211 		if (security_sk_alloc(sk, family, priority))
2212 			goto out_free;
2213 
2214 		if (!try_module_get(prot->owner))
2215 			goto out_free_sec;
2216 	}
2217 
2218 	return sk;
2219 
2220 out_free_sec:
2221 	security_sk_free(sk);
2222 out_free:
2223 	if (slab != NULL)
2224 		kmem_cache_free(slab, sk);
2225 	else
2226 		kfree(sk);
2227 	return NULL;
2228 }
2229 
2230 static void sk_prot_free(struct proto *prot, struct sock *sk)
2231 {
2232 	struct kmem_cache *slab;
2233 	struct module *owner;
2234 
2235 	owner = prot->owner;
2236 	slab = prot->slab;
2237 
2238 	cgroup_sk_free(&sk->sk_cgrp_data);
2239 	mem_cgroup_sk_free(sk);
2240 	security_sk_free(sk);
2241 
2242 	sk_owner_put(sk);
2243 
2244 	if (slab != NULL)
2245 		kmem_cache_free(slab, sk);
2246 	else
2247 		kfree(sk);
2248 	module_put(owner);
2249 }
2250 
2251 /**
2252  *	sk_alloc - All socket objects are allocated here
2253  *	@net: the applicable net namespace
2254  *	@family: protocol family
2255  *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
2256  *	@prot: struct proto associated with this new sock instance
2257  *	@kern: is this to be a kernel socket?
2258  */
2259 struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
2260 		      struct proto *prot, int kern)
2261 {
2262 	struct sock *sk;
2263 
2264 	sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family);
2265 	if (sk) {
2266 		sk->sk_family = family;
2267 		/*
2268 		 * See comment in struct sock definition to understand
2269 		 * why we need sk_prot_creator -acme
2270 		 */
2271 		sk->sk_prot = sk->sk_prot_creator = prot;
2272 		sk->sk_kern_sock = kern;
2273 		sock_lock_init(sk);
2274 		sk->sk_net_refcnt = kern ? 0 : 1;
2275 		if (likely(sk->sk_net_refcnt)) {
2276 			get_net_track(net, &sk->ns_tracker, priority);
2277 			sock_inuse_add(net, 1);
2278 		} else {
2279 			net_passive_inc(net);
2280 			__netns_tracker_alloc(net, &sk->ns_tracker,
2281 					      false, priority);
2282 		}
2283 
2284 		sock_net_set(sk, net);
2285 		refcount_set(&sk->sk_wmem_alloc, 1);
2286 
2287 		mem_cgroup_sk_alloc(sk);
2288 		cgroup_sk_alloc(&sk->sk_cgrp_data);
2289 		sock_update_classid(&sk->sk_cgrp_data);
2290 		sock_update_netprioidx(&sk->sk_cgrp_data);
2291 		sk_tx_queue_clear(sk);
2292 	}
2293 
2294 	return sk;
2295 }
2296 EXPORT_SYMBOL(sk_alloc);
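
/* Editor's illustrative sketch (hypothetical names): a protocol family's
 * ->create() handler typically pairs sk_alloc() with sock_init_data(), e.g.:
 *
 *	sk = sk_alloc(net, PF_EXAMPLE, GFP_KERNEL, &example_proto, kern);
 *	if (!sk)
 *		return -ENOBUFS;
 *	sock_init_data(sock, sk);
 *	(protocol specific setup follows; call sk_free(sk) on a later failure)
 *
 * PF_EXAMPLE and example_proto are placeholders, not real identifiers.
 */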
2297 
2298 /* Sockets having SOCK_RCU_FREE will call this function after one RCU
2299  * grace period. This is the case for UDP sockets and TCP listeners.
2300  */
2301 static void __sk_destruct(struct rcu_head *head)
2302 {
2303 	struct sock *sk = container_of(head, struct sock, sk_rcu);
2304 	struct net *net = sock_net(sk);
2305 	struct sk_filter *filter;
2306 
2307 	if (sk->sk_destruct)
2308 		sk->sk_destruct(sk);
2309 
2310 	filter = rcu_dereference_check(sk->sk_filter,
2311 				       refcount_read(&sk->sk_wmem_alloc) == 0);
2312 	if (filter) {
2313 		sk_filter_uncharge(sk, filter);
2314 		RCU_INIT_POINTER(sk->sk_filter, NULL);
2315 	}
2316 
2317 	sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP);
2318 
2319 #ifdef CONFIG_BPF_SYSCALL
2320 	bpf_sk_storage_free(sk);
2321 #endif
2322 
2323 	if (atomic_read(&sk->sk_omem_alloc))
2324 		pr_debug("%s: optmem leakage (%d bytes) detected\n",
2325 			 __func__, atomic_read(&sk->sk_omem_alloc));
2326 
2327 	if (sk->sk_frag.page) {
2328 		put_page(sk->sk_frag.page);
2329 		sk->sk_frag.page = NULL;
2330 	}
2331 
2332 	/* We do not need to acquire sk->sk_peer_lock; we are the last user. */
2333 	put_cred(sk->sk_peer_cred);
2334 	put_pid(sk->sk_peer_pid);
2335 
2336 	if (likely(sk->sk_net_refcnt)) {
2337 		put_net_track(net, &sk->ns_tracker);
2338 	} else {
2339 		__netns_tracker_free(net, &sk->ns_tracker, false);
2340 		net_passive_dec(net);
2341 	}
2342 	sk_prot_free(sk->sk_prot_creator, sk);
2343 }
2344 
2345 void sk_net_refcnt_upgrade(struct sock *sk)
2346 {
2347 	struct net *net = sock_net(sk);
2348 
2349 	WARN_ON_ONCE(sk->sk_net_refcnt);
2350 	__netns_tracker_free(net, &sk->ns_tracker, false);
2351 	net_passive_dec(net);
2352 	sk->sk_net_refcnt = 1;
2353 	get_net_track(net, &sk->ns_tracker, GFP_KERNEL);
2354 	sock_inuse_add(net, 1);
2355 }
2356 EXPORT_SYMBOL_GPL(sk_net_refcnt_upgrade);
2357 
2358 void sk_destruct(struct sock *sk)
2359 {
2360 	bool use_call_rcu = sock_flag(sk, SOCK_RCU_FREE);
2361 
2362 	if (rcu_access_pointer(sk->sk_reuseport_cb)) {
2363 		reuseport_detach_sock(sk);
2364 		use_call_rcu = true;
2365 	}
2366 
2367 	if (use_call_rcu)
2368 		call_rcu(&sk->sk_rcu, __sk_destruct);
2369 	else
2370 		__sk_destruct(&sk->sk_rcu);
2371 }
2372 
2373 static void __sk_free(struct sock *sk)
2374 {
2375 	if (likely(sk->sk_net_refcnt))
2376 		sock_inuse_add(sock_net(sk), -1);
2377 
2378 	if (unlikely(sk->sk_net_refcnt && sock_diag_has_destroy_listeners(sk)))
2379 		sock_diag_broadcast_destroy(sk);
2380 	else
2381 		sk_destruct(sk);
2382 }
2383 
2384 void sk_free(struct sock *sk)
2385 {
2386 	/*
2387 	 * We subtract one from sk_wmem_alloc, which tells us whether
2388 	 * some packets are still in some tx queue.
2389 	 * If the result is not zero, sock_wfree() will call __sk_free(sk) later.
2390 	 */
2391 	if (refcount_dec_and_test(&sk->sk_wmem_alloc))
2392 		__sk_free(sk);
2393 }
2394 EXPORT_SYMBOL(sk_free);
2395 
2396 static void sk_init_common(struct sock *sk)
2397 {
2398 	skb_queue_head_init(&sk->sk_receive_queue);
2399 	skb_queue_head_init(&sk->sk_write_queue);
2400 	skb_queue_head_init(&sk->sk_error_queue);
2401 
2402 	rwlock_init(&sk->sk_callback_lock);
2403 	lockdep_set_class_and_name(&sk->sk_receive_queue.lock,
2404 			af_rlock_keys + sk->sk_family,
2405 			af_family_rlock_key_strings[sk->sk_family]);
2406 	lockdep_set_class_and_name(&sk->sk_write_queue.lock,
2407 			af_wlock_keys + sk->sk_family,
2408 			af_family_wlock_key_strings[sk->sk_family]);
2409 	lockdep_set_class_and_name(&sk->sk_error_queue.lock,
2410 			af_elock_keys + sk->sk_family,
2411 			af_family_elock_key_strings[sk->sk_family]);
2412 	if (sk->sk_kern_sock)
2413 		lockdep_set_class_and_name(&sk->sk_callback_lock,
2414 			af_kern_callback_keys + sk->sk_family,
2415 			af_family_kern_clock_key_strings[sk->sk_family]);
2416 	else
2417 		lockdep_set_class_and_name(&sk->sk_callback_lock,
2418 			af_callback_keys + sk->sk_family,
2419 			af_family_clock_key_strings[sk->sk_family]);
2420 }
2421 
2422 /**
2423  *	sk_clone_lock - clone a socket, and lock its clone
2424  *	@sk: the socket to clone
2425  *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
2426  *
2427  *	Caller must unlock socket even in error path (bh_unlock_sock(newsk))
2428  */
2429 struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
2430 {
2431 	struct proto *prot = READ_ONCE(sk->sk_prot);
2432 	struct sk_filter *filter;
2433 	bool is_charged = true;
2434 	struct sock *newsk;
2435 
2436 	newsk = sk_prot_alloc(prot, priority, sk->sk_family);
2437 	if (!newsk)
2438 		goto out;
2439 
2440 	sock_copy(newsk, sk);
2441 
2442 	newsk->sk_prot_creator = prot;
2443 
2444 	/* SANITY */
2445 	if (likely(newsk->sk_net_refcnt)) {
2446 		get_net_track(sock_net(newsk), &newsk->ns_tracker, priority);
2447 		sock_inuse_add(sock_net(newsk), 1);
2448 	} else {
2449 		/* Kernel sockets do not elevate the struct net refcount.
2450 		 * Instead, use a tracker to more easily detect if a layer
2451 		 * is not properly dismantling its kernel sockets at netns
2452 		 * destroy time.
2453 		 */
2454 		net_passive_inc(sock_net(newsk));
2455 		__netns_tracker_alloc(sock_net(newsk), &newsk->ns_tracker,
2456 				      false, priority);
2457 	}
2458 	sk_node_init(&newsk->sk_node);
2459 	sock_lock_init(newsk);
2460 	bh_lock_sock(newsk);
2461 	newsk->sk_backlog.head	= newsk->sk_backlog.tail = NULL;
2462 	newsk->sk_backlog.len = 0;
2463 
2464 	atomic_set(&newsk->sk_rmem_alloc, 0);
2465 
2466 	/* sk_wmem_alloc set to one (see sk_free() and sock_wfree()) */
2467 	refcount_set(&newsk->sk_wmem_alloc, 1);
2468 
2469 	atomic_set(&newsk->sk_omem_alloc, 0);
2470 	sk_init_common(newsk);
2471 
2472 	newsk->sk_dst_cache	= NULL;
2473 	newsk->sk_dst_pending_confirm = 0;
2474 	newsk->sk_wmem_queued	= 0;
2475 	newsk->sk_forward_alloc = 0;
2476 	newsk->sk_reserved_mem  = 0;
2477 	atomic_set(&newsk->sk_drops, 0);
2478 	newsk->sk_send_head	= NULL;
2479 	newsk->sk_userlocks	= sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
2480 	atomic_set(&newsk->sk_zckey, 0);
2481 
2482 	sock_reset_flag(newsk, SOCK_DONE);
2483 
2484 	/* sk->sk_memcg will be populated at accept() time */
2485 	newsk->sk_memcg = NULL;
2486 
2487 	cgroup_sk_clone(&newsk->sk_cgrp_data);
2488 
2489 	rcu_read_lock();
2490 	filter = rcu_dereference(sk->sk_filter);
2491 	if (filter != NULL)
2492 		/* though it's an empty new sock, the charging may fail
2493 		 * if sysctl_optmem_max was changed between the creation of
2494 		 * the original socket and the cloning.
2495 		 */
2496 		is_charged = sk_filter_charge(newsk, filter);
2497 	RCU_INIT_POINTER(newsk->sk_filter, filter);
2498 	rcu_read_unlock();
2499 
2500 	if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk, sk))) {
2501 		/* We need to make sure that we don't uncharge the new
2502 		 * socket if we couldn't charge it in the first place
2503 		 * as otherwise we uncharge the parent's filter.
2504 		 */
2505 		if (!is_charged)
2506 			RCU_INIT_POINTER(newsk->sk_filter, NULL);
2507 		sk_free_unlock_clone(newsk);
2508 		newsk = NULL;
2509 		goto out;
2510 	}
2511 	RCU_INIT_POINTER(newsk->sk_reuseport_cb, NULL);
2512 
2513 	if (bpf_sk_storage_clone(sk, newsk)) {
2514 		sk_free_unlock_clone(newsk);
2515 		newsk = NULL;
2516 		goto out;
2517 	}
2518 
2519 	/* Clear sk_user_data if parent had the pointer tagged
2520 	 * as not suitable for copying when cloning.
2521 	 */
2522 	if (sk_user_data_is_nocopy(newsk))
2523 		newsk->sk_user_data = NULL;
2524 
2525 	newsk->sk_err	   = 0;
2526 	newsk->sk_err_soft = 0;
2527 	newsk->sk_priority = 0;
2528 	newsk->sk_incoming_cpu = raw_smp_processor_id();
2529 
2530 	/* Before updating sk_refcnt, we must commit prior changes to memory
2531 	 * (Documentation/RCU/rculist_nulls.rst for details)
2532 	 */
2533 	smp_wmb();
2534 	refcount_set(&newsk->sk_refcnt, 2);
2535 
2536 	sk_set_socket(newsk, NULL);
2537 	sk_tx_queue_clear(newsk);
2538 	RCU_INIT_POINTER(newsk->sk_wq, NULL);
2539 
2540 	if (newsk->sk_prot->sockets_allocated)
2541 		sk_sockets_allocated_inc(newsk);
2542 
2543 	if (sock_needs_netstamp(sk) && newsk->sk_flags & SK_FLAGS_TIMESTAMP)
2544 		net_enable_timestamp();
2545 out:
2546 	return newsk;
2547 }
2548 EXPORT_SYMBOL_GPL(sk_clone_lock);
2549 
2550 void sk_free_unlock_clone(struct sock *sk)
2551 {
2552 	/* It is still a raw copy of the parent, so invalidate
2553 	 * the destructor and do a plain sk_free() */
2554 	sk->sk_destruct = NULL;
2555 	bh_unlock_sock(sk);
2556 	sk_free(sk);
2557 }
2558 EXPORT_SYMBOL_GPL(sk_free_unlock_clone);
2559 
2560 static u32 sk_dst_gso_max_size(struct sock *sk, struct dst_entry *dst)
2561 {
2562 	bool is_ipv6 = false;
2563 	u32 max_size;
2564 
2565 #if IS_ENABLED(CONFIG_IPV6)
2566 	is_ipv6 = (sk->sk_family == AF_INET6 &&
2567 		   !ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr));
2568 #endif
2569 	/* pairs with the WRITE_ONCE() in netif_set_gso(_ipv4)_max_size() */
2570 	max_size = is_ipv6 ? READ_ONCE(dst->dev->gso_max_size) :
2571 			READ_ONCE(dst->dev->gso_ipv4_max_size);
2572 	if (max_size > GSO_LEGACY_MAX_SIZE && !sk_is_tcp(sk))
2573 		max_size = GSO_LEGACY_MAX_SIZE;
2574 
2575 	return max_size - (MAX_TCP_HEADER + 1);
2576 }
2577 
2578 void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
2579 {
2580 	u32 max_segs = 1;
2581 
2582 	sk->sk_route_caps = dst->dev->features;
2583 	if (sk_is_tcp(sk)) {
2584 		struct inet_connection_sock *icsk = inet_csk(sk);
2585 
2586 		sk->sk_route_caps |= NETIF_F_GSO;
2587 		icsk->icsk_ack.dst_quick_ack = dst_metric(dst, RTAX_QUICKACK);
2588 	}
2589 	if (sk->sk_route_caps & NETIF_F_GSO)
2590 		sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
2591 	if (unlikely(sk->sk_gso_disabled))
2592 		sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
2593 	if (sk_can_gso(sk)) {
2594 		if (dst->header_len && !xfrm_dst_offload_ok(dst)) {
2595 			sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
2596 		} else {
2597 			sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
2598 			sk->sk_gso_max_size = sk_dst_gso_max_size(sk, dst);
2599 			/* pairs with the WRITE_ONCE() in netif_set_gso_max_segs() */
2600 			max_segs = max_t(u32, READ_ONCE(dst->dev->gso_max_segs), 1);
2601 		}
2602 	}
2603 	sk->sk_gso_max_segs = max_segs;
2604 	sk_dst_set(sk, dst);
2605 }
2606 EXPORT_SYMBOL_GPL(sk_setup_caps);
2607 
2608 /*
2609  *	Simple resource managers for sockets.
2610  */
2611 
2612 
2613 /*
2614  * Write buffer destructor automatically called from kfree_skb.
2615  */
2616 void sock_wfree(struct sk_buff *skb)
2617 {
2618 	struct sock *sk = skb->sk;
2619 	unsigned int len = skb->truesize;
2620 	bool free;
2621 
2622 	if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) {
2623 		if (sock_flag(sk, SOCK_RCU_FREE) &&
2624 		    sk->sk_write_space == sock_def_write_space) {
2625 			rcu_read_lock();
2626 			free = refcount_sub_and_test(len, &sk->sk_wmem_alloc);
2627 			sock_def_write_space_wfree(sk);
2628 			rcu_read_unlock();
2629 			if (unlikely(free))
2630 				__sk_free(sk);
2631 			return;
2632 		}
2633 
2634 		/*
2635 		 * Keep a reference on sk_wmem_alloc; it will be released
2636 		 * after the sk_write_space() call.
2637 		 */
2638 		WARN_ON(refcount_sub_and_test(len - 1, &sk->sk_wmem_alloc));
2639 		sk->sk_write_space(sk);
2640 		len = 1;
2641 	}
2642 	/*
2643 	 * if sk_wmem_alloc reaches 0, we must finish what sk_free()
2644 	 * could not do because of in-flight packets
2645 	 */
2646 	if (refcount_sub_and_test(len, &sk->sk_wmem_alloc))
2647 		__sk_free(sk);
2648 }
2649 EXPORT_SYMBOL(sock_wfree);
2650 
2651 /* This variant of sock_wfree() is used by TCP,
2652  * since it sets SOCK_USE_WRITE_QUEUE.
2653  */
2654 void __sock_wfree(struct sk_buff *skb)
2655 {
2656 	struct sock *sk = skb->sk;
2657 
2658 	if (refcount_sub_and_test(skb->truesize, &sk->sk_wmem_alloc))
2659 		__sk_free(sk);
2660 }
2661 
2662 void skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
2663 {
2664 	skb_orphan(skb);
2665 #ifdef CONFIG_INET
2666 	if (unlikely(!sk_fullsock(sk)))
2667 		return skb_set_owner_edemux(skb, sk);
2668 #endif
2669 	skb->sk = sk;
2670 	skb->destructor = sock_wfree;
2671 	skb_set_hash_from_sk(skb, sk);
2672 	/*
2673 	 * We used to take a refcount on sk, but the following operation
2674 	 * is enough to guarantee sk_free() won't free this sock until
2675 	 * all in-flight packets are completed.
2676 	 */
2677 	refcount_add(skb->truesize, &sk->sk_wmem_alloc);
2678 }
2679 EXPORT_SYMBOL(skb_set_owner_w);
2680 
2681 static bool can_skb_orphan_partial(const struct sk_buff *skb)
2682 {
2683 	/* Drivers depend on in-order delivery for crypto offload;
2684 	 * a partial orphan breaks the out-of-order-OK logic.
2685 	 */
2686 	if (skb_is_decrypted(skb))
2687 		return false;
2688 
2689 	return (skb->destructor == sock_wfree ||
2690 		(IS_ENABLED(CONFIG_INET) && skb->destructor == tcp_wfree));
2691 }
2692 
2693 /* This helper is used by netem, as it can hold packets in its
2694  * delay queue. We want to allow the owner socket to send more
2695  * packets, as if they were already TX completed by a typical driver.
2696  * But we also want to keep skb->sk set because some packet schedulers
2697  * rely on it (sch_fq for example).
2698  */
2699 void skb_orphan_partial(struct sk_buff *skb)
2700 {
2701 	if (skb_is_tcp_pure_ack(skb))
2702 		return;
2703 
2704 	if (can_skb_orphan_partial(skb) && skb_set_owner_sk_safe(skb, skb->sk))
2705 		return;
2706 
2707 	skb_orphan(skb);
2708 }
2709 EXPORT_SYMBOL(skb_orphan_partial);
2710 
2711 /*
2712  * Read buffer destructor automatically called from kfree_skb.
2713  */
2714 void sock_rfree(struct sk_buff *skb)
2715 {
2716 	struct sock *sk = skb->sk;
2717 	unsigned int len = skb->truesize;
2718 
2719 	atomic_sub(len, &sk->sk_rmem_alloc);
2720 	sk_mem_uncharge(sk, len);
2721 }
2722 EXPORT_SYMBOL(sock_rfree);
2723 
2724 /*
2725  * Buffer destructor for skbs that are not used directly in read or write
2726  * path, e.g. for error handler skbs. Automatically called from kfree_skb.
2727  */
2728 void sock_efree(struct sk_buff *skb)
2729 {
2730 	sock_put(skb->sk);
2731 }
2732 EXPORT_SYMBOL(sock_efree);
2733 
2734 /* Buffer destructor for prefetch/receive path where reference count may
2735  * not be held, e.g. for listen sockets.
2736  */
2737 #ifdef CONFIG_INET
2738 void sock_pfree(struct sk_buff *skb)
2739 {
2740 	struct sock *sk = skb->sk;
2741 
2742 	if (!sk_is_refcounted(sk))
2743 		return;
2744 
2745 	if (sk->sk_state == TCP_NEW_SYN_RECV && inet_reqsk(sk)->syncookie) {
2746 		inet_reqsk(sk)->rsk_listener = NULL;
2747 		reqsk_free(inet_reqsk(sk));
2748 		return;
2749 	}
2750 
2751 	sock_gen_put(sk);
2752 }
2753 EXPORT_SYMBOL(sock_pfree);
2754 #endif /* CONFIG_INET */
2755 
2756 kuid_t sock_i_uid(struct sock *sk)
2757 {
2758 	kuid_t uid;
2759 
2760 	read_lock_bh(&sk->sk_callback_lock);
2761 	uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : GLOBAL_ROOT_UID;
2762 	read_unlock_bh(&sk->sk_callback_lock);
2763 	return uid;
2764 }
2765 EXPORT_SYMBOL(sock_i_uid);
2766 
2767 unsigned long __sock_i_ino(struct sock *sk)
2768 {
2769 	unsigned long ino;
2770 
2771 	read_lock(&sk->sk_callback_lock);
2772 	ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
2773 	read_unlock(&sk->sk_callback_lock);
2774 	return ino;
2775 }
2776 EXPORT_SYMBOL(__sock_i_ino);
2777 
2778 unsigned long sock_i_ino(struct sock *sk)
2779 {
2780 	unsigned long ino;
2781 
2782 	local_bh_disable();
2783 	ino = __sock_i_ino(sk);
2784 	local_bh_enable();
2785 	return ino;
2786 }
2787 EXPORT_SYMBOL(sock_i_ino);
2788 
2789 /*
2790  * Allocate a skb from the socket's send buffer.
2791  */
2792 struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
2793 			     gfp_t priority)
2794 {
2795 	if (force ||
2796 	    refcount_read(&sk->sk_wmem_alloc) < READ_ONCE(sk->sk_sndbuf)) {
2797 		struct sk_buff *skb = alloc_skb(size, priority);
2798 
2799 		if (skb) {
2800 			skb_set_owner_w(skb, sk);
2801 			return skb;
2802 		}
2803 	}
2804 	return NULL;
2805 }
2806 EXPORT_SYMBOL(sock_wmalloc);
2807 
2808 static void sock_ofree(struct sk_buff *skb)
2809 {
2810 	struct sock *sk = skb->sk;
2811 
2812 	atomic_sub(skb->truesize, &sk->sk_omem_alloc);
2813 }
2814 
2815 struct sk_buff *sock_omalloc(struct sock *sk, unsigned long size,
2816 			     gfp_t priority)
2817 {
2818 	struct sk_buff *skb;
2819 
2820 	/* small safe race: SKB_TRUESIZE may differ from final skb->truesize */
2821 	if (atomic_read(&sk->sk_omem_alloc) + SKB_TRUESIZE(size) >
2822 	    READ_ONCE(sock_net(sk)->core.sysctl_optmem_max))
2823 		return NULL;
2824 
2825 	skb = alloc_skb(size, priority);
2826 	if (!skb)
2827 		return NULL;
2828 
2829 	atomic_add(skb->truesize, &sk->sk_omem_alloc);
2830 	skb->sk = sk;
2831 	skb->destructor = sock_ofree;
2832 	return skb;
2833 }
2834 
2835 /*
2836  * Allocate a memory block from the socket's option memory buffer.
2837  */
2838 void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
2839 {
2840 	int optmem_max = READ_ONCE(sock_net(sk)->core.sysctl_optmem_max);
2841 
2842 	if ((unsigned int)size <= optmem_max &&
2843 	    atomic_read(&sk->sk_omem_alloc) + size < optmem_max) {
2844 		void *mem;
2845 		/* First do the add, to avoid the race if kmalloc
2846 		 * might sleep.
2847 		 */
2848 		atomic_add(size, &sk->sk_omem_alloc);
2849 		mem = kmalloc(size, priority);
2850 		if (mem)
2851 			return mem;
2852 		atomic_sub(size, &sk->sk_omem_alloc);
2853 	}
2854 	return NULL;
2855 }
2856 EXPORT_SYMBOL(sock_kmalloc);
2857 
2858 /*
2859  * Duplicate the input "src" memory block using the socket's
2860  * option memory buffer.
2861  */
2862 void *sock_kmemdup(struct sock *sk, const void *src,
2863 		   int size, gfp_t priority)
2864 {
2865 	void *mem;
2866 
2867 	mem = sock_kmalloc(sk, size, priority);
2868 	if (mem)
2869 		memcpy(mem, src, size);
2870 	return mem;
2871 }
2872 EXPORT_SYMBOL(sock_kmemdup);
2873 
2874 /* Free an option memory block. Note that we actually want the inline
2875  * here as this allows gcc to detect the nullify and fold away the
2876  * condition entirely.
2877  */
2878 static inline void __sock_kfree_s(struct sock *sk, void *mem, int size,
2879 				  const bool nullify)
2880 {
2881 	if (WARN_ON_ONCE(!mem))
2882 		return;
2883 	if (nullify)
2884 		kfree_sensitive(mem);
2885 	else
2886 		kfree(mem);
2887 	atomic_sub(size, &sk->sk_omem_alloc);
2888 }
2889 
2890 void sock_kfree_s(struct sock *sk, void *mem, int size)
2891 {
2892 	__sock_kfree_s(sk, mem, size, false);
2893 }
2894 EXPORT_SYMBOL(sock_kfree_s);
2895 
2896 void sock_kzfree_s(struct sock *sk, void *mem, int size)
2897 {
2898 	__sock_kfree_s(sk, mem, size, true);
2899 }
2900 EXPORT_SYMBOL(sock_kzfree_s);
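
/* Editor's illustrative sketch: option memory obtained from sock_kmalloc()
 * or sock_kmemdup() must be released with sock_kfree_s() (or sock_kzfree_s()
 * for sensitive data) using the same size, so sk_omem_alloc stays balanced:
 *
 *	buf = sock_kmalloc(sk, len, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOBUFS;
 *	(use buf ...)
 *	sock_kfree_s(sk, buf, len);
 */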
2901 
2902 /* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
2903  * I think these locks should be removed for datagram sockets.
2904  */
2905 static long sock_wait_for_wmem(struct sock *sk, long timeo)
2906 {
2907 	DEFINE_WAIT(wait);
2908 
2909 	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
2910 	for (;;) {
2911 		if (!timeo)
2912 			break;
2913 		if (signal_pending(current))
2914 			break;
2915 		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
2916 		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
2917 		if (refcount_read(&sk->sk_wmem_alloc) < READ_ONCE(sk->sk_sndbuf))
2918 			break;
2919 		if (READ_ONCE(sk->sk_shutdown) & SEND_SHUTDOWN)
2920 			break;
2921 		if (READ_ONCE(sk->sk_err))
2922 			break;
2923 		timeo = schedule_timeout(timeo);
2924 	}
2925 	finish_wait(sk_sleep(sk), &wait);
2926 	return timeo;
2927 }
2928 
2929 
2930 /*
2931  *	Generic send/receive buffer handlers
2932  */
2933 
2934 struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
2935 				     unsigned long data_len, int noblock,
2936 				     int *errcode, int max_page_order)
2937 {
2938 	struct sk_buff *skb;
2939 	long timeo;
2940 	int err;
2941 
2942 	timeo = sock_sndtimeo(sk, noblock);
2943 	for (;;) {
2944 		err = sock_error(sk);
2945 		if (err != 0)
2946 			goto failure;
2947 
2948 		err = -EPIPE;
2949 		if (READ_ONCE(sk->sk_shutdown) & SEND_SHUTDOWN)
2950 			goto failure;
2951 
2952 		if (sk_wmem_alloc_get(sk) < READ_ONCE(sk->sk_sndbuf))
2953 			break;
2954 
2955 		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
2956 		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
2957 		err = -EAGAIN;
2958 		if (!timeo)
2959 			goto failure;
2960 		if (signal_pending(current))
2961 			goto interrupted;
2962 		timeo = sock_wait_for_wmem(sk, timeo);
2963 	}
2964 	skb = alloc_skb_with_frags(header_len, data_len, max_page_order,
2965 				   errcode, sk->sk_allocation);
2966 	if (skb)
2967 		skb_set_owner_w(skb, sk);
2968 	return skb;
2969 
2970 interrupted:
2971 	err = sock_intr_errno(timeo);
2972 failure:
2973 	*errcode = err;
2974 	return NULL;
2975 }
2976 EXPORT_SYMBOL(sock_alloc_send_pskb);
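
/* Editor's illustrative sketch: datagram protocols usually reach this helper
 * through the sock_alloc_send_skb() wrapper, roughly as:
 *
 *	skb = sock_alloc_send_skb(sk, hlen + len,
 *				  msg->msg_flags & MSG_DONTWAIT, &err);
 *	if (!skb)
 *		goto out;
 */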
2977 
2978 int __sock_cmsg_send(struct sock *sk, struct cmsghdr *cmsg,
2979 		     struct sockcm_cookie *sockc)
2980 {
2981 	u32 tsflags;
2982 
2983 	BUILD_BUG_ON(SOF_TIMESTAMPING_LAST == (1 << 31));
2984 
2985 	switch (cmsg->cmsg_type) {
2986 	case SO_MARK:
2987 		if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_RAW) &&
2988 		    !ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
2989 			return -EPERM;
2990 		if (cmsg->cmsg_len != CMSG_LEN(sizeof(u32)))
2991 			return -EINVAL;
2992 		sockc->mark = *(u32 *)CMSG_DATA(cmsg);
2993 		break;
2994 	case SO_TIMESTAMPING_OLD:
2995 	case SO_TIMESTAMPING_NEW:
2996 		if (cmsg->cmsg_len != CMSG_LEN(sizeof(u32)))
2997 			return -EINVAL;
2998 
2999 		tsflags = *(u32 *)CMSG_DATA(cmsg);
3000 		if (tsflags & ~SOF_TIMESTAMPING_TX_RECORD_MASK)
3001 			return -EINVAL;
3002 
3003 		sockc->tsflags &= ~SOF_TIMESTAMPING_TX_RECORD_MASK;
3004 		sockc->tsflags |= tsflags;
3005 		break;
3006 	case SCM_TXTIME:
3007 		if (!sock_flag(sk, SOCK_TXTIME))
3008 			return -EINVAL;
3009 		if (cmsg->cmsg_len != CMSG_LEN(sizeof(u64)))
3010 			return -EINVAL;
3011 		sockc->transmit_time = get_unaligned((u64 *)CMSG_DATA(cmsg));
3012 		break;
3013 	case SCM_TS_OPT_ID:
3014 		if (sk_is_tcp(sk))
3015 			return -EINVAL;
3016 		tsflags = READ_ONCE(sk->sk_tsflags);
3017 		if (!(tsflags & SOF_TIMESTAMPING_OPT_ID))
3018 			return -EINVAL;
3019 		if (cmsg->cmsg_len != CMSG_LEN(sizeof(u32)))
3020 			return -EINVAL;
3021 		sockc->ts_opt_id = *(u32 *)CMSG_DATA(cmsg);
3022 		sockc->tsflags |= SOCKCM_FLAG_TS_OPT_ID;
3023 		break;
3024 	/* SCM_RIGHTS and SCM_CREDENTIALS are semantically in SOL_UNIX. */
3025 	case SCM_RIGHTS:
3026 	case SCM_CREDENTIALS:
3027 		break;
3028 	case SO_PRIORITY:
3029 		if (cmsg->cmsg_len != CMSG_LEN(sizeof(u32)))
3030 			return -EINVAL;
3031 		if (!sk_set_prio_allowed(sk, *(u32 *)CMSG_DATA(cmsg)))
3032 			return -EPERM;
3033 		sockc->priority = *(u32 *)CMSG_DATA(cmsg);
3034 		break;
3035 	default:
3036 		return -EINVAL;
3037 	}
3038 	return 0;
3039 }
3040 EXPORT_SYMBOL(__sock_cmsg_send);
3041 
3042 int sock_cmsg_send(struct sock *sk, struct msghdr *msg,
3043 		   struct sockcm_cookie *sockc)
3044 {
3045 	struct cmsghdr *cmsg;
3046 	int ret;
3047 
3048 	for_each_cmsghdr(cmsg, msg) {
3049 		if (!CMSG_OK(msg, cmsg))
3050 			return -EINVAL;
3051 		if (cmsg->cmsg_level != SOL_SOCKET)
3052 			continue;
3053 		ret = __sock_cmsg_send(sk, cmsg, sockc);
3054 		if (ret)
3055 			return ret;
3056 	}
3057 	return 0;
3058 }
3059 EXPORT_SYMBOL(sock_cmsg_send);
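
/* Editor's illustrative sketch: protocol sendmsg() handlers typically seed a
 * sockcm_cookie from the socket and then let sock_cmsg_send() override it
 * from any SOL_SOCKET control messages supplied by the caller:
 *
 *	struct sockcm_cookie sockc;
 *
 *	sockcm_init(&sockc, sk);
 *	if (msg->msg_controllen) {
 *		err = sock_cmsg_send(sk, msg, &sockc);
 *		if (err)
 *			goto out;
 *	}
 */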
3060 
3061 static void sk_enter_memory_pressure(struct sock *sk)
3062 {
3063 	if (!sk->sk_prot->enter_memory_pressure)
3064 		return;
3065 
3066 	sk->sk_prot->enter_memory_pressure(sk);
3067 }
3068 
3069 static void sk_leave_memory_pressure(struct sock *sk)
3070 {
3071 	if (sk->sk_prot->leave_memory_pressure) {
3072 		INDIRECT_CALL_INET_1(sk->sk_prot->leave_memory_pressure,
3073 				     tcp_leave_memory_pressure, sk);
3074 	} else {
3075 		unsigned long *memory_pressure = sk->sk_prot->memory_pressure;
3076 
3077 		if (memory_pressure && READ_ONCE(*memory_pressure))
3078 			WRITE_ONCE(*memory_pressure, 0);
3079 	}
3080 }
3081 
3082 DEFINE_STATIC_KEY_FALSE(net_high_order_alloc_disable_key);
3083 
3084 /**
3085  * skb_page_frag_refill - check that a page_frag contains enough room
3086  * @sz: minimum size of the fragment we want to get
3087  * @pfrag: pointer to page_frag
3088  * @gfp: priority for memory allocation
3089  *
3090  * Note: While this allocator tries to use high order pages, there is
3091  * no guarantee that allocations succeed. Therefore, @sz MUST be
3092  * less than or equal to PAGE_SIZE.
3093  */
3094 bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t gfp)
3095 {
3096 	if (pfrag->page) {
3097 		if (page_ref_count(pfrag->page) == 1) {
3098 			pfrag->offset = 0;
3099 			return true;
3100 		}
3101 		if (pfrag->offset + sz <= pfrag->size)
3102 			return true;
3103 		put_page(pfrag->page);
3104 	}
3105 
3106 	pfrag->offset = 0;
3107 	if (SKB_FRAG_PAGE_ORDER &&
3108 	    !static_branch_unlikely(&net_high_order_alloc_disable_key)) {
3109 		/* Avoid direct reclaim but allow kswapd to wake */
3110 		pfrag->page = alloc_pages((gfp & ~__GFP_DIRECT_RECLAIM) |
3111 					  __GFP_COMP | __GFP_NOWARN |
3112 					  __GFP_NORETRY,
3113 					  SKB_FRAG_PAGE_ORDER);
3114 		if (likely(pfrag->page)) {
3115 			pfrag->size = PAGE_SIZE << SKB_FRAG_PAGE_ORDER;
3116 			return true;
3117 		}
3118 	}
3119 	pfrag->page = alloc_page(gfp);
3120 	if (likely(pfrag->page)) {
3121 		pfrag->size = PAGE_SIZE;
3122 		return true;
3123 	}
3124 	return false;
3125 }
3126 EXPORT_SYMBOL(skb_page_frag_refill);
3127 
3128 bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
3129 {
3130 	if (likely(skb_page_frag_refill(32U, pfrag, sk->sk_allocation)))
3131 		return true;
3132 
3133 	sk_enter_memory_pressure(sk);
3134 	sk_stream_moderate_sndbuf(sk);
3135 	return false;
3136 }
3137 EXPORT_SYMBOL(sk_page_frag_refill);
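
/* Editor's illustrative sketch: a typical sendmsg() fast path refills the
 * per-socket page_frag and copies user data into it before attaching the
 * page to an skb frag:
 *
 *	struct page_frag *pfrag = sk_page_frag(sk);
 *
 *	if (!sk_page_frag_refill(sk, pfrag))
 *		goto wait_for_memory;
 *	copy = min_t(int, msg_data_left(msg), pfrag->size - pfrag->offset);
 *	(copy the data to pfrag->page at pfrag->offset, then advance the offset)
 */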
3138 
3139 void __lock_sock(struct sock *sk)
3140 	__releases(&sk->sk_lock.slock)
3141 	__acquires(&sk->sk_lock.slock)
3142 {
3143 	DEFINE_WAIT(wait);
3144 
3145 	for (;;) {
3146 		prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
3147 					TASK_UNINTERRUPTIBLE);
3148 		spin_unlock_bh(&sk->sk_lock.slock);
3149 		schedule();
3150 		spin_lock_bh(&sk->sk_lock.slock);
3151 		if (!sock_owned_by_user(sk))
3152 			break;
3153 	}
3154 	finish_wait(&sk->sk_lock.wq, &wait);
3155 }
3156 
3157 void __release_sock(struct sock *sk)
3158 	__releases(&sk->sk_lock.slock)
3159 	__acquires(&sk->sk_lock.slock)
3160 {
3161 	struct sk_buff *skb, *next;
3162 
3163 	while ((skb = sk->sk_backlog.head) != NULL) {
3164 		sk->sk_backlog.head = sk->sk_backlog.tail = NULL;
3165 
3166 		spin_unlock_bh(&sk->sk_lock.slock);
3167 
3168 		do {
3169 			next = skb->next;
3170 			prefetch(next);
3171 			DEBUG_NET_WARN_ON_ONCE(skb_dst_is_noref(skb));
3172 			skb_mark_not_on_list(skb);
3173 			sk_backlog_rcv(sk, skb);
3174 
3175 			cond_resched();
3176 
3177 			skb = next;
3178 		} while (skb != NULL);
3179 
3180 		spin_lock_bh(&sk->sk_lock.slock);
3181 	}
3182 
3183 	/*
3184 	 * Doing the zeroing here guarantees we cannot loop forever
3185 	 * while a wild producer attempts to flood us.
3186 	 */
3187 	sk->sk_backlog.len = 0;
3188 }
3189 
3190 void __sk_flush_backlog(struct sock *sk)
3191 {
3192 	spin_lock_bh(&sk->sk_lock.slock);
3193 	__release_sock(sk);
3194 
3195 	if (sk->sk_prot->release_cb)
3196 		INDIRECT_CALL_INET_1(sk->sk_prot->release_cb,
3197 				     tcp_release_cb, sk);
3198 
3199 	spin_unlock_bh(&sk->sk_lock.slock);
3200 }
3201 EXPORT_SYMBOL_GPL(__sk_flush_backlog);
3202 
3203 /**
3204  * sk_wait_data - wait for data to arrive at sk_receive_queue
3205  * @sk:    sock to wait on
3206  * @timeo: for how long
3207  * @skb:   last skb seen on sk_receive_queue
3208  *
3209  * Socket state, including sk->sk_err, is now changed only under the lock,
3210  * hence we may omit checks after joining the wait queue.
3211  * We check the receive queue before schedule() only as an optimization;
3212  * it is very likely that release_sock() added new data.
3213  */
3214 int sk_wait_data(struct sock *sk, long *timeo, const struct sk_buff *skb)
3215 {
3216 	DEFINE_WAIT_FUNC(wait, woken_wake_function);
3217 	int rc;
3218 
3219 	add_wait_queue(sk_sleep(sk), &wait);
3220 	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
3221 	rc = sk_wait_event(sk, timeo, skb_peek_tail(&sk->sk_receive_queue) != skb, &wait);
3222 	sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
3223 	remove_wait_queue(sk_sleep(sk), &wait);
3224 	return rc;
3225 }
3226 EXPORT_SYMBOL(sk_wait_data);
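
/* Editor's illustrative sketch: blocking recvmsg() implementations call
 * sk_wait_data() with the socket lock held, roughly as:
 *
 *	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
 *	while (!(skb = skb_peek(&sk->sk_receive_queue))) {
 *		if (!timeo)
 *			return -EAGAIN;
 *		err = sk_wait_data(sk, &timeo, NULL);
 *		if (err < 0)
 *			return err;
 *	}
 */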
3227 
3228 /**
3229  *	__sk_mem_raise_allocated - increase memory_allocated
3230  *	@sk: socket
3231  *	@size: memory size to allocate
3232  *	@amt: pages to allocate
3233  *	@kind: allocation type
3234  *
3235  *	Similar to __sk_mem_schedule(), but does not update sk_forward_alloc.
3236  *
3237  *	Unlike the globally shared limits among the sockets under the same protocol,
3238  *	consuming the budget of a memcg won't have a direct effect on other memcgs.
3239  *	So be optimistic about memcg's tolerance, and leave the callers to decide
3240  *	whether or not to raise allocated through sk_under_memory_pressure() or
3241  *	its variants.
3242  */
3243 int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind)
3244 {
3245 	struct mem_cgroup *memcg = mem_cgroup_sockets_enabled ? sk->sk_memcg : NULL;
3246 	struct proto *prot = sk->sk_prot;
3247 	bool charged = false;
3248 	long allocated;
3249 
3250 	sk_memory_allocated_add(sk, amt);
3251 	allocated = sk_memory_allocated(sk);
3252 
3253 	if (memcg) {
3254 		if (!mem_cgroup_charge_skmem(memcg, amt, gfp_memcg_charge()))
3255 			goto suppress_allocation;
3256 		charged = true;
3257 	}
3258 
3259 	/* Under limit. */
3260 	if (allocated <= sk_prot_mem_limits(sk, 0)) {
3261 		sk_leave_memory_pressure(sk);
3262 		return 1;
3263 	}
3264 
3265 	/* Under pressure. */
3266 	if (allocated > sk_prot_mem_limits(sk, 1))
3267 		sk_enter_memory_pressure(sk);
3268 
3269 	/* Over hard limit. */
3270 	if (allocated > sk_prot_mem_limits(sk, 2))
3271 		goto suppress_allocation;
3272 
3273 	/* Guarantee a minimum buffer size under pressure (either global
3274 	 * or memcg) to make sure the features described in RFC 7323 (TCP
3275 	 * Extensions for High Performance) work properly.
3276 	 *
3277 	 * This rule does NOT hold once the global or memcg hard limit is
3278 	 * exceeded, or else a DoS attack could be mounted by spawning lots
3279 	 * of sockets whose usage stays under the minimum buffer size.
3280 	 */
3281 	if (kind == SK_MEM_RECV) {
3282 		if (atomic_read(&sk->sk_rmem_alloc) < sk_get_rmem0(sk, prot))
3283 			return 1;
3284 
3285 	} else { /* SK_MEM_SEND */
3286 		int wmem0 = sk_get_wmem0(sk, prot);
3287 
3288 		if (sk->sk_type == SOCK_STREAM) {
3289 			if (sk->sk_wmem_queued < wmem0)
3290 				return 1;
3291 		} else if (refcount_read(&sk->sk_wmem_alloc) < wmem0) {
3292 			return 1;
3293 		}
3294 	}
3295 
3296 	if (sk_has_memory_pressure(sk)) {
3297 		u64 alloc;
3298 
3299 		/* The following 'average' heuristic is within the
3300 		 * scope of global accounting, so it only makes
3301 		 * sense for global memory pressure.
3302 		 */
3303 		if (!sk_under_global_memory_pressure(sk))
3304 			return 1;
3305 
3306 		/* Try to be fair among all the sockets under global
3307 		 * pressure by allowing the ones whose usage is below
3308 		 * average to raise their allocation.
3309 		 */
3310 		alloc = sk_sockets_allocated_read_positive(sk);
3311 		if (sk_prot_mem_limits(sk, 2) > alloc *
3312 		    sk_mem_pages(sk->sk_wmem_queued +
3313 				 atomic_read(&sk->sk_rmem_alloc) +
3314 				 sk->sk_forward_alloc))
3315 			return 1;
3316 	}
3317 
3318 suppress_allocation:
3319 
3320 	if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) {
3321 		sk_stream_moderate_sndbuf(sk);
3322 
3323 		/* Fail only if the socket is _under_ its sndbuf.
3324 		 * In this case we cannot block, so we have to fail.
3325 		 */
3326 		if (sk->sk_wmem_queued + size >= sk->sk_sndbuf) {
3327 			/* Force charge with __GFP_NOFAIL */
3328 			if (memcg && !charged) {
3329 				mem_cgroup_charge_skmem(memcg, amt,
3330 					gfp_memcg_charge() | __GFP_NOFAIL);
3331 			}
3332 			return 1;
3333 		}
3334 	}
3335 
3336 	if (kind == SK_MEM_SEND || (kind == SK_MEM_RECV && charged))
3337 		trace_sock_exceed_buf_limit(sk, prot, allocated, kind);
3338 
3339 	sk_memory_allocated_sub(sk, amt);
3340 
3341 	if (charged)
3342 		mem_cgroup_uncharge_skmem(memcg, amt);
3343 
3344 	return 0;
3345 }
3346 
3347 /**
3348  *	__sk_mem_schedule - increase sk_forward_alloc and memory_allocated
3349  *	@sk: socket
3350  *	@size: memory size to allocate
3351  *	@kind: allocation type
3352  *
3353  *	If kind is SK_MEM_SEND, it means wmem allocation. Otherwise it means
3354  *	rmem allocation. This function assumes that protocols which have
3355  *	memory_pressure use sk_wmem_queued as write buffer accounting.
3356  */
3357 int __sk_mem_schedule(struct sock *sk, int size, int kind)
3358 {
3359 	int ret, amt = sk_mem_pages(size);
3360 
3361 	sk_forward_alloc_add(sk, amt << PAGE_SHIFT);
3362 	ret = __sk_mem_raise_allocated(sk, size, amt, kind);
3363 	if (!ret)
3364 		sk_forward_alloc_add(sk, -(amt << PAGE_SHIFT));
3365 	return ret;
3366 }
3367 EXPORT_SYMBOL(__sk_mem_schedule);
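
/* Editor's illustrative sketch: receive paths normally use the inline
 * wrappers from include/net/sock.h instead of calling this directly, e.g.:
 *
 *	if (!sk_rmem_schedule(sk, skb, skb->truesize))
 *		return -ENOBUFS;
 *	skb_set_owner_r(skb, sk);
 */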
3368 
3369 /**
3370  *	__sk_mem_reduce_allocated - reclaim memory_allocated
3371  *	@sk: socket
3372  *	@amount: number of quanta
3373  *
3374  *	Similar to __sk_mem_reclaim(), but does not update sk_forward_alloc
3375  */
3376 void __sk_mem_reduce_allocated(struct sock *sk, int amount)
3377 {
3378 	sk_memory_allocated_sub(sk, amount);
3379 
3380 	if (mem_cgroup_sockets_enabled && sk->sk_memcg)
3381 		mem_cgroup_uncharge_skmem(sk->sk_memcg, amount);
3382 
3383 	if (sk_under_global_memory_pressure(sk) &&
3384 	    (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)))
3385 		sk_leave_memory_pressure(sk);
3386 }
3387 
3388 /**
3389  *	__sk_mem_reclaim - reclaim sk_forward_alloc and memory_allocated
3390  *	@sk: socket
3391  *	@amount: number of bytes (rounded down to a PAGE_SIZE multiple)
3392  */
3393 void __sk_mem_reclaim(struct sock *sk, int amount)
3394 {
3395 	amount >>= PAGE_SHIFT;
3396 	sk_forward_alloc_add(sk, -(amount << PAGE_SHIFT));
3397 	__sk_mem_reduce_allocated(sk, amount);
3398 }
3399 EXPORT_SYMBOL(__sk_mem_reclaim);
3400 
3401 int sk_set_peek_off(struct sock *sk, int val)
3402 {
3403 	WRITE_ONCE(sk->sk_peek_off, val);
3404 	return 0;
3405 }
3406 EXPORT_SYMBOL_GPL(sk_set_peek_off);
3407 
3408 /*
3409  * Set of default routines for initialising struct proto_ops when
3410  * the protocol does not support a particular function. In certain
3411  * cases where it makes no sense for a protocol to have a "do nothing"
3412  * function, some default processing is provided.
3413  */
3414 
3415 int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len)
3416 {
3417 	return -EOPNOTSUPP;
3418 }
3419 EXPORT_SYMBOL(sock_no_bind);
3420 
3421 int sock_no_connect(struct socket *sock, struct sockaddr *saddr,
3422 		    int len, int flags)
3423 {
3424 	return -EOPNOTSUPP;
3425 }
3426 EXPORT_SYMBOL(sock_no_connect);
3427 
3428 int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
3429 {
3430 	return -EOPNOTSUPP;
3431 }
3432 EXPORT_SYMBOL(sock_no_socketpair);
3433 
3434 int sock_no_accept(struct socket *sock, struct socket *newsock,
3435 		   struct proto_accept_arg *arg)
3436 {
3437 	return -EOPNOTSUPP;
3438 }
3439 EXPORT_SYMBOL(sock_no_accept);
3440 
3441 int sock_no_getname(struct socket *sock, struct sockaddr *saddr,
3442 		    int peer)
3443 {
3444 	return -EOPNOTSUPP;
3445 }
3446 EXPORT_SYMBOL(sock_no_getname);
3447 
3448 int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
3449 {
3450 	return -EOPNOTSUPP;
3451 }
3452 EXPORT_SYMBOL(sock_no_ioctl);
3453 
3454 int sock_no_listen(struct socket *sock, int backlog)
3455 {
3456 	return -EOPNOTSUPP;
3457 }
3458 EXPORT_SYMBOL(sock_no_listen);
3459 
3460 int sock_no_shutdown(struct socket *sock, int how)
3461 {
3462 	return -EOPNOTSUPP;
3463 }
3464 EXPORT_SYMBOL(sock_no_shutdown);
3465 
3466 int sock_no_sendmsg(struct socket *sock, struct msghdr *m, size_t len)
3467 {
3468 	return -EOPNOTSUPP;
3469 }
3470 EXPORT_SYMBOL(sock_no_sendmsg);
3471 
3472 int sock_no_sendmsg_locked(struct sock *sk, struct msghdr *m, size_t len)
3473 {
3474 	return -EOPNOTSUPP;
3475 }
3476 EXPORT_SYMBOL(sock_no_sendmsg_locked);
3477 
3478 int sock_no_recvmsg(struct socket *sock, struct msghdr *m, size_t len,
3479 		    int flags)
3480 {
3481 	return -EOPNOTSUPP;
3482 }
3483 EXPORT_SYMBOL(sock_no_recvmsg);
3484 
3485 int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
3486 {
3487 	/* Mirror missing mmap method error code */
3488 	return -ENODEV;
3489 }
3490 EXPORT_SYMBOL(sock_no_mmap);
3491 
3492 /*
3493  * When a file is received (via SCM_RIGHTS, etc), we must bump the
3494  * various sock-based usage counts.
3495  */
3496 void __receive_sock(struct file *file)
3497 {
3498 	struct socket *sock;
3499 
3500 	sock = sock_from_file(file);
3501 	if (sock) {
3502 		sock_update_netprioidx(&sock->sk->sk_cgrp_data);
3503 		sock_update_classid(&sock->sk->sk_cgrp_data);
3504 	}
3505 }
3506 
3507 /*
3508  *	Default Socket Callbacks
3509  */
3510 
3511 static void sock_def_wakeup(struct sock *sk)
3512 {
3513 	struct socket_wq *wq;
3514 
3515 	rcu_read_lock();
3516 	wq = rcu_dereference(sk->sk_wq);
3517 	if (skwq_has_sleeper(wq))
3518 		wake_up_interruptible_all(&wq->wait);
3519 	rcu_read_unlock();
3520 }
3521 
3522 static void sock_def_error_report(struct sock *sk)
3523 {
3524 	struct socket_wq *wq;
3525 
3526 	rcu_read_lock();
3527 	wq = rcu_dereference(sk->sk_wq);
3528 	if (skwq_has_sleeper(wq))
3529 		wake_up_interruptible_poll(&wq->wait, EPOLLERR);
3530 	sk_wake_async_rcu(sk, SOCK_WAKE_IO, POLL_ERR);
3531 	rcu_read_unlock();
3532 }
3533 
3534 void sock_def_readable(struct sock *sk)
3535 {
3536 	struct socket_wq *wq;
3537 
3538 	trace_sk_data_ready(sk);
3539 
3540 	rcu_read_lock();
3541 	wq = rcu_dereference(sk->sk_wq);
3542 	if (skwq_has_sleeper(wq))
3543 		wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN | EPOLLPRI |
3544 						EPOLLRDNORM | EPOLLRDBAND);
3545 	sk_wake_async_rcu(sk, SOCK_WAKE_WAITD, POLL_IN);
3546 	rcu_read_unlock();
3547 }
3548 
3549 static void sock_def_write_space(struct sock *sk)
3550 {
3551 	struct socket_wq *wq;
3552 
3553 	rcu_read_lock();
3554 
3555 	/* Do not wake up a writer until he can make "significant"
3556 	 * progress.  --DaveM
3557 	 */
3558 	if (sock_writeable(sk)) {
3559 		wq = rcu_dereference(sk->sk_wq);
3560 		if (skwq_has_sleeper(wq))
3561 			wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT |
3562 						EPOLLWRNORM | EPOLLWRBAND);
3563 
3564 		/* Should agree with poll, otherwise some programs break */
3565 		sk_wake_async_rcu(sk, SOCK_WAKE_SPACE, POLL_OUT);
3566 	}
3567 
3568 	rcu_read_unlock();
3569 }
3570 
3571 /* An optimised version of sock_def_write_space(); it should only be called
3572  * for SOCK_RCU_FREE sockets, inside an RCU read-side section, and after
3573  * putting ->sk_wmem_alloc.
3574  */
3575 static void sock_def_write_space_wfree(struct sock *sk)
3576 {
3577 	/* Do not wake up a writer until he can make "significant"
3578 	 * progress.  --DaveM
3579 	 */
3580 	if (sock_writeable(sk)) {
3581 		struct socket_wq *wq = rcu_dereference(sk->sk_wq);
3582 
3583 		/* rely on refcount_sub from sock_wfree() */
3584 		smp_mb__after_atomic();
3585 		if (wq && waitqueue_active(&wq->wait))
3586 			wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT |
3587 						EPOLLWRNORM | EPOLLWRBAND);
3588 
3589 		/* Should agree with poll, otherwise some programs break */
3590 		sk_wake_async_rcu(sk, SOCK_WAKE_SPACE, POLL_OUT);
3591 	}
3592 }
3593 
3594 static void sock_def_destruct(struct sock *sk)
3595 {
3596 }
3597 
3598 void sk_send_sigurg(struct sock *sk)
3599 {
3600 	if (sk->sk_socket && sk->sk_socket->file)
3601 		if (send_sigurg(sk->sk_socket->file))
3602 			sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI);
3603 }
3604 EXPORT_SYMBOL(sk_send_sigurg);
3605 
3606 void sk_reset_timer(struct sock *sk, struct timer_list *timer,
3607 		    unsigned long expires)
3608 {
3609 	if (!mod_timer(timer, expires))
3610 		sock_hold(sk);
3611 }
3612 EXPORT_SYMBOL(sk_reset_timer);
3613 
3614 void sk_stop_timer(struct sock *sk, struct timer_list *timer)
3615 {
3616 	if (timer_delete(timer))
3617 		__sock_put(sk);
3618 }
3619 EXPORT_SYMBOL(sk_stop_timer);
3620 
3621 void sk_stop_timer_sync(struct sock *sk, struct timer_list *timer)
3622 {
3623 	if (timer_delete_sync(timer))
3624 		__sock_put(sk);
3625 }
3626 EXPORT_SYMBOL(sk_stop_timer_sync);
3627 
3628 void sock_init_data_uid(struct socket *sock, struct sock *sk, kuid_t uid)
3629 {
3630 	sk_init_common(sk);
3631 	sk->sk_send_head	=	NULL;
3632 
3633 	timer_setup(&sk->sk_timer, NULL, 0);
3634 
3635 	sk->sk_allocation	=	GFP_KERNEL;
3636 	sk->sk_rcvbuf		=	READ_ONCE(sysctl_rmem_default);
3637 	sk->sk_sndbuf		=	READ_ONCE(sysctl_wmem_default);
3638 	sk->sk_state		=	TCP_CLOSE;
3639 	sk->sk_use_task_frag	=	true;
3640 	sk_set_socket(sk, sock);
3641 
3642 	sock_set_flag(sk, SOCK_ZAPPED);
3643 
3644 	if (sock) {
3645 		sk->sk_type	=	sock->type;
3646 		RCU_INIT_POINTER(sk->sk_wq, &sock->wq);
3647 		sock->sk	=	sk;
3648 	} else {
3649 		RCU_INIT_POINTER(sk->sk_wq, NULL);
3650 	}
3651 	sk->sk_uid	=	uid;
3652 
3653 	sk->sk_state_change	=	sock_def_wakeup;
3654 	sk->sk_data_ready	=	sock_def_readable;
3655 	sk->sk_write_space	=	sock_def_write_space;
3656 	sk->sk_error_report	=	sock_def_error_report;
3657 	sk->sk_destruct		=	sock_def_destruct;
3658 
3659 	sk->sk_frag.page	=	NULL;
3660 	sk->sk_frag.offset	=	0;
3661 	sk->sk_peek_off		=	-1;
3662 
3663 	sk->sk_peer_pid 	=	NULL;
3664 	sk->sk_peer_cred	=	NULL;
3665 	spin_lock_init(&sk->sk_peer_lock);
3666 
3667 	sk->sk_write_pending	=	0;
3668 	sk->sk_rcvlowat		=	1;
3669 	sk->sk_rcvtimeo		=	MAX_SCHEDULE_TIMEOUT;
3670 	sk->sk_sndtimeo		=	MAX_SCHEDULE_TIMEOUT;
3671 
3672 	sk->sk_stamp = SK_DEFAULT_STAMP;
3673 #if BITS_PER_LONG == 32
3674 	seqlock_init(&sk->sk_stamp_seq);
3675 #endif
3676 	atomic_set(&sk->sk_zckey, 0);
3677 
3678 #ifdef CONFIG_NET_RX_BUSY_POLL
3679 	sk->sk_napi_id		=	0;
3680 	sk->sk_ll_usec		=	READ_ONCE(sysctl_net_busy_read);
3681 #endif
3682 
3683 	sk->sk_max_pacing_rate = ~0UL;
3684 	sk->sk_pacing_rate = ~0UL;
3685 	WRITE_ONCE(sk->sk_pacing_shift, 10);
3686 	sk->sk_incoming_cpu = -1;
3687 
3688 	sk_rx_queue_clear(sk);
3689 	/*
3690 	 * Before updating sk_refcnt, we must commit prior changes to memory
3691 	 * (Documentation/RCU/rculist_nulls.rst for details)
3692 	 */
3693 	smp_wmb();
3694 	refcount_set(&sk->sk_refcnt, 1);
3695 	atomic_set(&sk->sk_drops, 0);
3696 }
3697 EXPORT_SYMBOL(sock_init_data_uid);
3698 
3699 void sock_init_data(struct socket *sock, struct sock *sk)
3700 {
3701 	kuid_t uid = sock ?
3702 		SOCK_INODE(sock)->i_uid :
3703 		make_kuid(sock_net(sk)->user_ns, 0);
3704 
3705 	sock_init_data_uid(sock, sk, uid);
3706 }
3707 EXPORT_SYMBOL(sock_init_data);
3708 
3709 void lock_sock_nested(struct sock *sk, int subclass)
3710 {
3711 	/* The sk_lock has mutex_lock() semantics here. */
3712 	mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
3713 
3714 	might_sleep();
3715 	spin_lock_bh(&sk->sk_lock.slock);
3716 	if (sock_owned_by_user_nocheck(sk))
3717 		__lock_sock(sk);
3718 	sk->sk_lock.owned = 1;
3719 	spin_unlock_bh(&sk->sk_lock.slock);
3720 }
3721 EXPORT_SYMBOL(lock_sock_nested);
3722 
3723 void release_sock(struct sock *sk)
3724 {
3725 	spin_lock_bh(&sk->sk_lock.slock);
3726 	if (sk->sk_backlog.tail)
3727 		__release_sock(sk);
3728 
3729 	if (sk->sk_prot->release_cb)
3730 		INDIRECT_CALL_INET_1(sk->sk_prot->release_cb,
3731 				     tcp_release_cb, sk);
3732 
3733 	sock_release_ownership(sk);
3734 	if (waitqueue_active(&sk->sk_lock.wq))
3735 		wake_up(&sk->sk_lock.wq);
3736 	spin_unlock_bh(&sk->sk_lock.slock);
3737 }
3738 EXPORT_SYMBOL(release_sock);
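
/* Editor's illustrative sketch: process-context users bracket socket state
 * changes with lock_sock()/release_sock(); release_sock() above also runs
 * the backlog that softirq handlers queued while the lock was owned:
 *
 *	lock_sock(sk);
 *	(modify protocol or socket state)
 *	release_sock(sk);
 */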
3739 
3740 bool __lock_sock_fast(struct sock *sk) __acquires(&sk->sk_lock.slock)
3741 {
3742 	might_sleep();
3743 	spin_lock_bh(&sk->sk_lock.slock);
3744 
3745 	if (!sock_owned_by_user_nocheck(sk)) {
3746 		/*
3747 		 * Fast path return with bottom halves disabled and
3748 		 * sock::sk_lock.slock held.
3749 		 *
3750 		 * The 'mutex' is not contended and holding
3751 		 * sock::sk_lock.slock prevents all other lockers to
3752 		 * proceed so the corresponding unlock_sock_fast() can
3753 		 * avoid the slow path of release_sock() completely and
3754 		 * just release slock.
3755 		 *
3756 		 * From a semantical POV this is equivalent to 'acquiring'
3757 		 * the 'mutex', hence the corresponding lockdep
3758 		 * mutex_release() has to happen in the fast path of
3759 		 * unlock_sock_fast().
3760 		 */
3761 		return false;
3762 	}
3763 
3764 	__lock_sock(sk);
3765 	sk->sk_lock.owned = 1;
3766 	__acquire(&sk->sk_lock.slock);
3767 	spin_unlock_bh(&sk->sk_lock.slock);
3768 	return true;
3769 }
3770 EXPORT_SYMBOL(__lock_sock_fast);
3771 
3772 int sock_gettstamp(struct socket *sock, void __user *userstamp,
3773 		   bool timeval, bool time32)
3774 {
3775 	struct sock *sk = sock->sk;
3776 	struct timespec64 ts;
3777 
3778 	sock_enable_timestamp(sk, SOCK_TIMESTAMP);
3779 	ts = ktime_to_timespec64(sock_read_timestamp(sk));
3780 	if (ts.tv_sec == -1)
3781 		return -ENOENT;
3782 	if (ts.tv_sec == 0) {
3783 		ktime_t kt = ktime_get_real();
3784 		sock_write_timestamp(sk, kt);
3785 		ts = ktime_to_timespec64(kt);
3786 	}
3787 
3788 	if (timeval)
3789 		ts.tv_nsec /= 1000;
3790 
3791 #ifdef CONFIG_COMPAT_32BIT_TIME
3792 	if (time32)
3793 		return put_old_timespec32(&ts, userstamp);
3794 #endif
3795 #ifdef CONFIG_SPARC64
3796 	/* beware of padding in sparc64 timeval */
3797 	if (timeval && !in_compat_syscall()) {
3798 		struct __kernel_old_timeval __user tv = {
3799 			.tv_sec = ts.tv_sec,
3800 			.tv_usec = ts.tv_nsec,
3801 		};
3802 		if (copy_to_user(userstamp, &tv, sizeof(tv)))
3803 			return -EFAULT;
3804 		return 0;
3805 	}
3806 #endif
3807 	return put_timespec64(&ts, userstamp);
3808 }
3809 EXPORT_SYMBOL(sock_gettstamp);
3810 
3811 void sock_enable_timestamp(struct sock *sk, enum sock_flags flag)
3812 {
3813 	if (!sock_flag(sk, flag)) {
3814 		unsigned long previous_flags = sk->sk_flags;
3815 
3816 		sock_set_flag(sk, flag);
3817 		/*
3818 		 * We just set one of the two flags which require net
3819 		 * time stamping, but time stamping might have been on
3820 		 * already because of the other one.
3821 		 */
3822 		if (sock_needs_netstamp(sk) &&
3823 		    !(previous_flags & SK_FLAGS_TIMESTAMP))
3824 			net_enable_timestamp();
3825 	}
3826 }
3827 
3828 int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
3829 		       int level, int type)
3830 {
3831 	struct sock_exterr_skb *serr;
3832 	struct sk_buff *skb;
3833 	int copied, err;
3834 
3835 	err = -EAGAIN;
3836 	skb = sock_dequeue_err_skb(sk);
3837 	if (skb == NULL)
3838 		goto out;
3839 
3840 	copied = skb->len;
3841 	if (copied > len) {
3842 		msg->msg_flags |= MSG_TRUNC;
3843 		copied = len;
3844 	}
3845 	err = skb_copy_datagram_msg(skb, 0, msg, copied);
3846 	if (err)
3847 		goto out_free_skb;
3848 
3849 	sock_recv_timestamp(msg, sk, skb);
3850 
3851 	serr = SKB_EXT_ERR(skb);
3852 	put_cmsg(msg, level, type, sizeof(serr->ee), &serr->ee);
3853 
3854 	msg->msg_flags |= MSG_ERRQUEUE;
3855 	err = copied;
3856 
3857 out_free_skb:
3858 	kfree_skb(skb);
3859 out:
3860 	return err;
3861 }
3862 EXPORT_SYMBOL(sock_recv_errqueue);
3863 
3864 /*
3865  *	Get a socket option on a socket.
3866  *
3867  *	FIX: POSIX 1003.1g is very ambiguous here. It states that
3868  *	asynchronous errors should be reported by getsockopt. We assume
3869  *	this means if you specify SO_ERROR (otherwise what is the point of it).
3870  */
3871 int sock_common_getsockopt(struct socket *sock, int level, int optname,
3872 			   char __user *optval, int __user *optlen)
3873 {
3874 	struct sock *sk = sock->sk;
3875 
3876 	/* IPV6_ADDRFORM can change sk->sk_prot under us. */
3877 	return READ_ONCE(sk->sk_prot)->getsockopt(sk, level, optname, optval, optlen);
3878 }
3879 EXPORT_SYMBOL(sock_common_getsockopt);
3880 
3881 int sock_common_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
3882 			int flags)
3883 {
3884 	struct sock *sk = sock->sk;
3885 	int addr_len = 0;
3886 	int err;
3887 
3888 	err = sk->sk_prot->recvmsg(sk, msg, size, flags, &addr_len);
3889 	if (err >= 0)
3890 		msg->msg_namelen = addr_len;
3891 	return err;
3892 }
3893 EXPORT_SYMBOL(sock_common_recvmsg);
3894 
3895 /*
3896  *	Set socket options on a socket.
3897  */
3898 int sock_common_setsockopt(struct socket *sock, int level, int optname,
3899 			   sockptr_t optval, unsigned int optlen)
3900 {
3901 	struct sock *sk = sock->sk;
3902 
3903 	/* IPV6_ADDRFORM can change sk->sk_prot under us. */
3904 	return READ_ONCE(sk->sk_prot)->setsockopt(sk, level, optname, optval, optlen);
3905 }
3906 EXPORT_SYMBOL(sock_common_setsockopt);
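/*
 * Illustrative sketch (assumption, hypothetical PF_EXAMPLE protocol):
 * protocols with no special needs simply plug the sock_common_* helpers
 * into their struct proto_ops:
 *
 *	static const struct proto_ops example_dgram_ops = {
 *		.family		= PF_EXAMPLE,
 *		.owner		= THIS_MODULE,
 *		.setsockopt	= sock_common_setsockopt,
 *		.getsockopt	= sock_common_getsockopt,
 *		.recvmsg	= sock_common_recvmsg,
 *		(remaining callbacks omitted)
 *	};
 */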
3907 
3908 void sk_common_release(struct sock *sk)
3909 {
3910 	if (sk->sk_prot->destroy)
3911 		sk->sk_prot->destroy(sk);
3912 
3913 	/*
3914 	 * Observation: when sk_common_release() is called, processes no
3915 	 * longer have access to the socket, but the network stack still does.
3916 	 * Step one, detach it from networking:
3917 	 *
3918 	 * A. Remove from hash tables.
3919 	 */
3920 
3921 	sk->sk_prot->unhash(sk);
3922 
3923 	/*
3924 	 * At this point the socket cannot receive new packets, but some may
3925 	 * still be in flight because another CPU running the receive path did
3926 	 * its hash table lookup before we unhashed the socket. Those packets
3927 	 * will reach the receive queue and be purged by the socket destructor.
3928 	 *
3929 	 * We also still have packets pending on the receive queue and,
3930 	 * probably, our own packets waiting in device queues. sock_destroy
3931 	 * will drain the receive queue, but transmitted packets delay socket
3932 	 * destruction until the last reference is released.
3933 	 */
3934 
3935 	sock_orphan(sk);
3936 
3937 	xfrm_sk_free_policy(sk);
3938 
3939 	sock_put(sk);
3940 }
3941 EXPORT_SYMBOL(sk_common_release);
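/*
 * Illustrative sketch (assumption, hypothetical protocol): a simple
 * protocol's ->close handler usually ends up here once protocol-private
 * teardown is done:
 *
 *	static void example_close(struct sock *sk, long timeout)
 *	{
 *		(protocol-private cleanup goes here)
 *		sk_common_release(sk);
 *	}
 */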
3942 
3943 void sk_get_meminfo(const struct sock *sk, u32 *mem)
3944 {
3945 	memset(mem, 0, sizeof(*mem) * SK_MEMINFO_VARS);
3946 
3947 	mem[SK_MEMINFO_RMEM_ALLOC] = sk_rmem_alloc_get(sk);
3948 	mem[SK_MEMINFO_RCVBUF] = READ_ONCE(sk->sk_rcvbuf);
3949 	mem[SK_MEMINFO_WMEM_ALLOC] = sk_wmem_alloc_get(sk);
3950 	mem[SK_MEMINFO_SNDBUF] = READ_ONCE(sk->sk_sndbuf);
3951 	mem[SK_MEMINFO_FWD_ALLOC] = READ_ONCE(sk->sk_forward_alloc);
3952 	mem[SK_MEMINFO_WMEM_QUEUED] = READ_ONCE(sk->sk_wmem_queued);
3953 	mem[SK_MEMINFO_OPTMEM] = atomic_read(&sk->sk_omem_alloc);
3954 	mem[SK_MEMINFO_BACKLOG] = READ_ONCE(sk->sk_backlog.len);
3955 	mem[SK_MEMINFO_DROPS] = atomic_read(&sk->sk_drops);
3956 }
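/*
 * Illustrative sketch (assumption): sock_diag-style reporting fills the
 * fixed-size array and emits it as a single netlink attribute, roughly:
 *
 *	u32 mem[SK_MEMINFO_VARS];
 *
 *	sk_get_meminfo(sk, mem);
 *	nla_put(skb, INET_DIAG_SKMEMINFO, sizeof(mem), &mem);
 */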
3957 
3958 #ifdef CONFIG_PROC_FS
3959 static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR);
3960 
3961 int sock_prot_inuse_get(struct net *net, struct proto *prot)
3962 {
3963 	int cpu, idx = prot->inuse_idx;
3964 	int res = 0;
3965 
3966 	for_each_possible_cpu(cpu)
3967 		res += per_cpu_ptr(net->core.prot_inuse, cpu)->val[idx];
3968 
3969 	return res >= 0 ? res : 0;
3970 }
3971 EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
3972 
3973 int sock_inuse_get(struct net *net)
3974 {
3975 	int cpu, res = 0;
3976 
3977 	for_each_possible_cpu(cpu)
3978 		res += per_cpu_ptr(net->core.prot_inuse, cpu)->all;
3979 
3980 	return res;
3981 }
3982 EXPORT_SYMBOL_GPL(sock_inuse_get);
3983 
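/*
 * Illustrative sketch (assumption): protocols keep these per-cpu counters up
 * to date from their hash/unhash paths via sock_prot_inuse_add(), e.g.
 *
 *	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);	on hash
 *	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);	on unhash
 *
 * and the totals surface in /proc/net/protocols and /proc/net/sockstat.
 */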
3984 
3985 static int __net_init sock_inuse_init_net(struct net *net)
3986 {
3987 	net->core.prot_inuse = alloc_percpu(struct prot_inuse);
3988 	if (net->core.prot_inuse == NULL)
3989 		return -ENOMEM;
3990 	return 0;
3991 }
3992 
3993 static void __net_exit sock_inuse_exit_net(struct net *net)
3994 {
3995 	free_percpu(net->core.prot_inuse);
3996 }
3997 
3998 static struct pernet_operations net_inuse_ops = {
3999 	.init = sock_inuse_init_net,
4000 	.exit = sock_inuse_exit_net,
4001 };
4002 
4003 static __init int net_inuse_init(void)
4004 {
4005 	if (register_pernet_subsys(&net_inuse_ops))
4006 		panic("Cannot initialize net inuse counters");
4007 
4008 	return 0;
4009 }
4010 
4011 core_initcall(net_inuse_init);
4012 
4013 static int assign_proto_idx(struct proto *prot)
4014 {
4015 	prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR);
4016 
4017 	if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) {
4018 		pr_err("PROTO_INUSE_NR exhausted\n");
4019 		return -ENOSPC;
4020 	}
4021 
4022 	set_bit(prot->inuse_idx, proto_inuse_idx);
4023 	return 0;
4024 }
4025 
4026 static void release_proto_idx(struct proto *prot)
4027 {
4028 	if (prot->inuse_idx != PROTO_INUSE_NR - 1)
4029 		clear_bit(prot->inuse_idx, proto_inuse_idx);
4030 }
4031 #else
4032 static inline int assign_proto_idx(struct proto *prot)
4033 {
4034 	return 0;
4035 }
4036 
4037 static inline void release_proto_idx(struct proto *prot)
4038 {
4039 }
4040 
4041 #endif
4042 
4043 static void tw_prot_cleanup(struct timewait_sock_ops *twsk_prot)
4044 {
4045 	if (!twsk_prot)
4046 		return;
4047 	kfree(twsk_prot->twsk_slab_name);
4048 	twsk_prot->twsk_slab_name = NULL;
4049 	kmem_cache_destroy(twsk_prot->twsk_slab);
4050 	twsk_prot->twsk_slab = NULL;
4051 }
4052 
4053 static int tw_prot_init(const struct proto *prot)
4054 {
4055 	struct timewait_sock_ops *twsk_prot = prot->twsk_prot;
4056 
4057 	if (!twsk_prot)
4058 		return 0;
4059 
4060 	twsk_prot->twsk_slab_name = kasprintf(GFP_KERNEL, "tw_sock_%s",
4061 					      prot->name);
4062 	if (!twsk_prot->twsk_slab_name)
4063 		return -ENOMEM;
4064 
4065 	twsk_prot->twsk_slab =
4066 		kmem_cache_create(twsk_prot->twsk_slab_name,
4067 				  twsk_prot->twsk_obj_size, 0,
4068 				  SLAB_ACCOUNT | prot->slab_flags,
4069 				  NULL);
4070 	if (!twsk_prot->twsk_slab) {
4071 		pr_crit("%s: Can't create timewait sock SLAB cache!\n",
4072 			prot->name);
4073 		return -ENOMEM;
4074 	}
4075 
4076 	return 0;
4077 }
4078 
4079 static void req_prot_cleanup(struct request_sock_ops *rsk_prot)
4080 {
4081 	if (!rsk_prot)
4082 		return;
4083 	kfree(rsk_prot->slab_name);
4084 	rsk_prot->slab_name = NULL;
4085 	kmem_cache_destroy(rsk_prot->slab);
4086 	rsk_prot->slab = NULL;
4087 }
4088 
4089 static int req_prot_init(const struct proto *prot)
4090 {
4091 	struct request_sock_ops *rsk_prot = prot->rsk_prot;
4092 
4093 	if (!rsk_prot)
4094 		return 0;
4095 
4096 	rsk_prot->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s",
4097 					prot->name);
4098 	if (!rsk_prot->slab_name)
4099 		return -ENOMEM;
4100 
4101 	rsk_prot->slab = kmem_cache_create(rsk_prot->slab_name,
4102 					   rsk_prot->obj_size, 0,
4103 					   SLAB_ACCOUNT | prot->slab_flags,
4104 					   NULL);
4105 
4106 	if (!rsk_prot->slab) {
4107 		pr_crit("%s: Can't create request sock SLAB cache!\n",
4108 			prot->name);
4109 		return -ENOMEM;
4110 	}
4111 	return 0;
4112 }
4113 
4114 int proto_register(struct proto *prot, int alloc_slab)
4115 {
4116 	int ret = -ENOBUFS;
4117 
4118 	if (prot->memory_allocated && !prot->sysctl_mem) {
4119 		pr_err("%s: missing sysctl_mem\n", prot->name);
4120 		return -EINVAL;
4121 	}
4122 	if (prot->memory_allocated && !prot->per_cpu_fw_alloc) {
4123 		pr_err("%s: missing per_cpu_fw_alloc\n", prot->name);
4124 		return -EINVAL;
4125 	}
4126 	if (alloc_slab) {
4127 		prot->slab = kmem_cache_create_usercopy(prot->name,
4128 					prot->obj_size, 0,
4129 					SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT |
4130 					prot->slab_flags,
4131 					prot->useroffset, prot->usersize,
4132 					NULL);
4133 
4134 		if (prot->slab == NULL) {
4135 			pr_crit("%s: Can't create sock SLAB cache!\n",
4136 				prot->name);
4137 			goto out;
4138 		}
4139 
4140 		if (req_prot_init(prot))
4141 			goto out_free_request_sock_slab;
4142 
4143 		if (tw_prot_init(prot))
4144 			goto out_free_timewait_sock_slab;
4145 	}
4146 
4147 	mutex_lock(&proto_list_mutex);
4148 	ret = assign_proto_idx(prot);
4149 	if (ret) {
4150 		mutex_unlock(&proto_list_mutex);
4151 		goto out_free_timewait_sock_slab;
4152 	}
4153 	list_add(&prot->node, &proto_list);
4154 	mutex_unlock(&proto_list_mutex);
4155 	return ret;
4156 
4157 out_free_timewait_sock_slab:
4158 	if (alloc_slab)
4159 		tw_prot_cleanup(prot->twsk_prot);
4160 out_free_request_sock_slab:
4161 	if (alloc_slab) {
4162 		req_prot_cleanup(prot->rsk_prot);
4163 
4164 		kmem_cache_destroy(prot->slab);
4165 		prot->slab = NULL;
4166 	}
4167 out:
4168 	return ret;
4169 }
4170 EXPORT_SYMBOL(proto_register);
4171 
4172 void proto_unregister(struct proto *prot)
4173 {
4174 	mutex_lock(&proto_list_mutex);
4175 	release_proto_idx(prot);
4176 	list_del(&prot->node);
4177 	mutex_unlock(&proto_list_mutex);
4178 
4179 	kmem_cache_destroy(prot->slab);
4180 	prot->slab = NULL;
4181 
4182 	req_prot_cleanup(prot->rsk_prot);
4183 	tw_prot_cleanup(prot->twsk_prot);
4184 }
4185 EXPORT_SYMBOL(proto_unregister);
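/*
 * Illustrative sketch (assumption, hypothetical protocol module): a protocol
 * registers its struct proto on init and unregisters it on exit:
 *
 *	static struct proto example_proto = {
 *		.name		= "EXAMPLE",
 *		.owner		= THIS_MODULE,
 *		.obj_size	= sizeof(struct example_sock),
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return proto_register(&example_proto, 1);
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		proto_unregister(&example_proto);
 *	}
 */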
4186 
4187 int sock_load_diag_module(int family, int protocol)
4188 {
4189 	if (!protocol) {
4190 		if (!sock_is_registered(family))
4191 			return -ENOENT;
4192 
4193 		return request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
4194 				      NETLINK_SOCK_DIAG, family);
4195 	}
4196 
4197 #ifdef CONFIG_INET
4198 	if (family == AF_INET &&
4199 	    protocol != IPPROTO_RAW &&
4200 	    protocol < MAX_INET_PROTOS &&
4201 	    !rcu_access_pointer(inet_protos[protocol]))
4202 		return -ENOENT;
4203 #endif
4204 
4205 	return request_module("net-pf-%d-proto-%d-type-%d-%d", PF_NETLINK,
4206 			      NETLINK_SOCK_DIAG, family, protocol);
4207 }
4208 EXPORT_SYMBOL(sock_load_diag_module);
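/*
 * Illustrative sketch (assumption): the request_module() strings above match
 * the aliases declared by diag modules, so a module serving AF_INET/
 * IPPROTO_TCP requests (PF_NETLINK=16, NETLINK_SOCK_DIAG=4, AF_INET=2,
 * IPPROTO_TCP=6) would carry:
 *
 *	MODULE_ALIAS("net-pf-16-proto-4-type-2-6");
 */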
4209 
4210 #ifdef CONFIG_PROC_FS
4211 static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
4212 	__acquires(proto_list_mutex)
4213 {
4214 	mutex_lock(&proto_list_mutex);
4215 	return seq_list_start_head(&proto_list, *pos);
4216 }
4217 
4218 static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4219 {
4220 	return seq_list_next(v, &proto_list, pos);
4221 }
4222 
4223 static void proto_seq_stop(struct seq_file *seq, void *v)
4224 	__releases(proto_list_mutex)
4225 {
4226 	mutex_unlock(&proto_list_mutex);
4227 }
4228 
4229 static char proto_method_implemented(const void *method)
4230 {
4231 	return method == NULL ? 'n' : 'y';
4232 }
4233 static long sock_prot_memory_allocated(struct proto *proto)
4234 {
4235 	return proto->memory_allocated != NULL ? proto_memory_allocated(proto) : -1L;
4236 }
4237 
4238 static const char *sock_prot_memory_pressure(struct proto *proto)
4239 {
4240 	return proto->memory_pressure != NULL ?
4241 	proto_memory_pressure(proto) ? "yes" : "no" : "NI";
4242 }
4243 
4244 static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
4245 {
4247 	seq_printf(seq, "%-9s %4u %6d  %6ld   %-3s %6u   %-3s  %-10s "
4248 			"%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
4249 		   proto->name,
4250 		   proto->obj_size,
4251 		   sock_prot_inuse_get(seq_file_net(seq), proto),
4252 		   sock_prot_memory_allocated(proto),
4253 		   sock_prot_memory_pressure(proto),
4254 		   proto->max_header,
4255 		   proto->slab == NULL ? "no" : "yes",
4256 		   module_name(proto->owner),
4257 		   proto_method_implemented(proto->close),
4258 		   proto_method_implemented(proto->connect),
4259 		   proto_method_implemented(proto->disconnect),
4260 		   proto_method_implemented(proto->accept),
4261 		   proto_method_implemented(proto->ioctl),
4262 		   proto_method_implemented(proto->init),
4263 		   proto_method_implemented(proto->destroy),
4264 		   proto_method_implemented(proto->shutdown),
4265 		   proto_method_implemented(proto->setsockopt),
4266 		   proto_method_implemented(proto->getsockopt),
4267 		   proto_method_implemented(proto->sendmsg),
4268 		   proto_method_implemented(proto->recvmsg),
4269 		   proto_method_implemented(proto->bind),
4270 		   proto_method_implemented(proto->backlog_rcv),
4271 		   proto_method_implemented(proto->hash),
4272 		   proto_method_implemented(proto->unhash),
4273 		   proto_method_implemented(proto->get_port),
4274 		   proto_method_implemented(proto->enter_memory_pressure));
4275 }
4276 
4277 static int proto_seq_show(struct seq_file *seq, void *v)
4278 {
4279 	if (v == &proto_list)
4280 		seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s",
4281 			   "protocol",
4282 			   "size",
4283 			   "sockets",
4284 			   "memory",
4285 			   "press",
4286 			   "maxhdr",
4287 			   "slab",
4288 			   "module",
4289 			   "cl co di ac io in de sh ss gs se re bi br ha uh gp em\n");
4290 	else
4291 		proto_seq_printf(seq, list_entry(v, struct proto, node));
4292 	return 0;
4293 }
4294 
4295 static const struct seq_operations proto_seq_ops = {
4296 	.start  = proto_seq_start,
4297 	.next   = proto_seq_next,
4298 	.stop   = proto_seq_stop,
4299 	.show   = proto_seq_show,
4300 };
4301 
4302 static __net_init int proto_init_net(struct net *net)
4303 {
4304 	if (!proc_create_net("protocols", 0444, net->proc_net, &proto_seq_ops,
4305 			sizeof(struct seq_net_private)))
4306 		return -ENOMEM;
4307 
4308 	return 0;
4309 }
4310 
4311 static __net_exit void proto_exit_net(struct net *net)
4312 {
4313 	remove_proc_entry("protocols", net->proc_net);
4314 }
4315 
4317 static __net_initdata struct pernet_operations proto_net_ops = {
4318 	.init = proto_init_net,
4319 	.exit = proto_exit_net,
4320 };
4321 
4322 static int __init proto_init(void)
4323 {
4324 	return register_pernet_subsys(&proto_net_ops);
4325 }
4326 
4327 subsys_initcall(proto_init);
4328 
4329 #endif /* PROC_FS */
4330 
4331 #ifdef CONFIG_NET_RX_BUSY_POLL
4332 bool sk_busy_loop_end(void *p, unsigned long start_time)
4333 {
4334 	struct sock *sk = p;
4335 
4336 	if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
4337 		return true;
4338 
4339 	if (sk_is_udp(sk) &&
4340 	    !skb_queue_empty_lockless(&udp_sk(sk)->reader_queue))
4341 		return true;
4342 
4343 	return sk_busy_loop_timeout(sk, start_time);
4344 }
4345 EXPORT_SYMBOL(sk_busy_loop_end);
4346 #endif /* CONFIG_NET_RX_BUSY_POLL */
4347 
4348 int sock_bind_add(struct sock *sk, struct sockaddr *addr, int addr_len)
4349 {
4350 	if (!sk->sk_prot->bind_add)
4351 		return -EOPNOTSUPP;
4352 	return sk->sk_prot->bind_add(sk, addr, addr_len);
4353 }
4354 EXPORT_SYMBOL(sock_bind_add);
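/*
 * Illustrative sketch (assumption): an in-kernel user adding a second local
 * address to an already bound socket (SCTP-style multihoming) could do
 * roughly the following, with port and local_ip supplied by the caller;
 * protocols without a ->bind_add callback return -EOPNOTSUPP:
 *
 *	struct sockaddr_in addr = { .sin_family = AF_INET };
 *	int err;
 *
 *	addr.sin_port = htons(port);
 *	addr.sin_addr.s_addr = local_ip;
 *	err = sock_bind_add(sk, (struct sockaddr *)&addr, sizeof(addr));
 */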
4355 
4356 /* Copy 'size' bytes in from userspace, run the ioctl handler, and copy the result back to userspace */
4357 int sock_ioctl_inout(struct sock *sk, unsigned int cmd,
4358 		     void __user *arg, void *karg, size_t size)
4359 {
4360 	int ret;
4361 
4362 	if (copy_from_user(karg, arg, size))
4363 		return -EFAULT;
4364 
4365 	ret = READ_ONCE(sk->sk_prot)->ioctl(sk, cmd, karg);
4366 	if (ret)
4367 		return ret;
4368 
4369 	if (copy_to_user(arg, karg, size))
4370 		return -EFAULT;
4371 
4372 	return 0;
4373 }
4374 EXPORT_SYMBOL(sock_ioctl_inout);
4375 
4376 /* This is the most common ioctl prep function, where the result (4 bytes) is
4377  * copied back to userspace if the ioctl() returns successfully. No input is
4378  * copied from userspace.
4379  */
4380 static int sock_ioctl_out(struct sock *sk, unsigned int cmd, void __user *arg)
4381 {
4382 	int ret, karg = 0;
4383 
4384 	ret = READ_ONCE(sk->sk_prot)->ioctl(sk, cmd, &karg);
4385 	if (ret)
4386 		return ret;
4387 
4388 	return put_user(karg, (int __user *)arg);
4389 }
4390 
4391 /* A wrapper around sock ioctls, which copies the data from userspace
4392  * (depending on the protocol/ioctl), and copies back the result to userspace.
4393  * The main motivation for this function is to pass kernel memory to the
4394  * protocol ioctl callbacks, instead of userspace memory.
4395  */
4396 int sk_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
4397 {
4398 	int rc = 1;
4399 
4400 	if (sk->sk_type == SOCK_RAW && sk->sk_family == AF_INET)
4401 		rc = ipmr_sk_ioctl(sk, cmd, arg);
4402 	else if (sk->sk_type == SOCK_RAW && sk->sk_family == AF_INET6)
4403 		rc = ip6mr_sk_ioctl(sk, cmd, arg);
4404 	else if (sk_is_phonet(sk))
4405 		rc = phonet_sk_ioctl(sk, cmd, arg);
4406 
4407 	/* If the ioctl was processed, return its value */
4408 	if (rc <= 0)
4409 		return rc;
4410 
4411 	/* Otherwise call the default handler */
4412 	return sock_ioctl_out(sk, cmd, arg);
4413 }
4414 EXPORT_SYMBOL(sk_ioctl);
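/*
 * Illustrative sketch (assumption, hypothetical protocol): because sk_ioctl()
 * copies the argument in and out, a protocol's ->ioctl callback only ever
 * sees kernel memory, e.g. reporting queued receive bytes for SIOCINQ:
 *
 *	static int example_ioctl(struct sock *sk, int cmd, int *karg)
 *	{
 *		switch (cmd) {
 *		case SIOCINQ:
 *			*karg = sk_rmem_alloc_get(sk);
 *			return 0;
 *		default:
 *			return -ENOIOCTLCMD;
 *		}
 *	}
 */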
4415 
4416 static int __init sock_struct_check(void)
4417 {
4418 	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rx, sk_drops);
4419 	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rx, sk_peek_off);
4420 	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rx, sk_error_queue);
4421 	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rx, sk_receive_queue);
4422 	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rx, sk_backlog);
4423 
4424 	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rx, sk_rx_dst);
4425 	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rx, sk_rx_dst_ifindex);
4426 	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rx, sk_rx_dst_cookie);
4427 	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rx, sk_rcvbuf);
4428 	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rx, sk_filter);
4429 	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rx, sk_wq);
4430 	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rx, sk_data_ready);
4431 	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rx, sk_rcvtimeo);
4432 	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rx, sk_rcvlowat);
4433 
4434 	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rxtx, sk_err);
4435 	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rxtx, sk_socket);
4436 	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rxtx, sk_memcg);
4437 
4438 	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rxtx, sk_lock);
4439 	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rxtx, sk_reserved_mem);
4440 	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rxtx, sk_forward_alloc);
4441 	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rxtx, sk_tsflags);
4442 
4443 	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_omem_alloc);
4445 	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_sndbuf);
4446 	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_wmem_queued);
4447 	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_wmem_alloc);
4448 	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_tsq_flags);
4449 	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_send_head);
4450 	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_write_queue);
4451 	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_write_pending);
4452 	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_dst_pending_confirm);
4453 	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_pacing_status);
4454 	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_frag);
4455 	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_timer);
4456 	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_pacing_rate);
4457 	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_zckey);
4458 	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_tskey);
4459 
4460 	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_max_pacing_rate);
4461 	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_sndtimeo);
4462 	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_priority);
4463 	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_mark);
4464 	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_dst_cache);
4465 	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_route_caps);
4466 	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_gso_type);
4467 	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_gso_max_size);
4468 	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_allocation);
4469 	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_txhash);
4470 	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_gso_max_segs);
4471 	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_pacing_shift);
4472 	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_use_task_frag);
4473 	return 0;
4474 }
4475 
4476 core_initcall(sock_struct_check);
4477