xref: /linux/net/core/sock.c (revision a997157e42e3119b13c644549a3d8381a1d825d6)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * INET		An implementation of the TCP/IP protocol suite for the LINUX
4  *		operating system.  INET is implemented using the  BSD Socket
5  *		interface as the means of communication with the user level.
6  *
7  *		Generic socket support routines. Memory allocators, socket lock/release
8  *		handler for protocols to use and generic option handler.
9  *
10  * Authors:	Ross Biro
11  *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12  *		Florian La Roche, <flla@stud.uni-sb.de>
13  *		Alan Cox, <A.Cox@swansea.ac.uk>
14  *
15  * Fixes:
16  *		Alan Cox	: 	Numerous verify_area() problems
17  *		Alan Cox	:	Connecting on a connecting socket
18  *					now returns an error for tcp.
19  *		Alan Cox	:	sock->protocol is set correctly.
20  *					and is not sometimes left as 0.
21  *		Alan Cox	:	connect handles icmp errors on a
22  *					connect properly. Unfortunately there
23  *					is a restart syscall nasty there. I
24  *					can't match BSD without hacking the C
25  *					library. Ideas urgently sought!
26  *		Alan Cox	:	Disallow bind() to addresses that are
27  *					not ours - especially broadcast ones!!
28  *		Alan Cox	:	Socket 1024 _IS_ ok for users. (fencepost)
29  *		Alan Cox	:	sock_wfree/sock_rfree don't destroy sockets,
30  *					instead they leave that for the DESTROY timer.
31  *		Alan Cox	:	Clean up error flag in accept
32  *		Alan Cox	:	TCP ack handling is buggy, the DESTROY timer
33  *					was buggy. Put a remove_sock() in the handler
34  *					for memory when we hit 0. Also altered the timer
35  *					code. The ACK stuff can wait and needs major
36  *					TCP layer surgery.
37  *		Alan Cox	:	Fixed TCP ack bug, removed remove sock
38  *					and fixed timer/inet_bh race.
39  *		Alan Cox	:	Added zapped flag for TCP
40  *		Alan Cox	:	Move kfree_skb into skbuff.c and tidied up surplus code
41  *		Alan Cox	:	for new sk_buff allocations wmalloc/rmalloc now call alloc_skb
42  *		Alan Cox	:	kfree_s calls now are kfree_skbmem so we can track skb resources
43  *		Alan Cox	:	Supports socket option broadcast now as does udp. Packet and raw need fixing.
44  *		Alan Cox	:	Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so...
45  *		Rick Sladkey	:	Relaxed UDP rules for matching packets.
46  *		C.E.Hawkins	:	IFF_PROMISC/SIOCGHWADDR support
47  *	Pauline Middelink	:	identd support
48  *		Alan Cox	:	Fixed connect() taking signals I think.
49  *		Alan Cox	:	SO_LINGER supported
50  *		Alan Cox	:	Error reporting fixes
51  *		Anonymous	:	inet_create tidied up (sk->reuse setting)
52  *		Alan Cox	:	inet sockets don't set sk->type!
53  *		Alan Cox	:	Split socket option code
54  *		Alan Cox	:	Callbacks
55  *		Alan Cox	:	Nagle flag for Charles & Johannes stuff
56  *		Alex		:	Removed restriction on inet fioctl
57  *		Alan Cox	:	Splitting INET from NET core
58  *		Alan Cox	:	Fixed bogus SO_TYPE handling in getsockopt()
59  *		Adam Caldwell	:	Missing return in SO_DONTROUTE/SO_DEBUG code
60  *		Alan Cox	:	Split IP from generic code
61  *		Alan Cox	:	New kfree_skbmem()
62  *		Alan Cox	:	Make SO_DEBUG superuser only.
63  *		Alan Cox	:	Allow anyone to clear SO_DEBUG
64  *					(compatibility fix)
65  *		Alan Cox	:	Added optimistic memory grabbing for AF_UNIX throughput.
66  *		Alan Cox	:	Allocator for a socket is settable.
67  *		Alan Cox	:	SO_ERROR includes soft errors.
68  *		Alan Cox	:	Allow NULL arguments on some SO_ opts
69  *		Alan Cox	: 	Generic socket allocation to make hooks
70  *					easier (suggested by Craig Metz).
71  *		Michael Pall	:	SO_ERROR returns positive errno again
72  *              Steve Whitehouse:       Added default destructor to free
73  *                                      protocol private data.
74  *              Steve Whitehouse:       Added various other default routines
75  *                                      common to several socket families.
76  *              Chris Evans     :       Call suser() check last on F_SETOWN
77  *		Jay Schulist	:	Added SO_ATTACH_FILTER and SO_DETACH_FILTER.
78  *		Andi Kleen	:	Add sock_kmalloc()/sock_kfree_s()
79  *		Andi Kleen	:	Fix write_space callback
80  *		Chris Evans	:	Security fixes - signedness again
81  *		Arnaldo C. Melo :       cleanups, use skb_queue_purge
82  *
83  * To Fix:
84  */
85 
86 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
87 
88 #include <asm/unaligned.h>
89 #include <linux/capability.h>
90 #include <linux/errno.h>
91 #include <linux/errqueue.h>
92 #include <linux/types.h>
93 #include <linux/socket.h>
94 #include <linux/in.h>
95 #include <linux/kernel.h>
96 #include <linux/module.h>
97 #include <linux/proc_fs.h>
98 #include <linux/seq_file.h>
99 #include <linux/sched.h>
100 #include <linux/sched/mm.h>
101 #include <linux/timer.h>
102 #include <linux/string.h>
103 #include <linux/sockios.h>
104 #include <linux/net.h>
105 #include <linux/mm.h>
106 #include <linux/slab.h>
107 #include <linux/interrupt.h>
108 #include <linux/poll.h>
109 #include <linux/tcp.h>
110 #include <linux/init.h>
111 #include <linux/highmem.h>
112 #include <linux/user_namespace.h>
113 #include <linux/static_key.h>
114 #include <linux/memcontrol.h>
115 #include <linux/prefetch.h>
116 #include <linux/compat.h>
117 
118 #include <linux/uaccess.h>
119 
120 #include <linux/netdevice.h>
121 #include <net/protocol.h>
122 #include <linux/skbuff.h>
123 #include <net/net_namespace.h>
124 #include <net/request_sock.h>
125 #include <net/sock.h>
126 #include <linux/net_tstamp.h>
127 #include <net/xfrm.h>
128 #include <linux/ipsec.h>
129 #include <net/cls_cgroup.h>
130 #include <net/netprio_cgroup.h>
131 #include <linux/sock_diag.h>
132 
133 #include <linux/filter.h>
134 #include <net/sock_reuseport.h>
135 #include <net/bpf_sk_storage.h>
136 
137 #include <trace/events/sock.h>
138 
139 #include <net/tcp.h>
140 #include <net/busy_poll.h>
141 
142 #include <linux/ethtool.h>
143 
144 #include "dev.h"
145 
146 static DEFINE_MUTEX(proto_list_mutex);
147 static LIST_HEAD(proto_list);
148 
149 /**
150  * sk_ns_capable - General socket capability test
151  * @sk: Socket to use a capability on or through
152  * @user_ns: The user namespace of the capability to use
153  * @cap: The capability to use
154  *
155  * Test to see if the opener of the socket had the capability @cap in
156  * the user namespace @user_ns when the socket was created and that the
157  * current process still has it.
158  */
159 bool sk_ns_capable(const struct sock *sk,
160 		   struct user_namespace *user_ns, int cap)
161 {
162 	return file_ns_capable(sk->sk_socket->file, user_ns, cap) &&
163 		ns_capable(user_ns, cap);
164 }
165 EXPORT_SYMBOL(sk_ns_capable);
166 
167 /**
168  * sk_capable - Socket global capability test
169  * @sk: Socket to use a capability on or through
170  * @cap: The global capability to use
171  *
172  * Test to see if the opener of the socket had the capability @cap in
173  * all user namespaces when the socket was created and that the current
174  * process still has it.
175  */
176 bool sk_capable(const struct sock *sk, int cap)
177 {
178 	return sk_ns_capable(sk, &init_user_ns, cap);
179 }
180 EXPORT_SYMBOL(sk_capable);
181 
182 /**
183  * sk_net_capable - Network namespace socket capability test
184  * @sk: Socket to use a capability on or through
185  * @cap: The capability to use
186  *
187  * Test to see if the opener of the socket had the capability @cap over
188  * the network namespace the socket is a member of when the socket was
189  * created and that the current process still has it.
190  */
191 bool sk_net_capable(const struct sock *sk, int cap)
192 {
193 	return sk_ns_capable(sk, sock_net(sk)->user_ns, cap);
194 }
195 EXPORT_SYMBOL(sk_net_capable);
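
/*
 * Example (a minimal sketch): a protocol handler that wants to restrict a
 * privileged operation to sockets whose opener had CAP_NET_ADMIN over the
 * socket's network namespace could write:
 *
 *	if (!sk_net_capable(sk, CAP_NET_ADMIN))
 *		return -EPERM;
 *
 * sk_capable() and sk_ns_capable() work the same way against &init_user_ns
 * and an explicitly supplied user namespace, respectively.
 */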
196 
197 /*
198  * Each address family might have different locking rules, so we have
199  * one slock key per address family and separate keys for internal and
200  * userspace sockets.
201  */
202 static struct lock_class_key af_family_keys[AF_MAX];
203 static struct lock_class_key af_family_kern_keys[AF_MAX];
204 static struct lock_class_key af_family_slock_keys[AF_MAX];
205 static struct lock_class_key af_family_kern_slock_keys[AF_MAX];
206 
207 /*
208  * Make lock validator output more readable. (we pre-construct these
209  * strings at build time, so that runtime initialization of socket
210  * locks is fast):
211  */
212 
213 #define _sock_locks(x)						  \
214   x "AF_UNSPEC",	x "AF_UNIX"     ,	x "AF_INET"     , \
215   x "AF_AX25"  ,	x "AF_IPX"      ,	x "AF_APPLETALK", \
216   x "AF_NETROM",	x "AF_BRIDGE"   ,	x "AF_ATMPVC"   , \
217   x "AF_X25"   ,	x "AF_INET6"    ,	x "AF_ROSE"     , \
218   x "AF_DECnet",	x "AF_NETBEUI"  ,	x "AF_SECURITY" , \
219   x "AF_KEY"   ,	x "AF_NETLINK"  ,	x "AF_PACKET"   , \
220   x "AF_ASH"   ,	x "AF_ECONET"   ,	x "AF_ATMSVC"   , \
221   x "AF_RDS"   ,	x "AF_SNA"      ,	x "AF_IRDA"     , \
222   x "AF_PPPOX" ,	x "AF_WANPIPE"  ,	x "AF_LLC"      , \
223   x "27"       ,	x "28"          ,	x "AF_CAN"      , \
224   x "AF_TIPC"  ,	x "AF_BLUETOOTH",	x "IUCV"        , \
225   x "AF_RXRPC" ,	x "AF_ISDN"     ,	x "AF_PHONET"   , \
226   x "AF_IEEE802154",	x "AF_CAIF"	,	x "AF_ALG"      , \
227   x "AF_NFC"   ,	x "AF_VSOCK"    ,	x "AF_KCM"      , \
228   x "AF_QIPCRTR",	x "AF_SMC"	,	x "AF_XDP"	, \
229   x "AF_MCTP"  , \
230   x "AF_MAX"
231 
232 static const char *const af_family_key_strings[AF_MAX+1] = {
233 	_sock_locks("sk_lock-")
234 };
235 static const char *const af_family_slock_key_strings[AF_MAX+1] = {
236 	_sock_locks("slock-")
237 };
238 static const char *const af_family_clock_key_strings[AF_MAX+1] = {
239 	_sock_locks("clock-")
240 };
241 
242 static const char *const af_family_kern_key_strings[AF_MAX+1] = {
243 	_sock_locks("k-sk_lock-")
244 };
245 static const char *const af_family_kern_slock_key_strings[AF_MAX+1] = {
246 	_sock_locks("k-slock-")
247 };
248 static const char *const af_family_kern_clock_key_strings[AF_MAX+1] = {
249 	_sock_locks("k-clock-")
250 };
251 static const char *const af_family_rlock_key_strings[AF_MAX+1] = {
252 	_sock_locks("rlock-")
253 };
254 static const char *const af_family_wlock_key_strings[AF_MAX+1] = {
255 	_sock_locks("wlock-")
256 };
257 static const char *const af_family_elock_key_strings[AF_MAX+1] = {
258 	_sock_locks("elock-")
259 };
260 
261 /*
262  * sk_callback_lock and sk queues locking rules are per-address-family,
263  * so split the lock classes by using a per-AF key:
264  */
265 static struct lock_class_key af_callback_keys[AF_MAX];
266 static struct lock_class_key af_rlock_keys[AF_MAX];
267 static struct lock_class_key af_wlock_keys[AF_MAX];
268 static struct lock_class_key af_elock_keys[AF_MAX];
269 static struct lock_class_key af_kern_callback_keys[AF_MAX];
270 
271 /* Run time adjustable parameters. */
272 __u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX;
273 EXPORT_SYMBOL(sysctl_wmem_max);
274 __u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
275 EXPORT_SYMBOL(sysctl_rmem_max);
276 __u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
277 __u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;
278 
279 /* Maximal space eaten by iovec or ancillary data plus some space */
280 int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
281 EXPORT_SYMBOL(sysctl_optmem_max);
282 
283 int sysctl_tstamp_allow_data __read_mostly = 1;
284 
285 DEFINE_STATIC_KEY_FALSE(memalloc_socks_key);
286 EXPORT_SYMBOL_GPL(memalloc_socks_key);
287 
288 /**
289  * sk_set_memalloc - sets %SOCK_MEMALLOC
290  * @sk: socket to set it on
291  *
292  * Set %SOCK_MEMALLOC on a socket for access to emergency reserves.
293  * It's the responsibility of the admin to adjust min_free_kbytes
294  * to meet the requirements.
295  */
296 void sk_set_memalloc(struct sock *sk)
297 {
298 	sock_set_flag(sk, SOCK_MEMALLOC);
299 	sk->sk_allocation |= __GFP_MEMALLOC;
300 	static_branch_inc(&memalloc_socks_key);
301 }
302 EXPORT_SYMBOL_GPL(sk_set_memalloc);
303 
304 void sk_clear_memalloc(struct sock *sk)
305 {
306 	sock_reset_flag(sk, SOCK_MEMALLOC);
307 	sk->sk_allocation &= ~__GFP_MEMALLOC;
308 	static_branch_dec(&memalloc_socks_key);
309 
310 	/*
311 	 * SOCK_MEMALLOC is allowed to ignore rmem limits to ensure forward
312 	 * progress of swapping. SOCK_MEMALLOC may be cleared while
313 	 * it has rmem allocations due to the last swapfile being deactivated
314 	 * but there is a risk that the socket is unusable due to exceeding
315 	 * the rmem limits. Reclaim the reserves and obey rmem limits again.
316 	 */
317 	sk_mem_reclaim(sk);
318 }
319 EXPORT_SYMBOL_GPL(sk_clear_memalloc);
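
/*
 * Example (sketch): a kernel user whose socket carries swap traffic, such
 * as a swap-over-NFS style transport ("sk" here is a hypothetical kernel
 * socket created by that transport), brackets the socket's lifetime with:
 *
 *	sk_set_memalloc(sk);
 *	... transfer pages while the system is under memory pressure ...
 *	sk_clear_memalloc(sk);
 */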
320 
321 int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
322 {
323 	int ret;
324 	unsigned int noreclaim_flag;
325 
326 	/* these should have been dropped before queueing */
327 	BUG_ON(!sock_flag(sk, SOCK_MEMALLOC));
328 
329 	noreclaim_flag = memalloc_noreclaim_save();
330 	ret = INDIRECT_CALL_INET(sk->sk_backlog_rcv,
331 				 tcp_v6_do_rcv,
332 				 tcp_v4_do_rcv,
333 				 sk, skb);
334 	memalloc_noreclaim_restore(noreclaim_flag);
335 
336 	return ret;
337 }
338 EXPORT_SYMBOL(__sk_backlog_rcv);
339 
340 void sk_error_report(struct sock *sk)
341 {
342 	sk->sk_error_report(sk);
343 
344 	switch (sk->sk_family) {
345 	case AF_INET:
346 		fallthrough;
347 	case AF_INET6:
348 		trace_inet_sk_error_report(sk);
349 		break;
350 	default:
351 		break;
352 	}
353 }
354 EXPORT_SYMBOL(sk_error_report);
355 
356 int sock_get_timeout(long timeo, void *optval, bool old_timeval)
357 {
358 	struct __kernel_sock_timeval tv;
359 
360 	if (timeo == MAX_SCHEDULE_TIMEOUT) {
361 		tv.tv_sec = 0;
362 		tv.tv_usec = 0;
363 	} else {
364 		tv.tv_sec = timeo / HZ;
365 		tv.tv_usec = ((timeo % HZ) * USEC_PER_SEC) / HZ;
366 	}
367 
368 	if (old_timeval && in_compat_syscall() && !COMPAT_USE_64BIT_TIME) {
369 		struct old_timeval32 tv32 = { tv.tv_sec, tv.tv_usec };
370 		*(struct old_timeval32 *)optval = tv32;
371 		return sizeof(tv32);
372 	}
373 
374 	if (old_timeval) {
375 		struct __kernel_old_timeval old_tv;
376 		old_tv.tv_sec = tv.tv_sec;
377 		old_tv.tv_usec = tv.tv_usec;
378 		*(struct __kernel_old_timeval *)optval = old_tv;
379 		return sizeof(old_tv);
380 	}
381 
382 	*(struct __kernel_sock_timeval *)optval = tv;
383 	return sizeof(tv);
384 }
385 EXPORT_SYMBOL(sock_get_timeout);
386 
387 int sock_copy_user_timeval(struct __kernel_sock_timeval *tv,
388 			   sockptr_t optval, int optlen, bool old_timeval)
389 {
390 	if (old_timeval && in_compat_syscall() && !COMPAT_USE_64BIT_TIME) {
391 		struct old_timeval32 tv32;
392 
393 		if (optlen < sizeof(tv32))
394 			return -EINVAL;
395 
396 		if (copy_from_sockptr(&tv32, optval, sizeof(tv32)))
397 			return -EFAULT;
398 		tv->tv_sec = tv32.tv_sec;
399 		tv->tv_usec = tv32.tv_usec;
400 	} else if (old_timeval) {
401 		struct __kernel_old_timeval old_tv;
402 
403 		if (optlen < sizeof(old_tv))
404 			return -EINVAL;
405 		if (copy_from_sockptr(&old_tv, optval, sizeof(old_tv)))
406 			return -EFAULT;
407 		tv->tv_sec = old_tv.tv_sec;
408 		tv->tv_usec = old_tv.tv_usec;
409 	} else {
410 		if (optlen < sizeof(*tv))
411 			return -EINVAL;
412 		if (copy_from_sockptr(tv, optval, sizeof(*tv)))
413 			return -EFAULT;
414 	}
415 
416 	return 0;
417 }
418 EXPORT_SYMBOL(sock_copy_user_timeval);
419 
420 static int sock_set_timeout(long *timeo_p, sockptr_t optval, int optlen,
421 			    bool old_timeval)
422 {
423 	struct __kernel_sock_timeval tv;
424 	int err = sock_copy_user_timeval(&tv, optval, optlen, old_timeval);
425 
426 	if (err)
427 		return err;
428 
429 	if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC)
430 		return -EDOM;
431 
432 	if (tv.tv_sec < 0) {
433 		static int warned __read_mostly;
434 
435 		*timeo_p = 0;
436 		if (warned < 10 && net_ratelimit()) {
437 			warned++;
438 			pr_info("%s: `%s' (pid %d) tries to set negative timeout\n",
439 				__func__, current->comm, task_pid_nr(current));
440 		}
441 		return 0;
442 	}
443 	*timeo_p = MAX_SCHEDULE_TIMEOUT;
444 	if (tv.tv_sec == 0 && tv.tv_usec == 0)
445 		return 0;
446 	if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT / HZ - 1))
447 		*timeo_p = tv.tv_sec * HZ + DIV_ROUND_UP((unsigned long)tv.tv_usec, USEC_PER_SEC / HZ);
448 	return 0;
449 }
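
/*
 * Worked example for the conversion above, assuming HZ == 1000: a user
 * timeout of { .tv_sec = 1, .tv_usec = 500000 } is stored as
 *
 *	*timeo_p = 1 * HZ + DIV_ROUND_UP(500000, USEC_PER_SEC / HZ)
 *		 = 1000 + 500 = 1500 jiffies
 *
 * while { 0, 0 } means "wait forever" and is kept as MAX_SCHEDULE_TIMEOUT.
 */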
450 
451 static bool sock_needs_netstamp(const struct sock *sk)
452 {
453 	switch (sk->sk_family) {
454 	case AF_UNSPEC:
455 	case AF_UNIX:
456 		return false;
457 	default:
458 		return true;
459 	}
460 }
461 
462 static void sock_disable_timestamp(struct sock *sk, unsigned long flags)
463 {
464 	if (sk->sk_flags & flags) {
465 		sk->sk_flags &= ~flags;
466 		if (sock_needs_netstamp(sk) &&
467 		    !(sk->sk_flags & SK_FLAGS_TIMESTAMP))
468 			net_disable_timestamp();
469 	}
470 }
471 
472 
473 int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
474 {
475 	unsigned long flags;
476 	struct sk_buff_head *list = &sk->sk_receive_queue;
477 
478 	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
479 		atomic_inc(&sk->sk_drops);
480 		trace_sock_rcvqueue_full(sk, skb);
481 		return -ENOMEM;
482 	}
483 
484 	if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
485 		atomic_inc(&sk->sk_drops);
486 		return -ENOBUFS;
487 	}
488 
489 	skb->dev = NULL;
490 	skb_set_owner_r(skb, sk);
491 
492 	/* We escape from the RCU-protected region, so make sure we don't
493 	 * leak a non-refcounted dst.
494 	 */
495 	skb_dst_force(skb);
496 
497 	spin_lock_irqsave(&list->lock, flags);
498 	sock_skb_set_dropcount(sk, skb);
499 	__skb_queue_tail(list, skb);
500 	spin_unlock_irqrestore(&list->lock, flags);
501 
502 	if (!sock_flag(sk, SOCK_DEAD))
503 		sk->sk_data_ready(sk);
504 	return 0;
505 }
506 EXPORT_SYMBOL(__sock_queue_rcv_skb);
507 
508 int sock_queue_rcv_skb_reason(struct sock *sk, struct sk_buff *skb,
509 			      enum skb_drop_reason *reason)
510 {
511 	enum skb_drop_reason drop_reason;
512 	int err;
513 
514 	err = sk_filter(sk, skb);
515 	if (err) {
516 		drop_reason = SKB_DROP_REASON_SOCKET_FILTER;
517 		goto out;
518 	}
519 	err = __sock_queue_rcv_skb(sk, skb);
520 	switch (err) {
521 	case -ENOMEM:
522 		drop_reason = SKB_DROP_REASON_SOCKET_RCVBUFF;
523 		break;
524 	case -ENOBUFS:
525 		drop_reason = SKB_DROP_REASON_PROTO_MEM;
526 		break;
527 	default:
528 		drop_reason = SKB_NOT_DROPPED_YET;
529 		break;
530 	}
531 out:
532 	if (reason)
533 		*reason = drop_reason;
534 	return err;
535 }
536 EXPORT_SYMBOL(sock_queue_rcv_skb_reason);
537 
538 int __sk_receive_skb(struct sock *sk, struct sk_buff *skb,
539 		     const int nested, unsigned int trim_cap, bool refcounted)
540 {
541 	int rc = NET_RX_SUCCESS;
542 
543 	if (sk_filter_trim_cap(sk, skb, trim_cap))
544 		goto discard_and_relse;
545 
546 	skb->dev = NULL;
547 
548 	if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
549 		atomic_inc(&sk->sk_drops);
550 		goto discard_and_relse;
551 	}
552 	if (nested)
553 		bh_lock_sock_nested(sk);
554 	else
555 		bh_lock_sock(sk);
556 	if (!sock_owned_by_user(sk)) {
557 		/*
558 		 * trylock + unlock semantics:
559 		 */
560 		mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);
561 
562 		rc = sk_backlog_rcv(sk, skb);
563 
564 		mutex_release(&sk->sk_lock.dep_map, _RET_IP_);
565 	} else if (sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf))) {
566 		bh_unlock_sock(sk);
567 		atomic_inc(&sk->sk_drops);
568 		goto discard_and_relse;
569 	}
570 
571 	bh_unlock_sock(sk);
572 out:
573 	if (refcounted)
574 		sock_put(sk);
575 	return rc;
576 discard_and_relse:
577 	kfree_skb(skb);
578 	goto out;
579 }
580 EXPORT_SYMBOL(__sk_receive_skb);
581 
582 INDIRECT_CALLABLE_DECLARE(struct dst_entry *ip6_dst_check(struct dst_entry *,
583 							  u32));
584 INDIRECT_CALLABLE_DECLARE(struct dst_entry *ipv4_dst_check(struct dst_entry *,
585 							   u32));
586 struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
587 {
588 	struct dst_entry *dst = __sk_dst_get(sk);
589 
590 	if (dst && dst->obsolete &&
591 	    INDIRECT_CALL_INET(dst->ops->check, ip6_dst_check, ipv4_dst_check,
592 			       dst, cookie) == NULL) {
593 		sk_tx_queue_clear(sk);
594 		sk->sk_dst_pending_confirm = 0;
595 		RCU_INIT_POINTER(sk->sk_dst_cache, NULL);
596 		dst_release(dst);
597 		return NULL;
598 	}
599 
600 	return dst;
601 }
602 EXPORT_SYMBOL(__sk_dst_check);
603 
604 struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
605 {
606 	struct dst_entry *dst = sk_dst_get(sk);
607 
608 	if (dst && dst->obsolete &&
609 	    INDIRECT_CALL_INET(dst->ops->check, ip6_dst_check, ipv4_dst_check,
610 			       dst, cookie) == NULL) {
611 		sk_dst_reset(sk);
612 		dst_release(dst);
613 		return NULL;
614 	}
615 
616 	return dst;
617 }
618 EXPORT_SYMBOL(sk_dst_check);
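
/*
 * Typical use (sketch): a transmit path revalidates its cached route before
 * each use and falls back to a fresh lookup when the cache was invalidated.
 * Here "cookie" is whatever the protocol stored when the dst was installed
 * and do_route_lookup() is a hypothetical stand-in for the protocol's
 * routing function:
 *
 *	dst = sk_dst_check(sk, cookie);
 *	if (!dst)
 *		dst = do_route_lookup(sk);
 */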
619 
620 static int sock_bindtoindex_locked(struct sock *sk, int ifindex)
621 {
622 	int ret = -ENOPROTOOPT;
623 #ifdef CONFIG_NETDEVICES
624 	struct net *net = sock_net(sk);
625 
626 	/* Sorry... */
627 	ret = -EPERM;
628 	if (sk->sk_bound_dev_if && !ns_capable(net->user_ns, CAP_NET_RAW))
629 		goto out;
630 
631 	ret = -EINVAL;
632 	if (ifindex < 0)
633 		goto out;
634 
635 	sk->sk_bound_dev_if = ifindex;
636 	if (sk->sk_prot->rehash)
637 		sk->sk_prot->rehash(sk);
638 	sk_dst_reset(sk);
639 
640 	ret = 0;
641 
642 out:
643 #endif
644 
645 	return ret;
646 }
647 
648 int sock_bindtoindex(struct sock *sk, int ifindex, bool lock_sk)
649 {
650 	int ret;
651 
652 	if (lock_sk)
653 		lock_sock(sk);
654 	ret = sock_bindtoindex_locked(sk, ifindex);
655 	if (lock_sk)
656 		release_sock(sk);
657 
658 	return ret;
659 }
660 EXPORT_SYMBOL(sock_bindtoindex);
661 
662 static int sock_setbindtodevice(struct sock *sk, sockptr_t optval, int optlen)
663 {
664 	int ret = -ENOPROTOOPT;
665 #ifdef CONFIG_NETDEVICES
666 	struct net *net = sock_net(sk);
667 	char devname[IFNAMSIZ];
668 	int index;
669 
670 	ret = -EINVAL;
671 	if (optlen < 0)
672 		goto out;
673 
674 	/* Bind this socket to a particular device like "eth0",
675 	 * as specified in the passed interface name. If the
676 	 * name is "" or the option length is zero the socket
677 	 * is not bound.
678 	 */
679 	if (optlen > IFNAMSIZ - 1)
680 		optlen = IFNAMSIZ - 1;
681 	memset(devname, 0, sizeof(devname));
682 
683 	ret = -EFAULT;
684 	if (copy_from_sockptr(devname, optval, optlen))
685 		goto out;
686 
687 	index = 0;
688 	if (devname[0] != '\0') {
689 		struct net_device *dev;
690 
691 		rcu_read_lock();
692 		dev = dev_get_by_name_rcu(net, devname);
693 		if (dev)
694 			index = dev->ifindex;
695 		rcu_read_unlock();
696 		ret = -ENODEV;
697 		if (!dev)
698 			goto out;
699 	}
700 
701 	return sock_bindtoindex(sk, index, true);
702 out:
703 #endif
704 
705 	return ret;
706 }
707 
708 static int sock_getbindtodevice(struct sock *sk, char __user *optval,
709 				int __user *optlen, int len)
710 {
711 	int ret = -ENOPROTOOPT;
712 #ifdef CONFIG_NETDEVICES
713 	struct net *net = sock_net(sk);
714 	char devname[IFNAMSIZ];
715 
716 	if (sk->sk_bound_dev_if == 0) {
717 		len = 0;
718 		goto zero;
719 	}
720 
721 	ret = -EINVAL;
722 	if (len < IFNAMSIZ)
723 		goto out;
724 
725 	ret = netdev_get_name(net, devname, sk->sk_bound_dev_if);
726 	if (ret)
727 		goto out;
728 
729 	len = strlen(devname) + 1;
730 
731 	ret = -EFAULT;
732 	if (copy_to_user(optval, devname, len))
733 		goto out;
734 
735 zero:
736 	ret = -EFAULT;
737 	if (put_user(len, optlen))
738 		goto out;
739 
740 	ret = 0;
741 
742 out:
743 #endif
744 
745 	return ret;
746 }
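
/*
 * Example (userspace view, minimal sketch; error handling omitted): binding
 * a socket to the illustrative interface "eth0" and reading the binding
 * back lands in the two helpers above:
 *
 *	char name[IFNAMSIZ];
 *	socklen_t len = sizeof(name);
 *
 *	setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE, "eth0", strlen("eth0") + 1);
 *	getsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE, name, &len);
 */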
747 
748 bool sk_mc_loop(struct sock *sk)
749 {
750 	if (dev_recursion_level())
751 		return false;
752 	if (!sk)
753 		return true;
754 	switch (sk->sk_family) {
755 	case AF_INET:
756 		return inet_sk(sk)->mc_loop;
757 #if IS_ENABLED(CONFIG_IPV6)
758 	case AF_INET6:
759 		return inet6_sk(sk)->mc_loop;
760 #endif
761 	}
762 	WARN_ON_ONCE(1);
763 	return true;
764 }
765 EXPORT_SYMBOL(sk_mc_loop);
766 
767 void sock_set_reuseaddr(struct sock *sk)
768 {
769 	lock_sock(sk);
770 	sk->sk_reuse = SK_CAN_REUSE;
771 	release_sock(sk);
772 }
773 EXPORT_SYMBOL(sock_set_reuseaddr);
774 
775 void sock_set_reuseport(struct sock *sk)
776 {
777 	lock_sock(sk);
778 	sk->sk_reuseport = true;
779 	release_sock(sk);
780 }
781 EXPORT_SYMBOL(sock_set_reuseport);
782 
783 void sock_no_linger(struct sock *sk)
784 {
785 	lock_sock(sk);
786 	sk->sk_lingertime = 0;
787 	sock_set_flag(sk, SOCK_LINGER);
788 	release_sock(sk);
789 }
790 EXPORT_SYMBOL(sock_no_linger);
791 
792 void sock_set_priority(struct sock *sk, u32 priority)
793 {
794 	lock_sock(sk);
795 	sk->sk_priority = priority;
796 	release_sock(sk);
797 }
798 EXPORT_SYMBOL(sock_set_priority);
799 
800 void sock_set_sndtimeo(struct sock *sk, s64 secs)
801 {
802 	lock_sock(sk);
803 	if (secs && secs < MAX_SCHEDULE_TIMEOUT / HZ - 1)
804 		sk->sk_sndtimeo = secs * HZ;
805 	else
806 		sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;
807 	release_sock(sk);
808 }
809 EXPORT_SYMBOL(sock_set_sndtimeo);
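
/*
 * The sock_set_*() helpers above exist so that in-kernel socket users
 * (storage and network transports, for instance) can set common options
 * without going through setsockopt(). A sketch of a kernel TCP client
 * preparing its socket ("sock" is a hypothetical struct socket *):
 *
 *	sock_set_reuseaddr(sock->sk);
 *	sock_no_linger(sock->sk);
 *	sock_set_sndtimeo(sock->sk, 5);
 */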
810 
811 static void __sock_set_timestamps(struct sock *sk, bool val, bool new, bool ns)
812 {
813 	if (val)  {
814 		sock_valbool_flag(sk, SOCK_TSTAMP_NEW, new);
815 		sock_valbool_flag(sk, SOCK_RCVTSTAMPNS, ns);
816 		sock_set_flag(sk, SOCK_RCVTSTAMP);
817 		sock_enable_timestamp(sk, SOCK_TIMESTAMP);
818 	} else {
819 		sock_reset_flag(sk, SOCK_RCVTSTAMP);
820 		sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
821 	}
822 }
823 
824 void sock_enable_timestamps(struct sock *sk)
825 {
826 	lock_sock(sk);
827 	__sock_set_timestamps(sk, true, false, true);
828 	release_sock(sk);
829 }
830 EXPORT_SYMBOL(sock_enable_timestamps);
831 
832 void sock_set_timestamp(struct sock *sk, int optname, bool valbool)
833 {
834 	switch (optname) {
835 	case SO_TIMESTAMP_OLD:
836 		__sock_set_timestamps(sk, valbool, false, false);
837 		break;
838 	case SO_TIMESTAMP_NEW:
839 		__sock_set_timestamps(sk, valbool, true, false);
840 		break;
841 	case SO_TIMESTAMPNS_OLD:
842 		__sock_set_timestamps(sk, valbool, false, true);
843 		break;
844 	case SO_TIMESTAMPNS_NEW:
845 		__sock_set_timestamps(sk, valbool, true, true);
846 		break;
847 	}
848 }
849 
850 static int sock_timestamping_bind_phc(struct sock *sk, int phc_index)
851 {
852 	struct net *net = sock_net(sk);
853 	struct net_device *dev = NULL;
854 	bool match = false;
855 	int *vclock_index;
856 	int i, num;
857 
858 	if (sk->sk_bound_dev_if)
859 		dev = dev_get_by_index(net, sk->sk_bound_dev_if);
860 
861 	if (!dev) {
862 		pr_err("%s: sock not bound to a device\n", __func__);
863 		return -EOPNOTSUPP;
864 	}
865 
866 	num = ethtool_get_phc_vclocks(dev, &vclock_index);
867 	dev_put(dev);
868 
869 	for (i = 0; i < num; i++) {
870 		if (*(vclock_index + i) == phc_index) {
871 			match = true;
872 			break;
873 		}
874 	}
875 
876 	if (num > 0)
877 		kfree(vclock_index);
878 
879 	if (!match)
880 		return -EINVAL;
881 
882 	sk->sk_bind_phc = phc_index;
883 
884 	return 0;
885 }
886 
887 int sock_set_timestamping(struct sock *sk, int optname,
888 			  struct so_timestamping timestamping)
889 {
890 	int val = timestamping.flags;
891 	int ret;
892 
893 	if (val & ~SOF_TIMESTAMPING_MASK)
894 		return -EINVAL;
895 
896 	if (val & SOF_TIMESTAMPING_OPT_ID &&
897 	    !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)) {
898 		if (sk_is_tcp(sk)) {
899 			if ((1 << sk->sk_state) &
900 			    (TCPF_CLOSE | TCPF_LISTEN))
901 				return -EINVAL;
902 			atomic_set(&sk->sk_tskey, tcp_sk(sk)->snd_una);
903 		} else {
904 			atomic_set(&sk->sk_tskey, 0);
905 		}
906 	}
907 
908 	if (val & SOF_TIMESTAMPING_OPT_STATS &&
909 	    !(val & SOF_TIMESTAMPING_OPT_TSONLY))
910 		return -EINVAL;
911 
912 	if (val & SOF_TIMESTAMPING_BIND_PHC) {
913 		ret = sock_timestamping_bind_phc(sk, timestamping.bind_phc);
914 		if (ret)
915 			return ret;
916 	}
917 
918 	sk->sk_tsflags = val;
919 	sock_valbool_flag(sk, SOCK_TSTAMP_NEW, optname == SO_TIMESTAMPING_NEW);
920 
921 	if (val & SOF_TIMESTAMPING_RX_SOFTWARE)
922 		sock_enable_timestamp(sk,
923 				      SOCK_TIMESTAMPING_RX_SOFTWARE);
924 	else
925 		sock_disable_timestamp(sk,
926 				       (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE));
927 	return 0;
928 }
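
/*
 * Example (userspace view, sketch): requesting hardware TX/RX timestamps
 * bound to PHC vclock index 1 on an interface-bound socket; the vclock
 * index is illustrative and must match one reported by ethtool for the
 * bound device:
 *
 *	struct so_timestamping ts = {
 *		.flags = SOF_TIMESTAMPING_TX_HARDWARE |
 *			 SOF_TIMESTAMPING_RX_HARDWARE |
 *			 SOF_TIMESTAMPING_RAW_HARDWARE |
 *			 SOF_TIMESTAMPING_BIND_PHC,
 *		.bind_phc = 1,
 *	};
 *
 *	setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &ts, sizeof(ts));
 */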
929 
930 void sock_set_keepalive(struct sock *sk)
931 {
932 	lock_sock(sk);
933 	if (sk->sk_prot->keepalive)
934 		sk->sk_prot->keepalive(sk, true);
935 	sock_valbool_flag(sk, SOCK_KEEPOPEN, true);
936 	release_sock(sk);
937 }
938 EXPORT_SYMBOL(sock_set_keepalive);
939 
940 static void __sock_set_rcvbuf(struct sock *sk, int val)
941 {
942 	/* Ensure val * 2 fits into an int, to prevent max_t() from treating it
943 	 * as a negative value.
944 	 */
945 	val = min_t(int, val, INT_MAX / 2);
946 	sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
947 
948 	/* We double it on the way in to account for "struct sk_buff" etc.
949 	 * overhead.   Applications assume that the SO_RCVBUF setting they make
950 	 * will allow that much actual data to be received on that socket.
951 	 *
952 	 * Applications are unaware that "struct sk_buff" and other overheads
953 	 * allocate from the receive buffer during socket buffer allocation.
954 	 *
955 	 * And after considering the possible alternatives, returning the value
956 	 * we actually used in getsockopt is the most desirable behavior.
957 	 */
958 	WRITE_ONCE(sk->sk_rcvbuf, max_t(int, val * 2, SOCK_MIN_RCVBUF));
959 }
960 
961 void sock_set_rcvbuf(struct sock *sk, int val)
962 {
963 	lock_sock(sk);
964 	__sock_set_rcvbuf(sk, val);
965 	release_sock(sk);
966 }
967 EXPORT_SYMBOL(sock_set_rcvbuf);
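
/*
 * Worked example of the doubling described above (userspace view, sketch,
 * assuming the request is within net.core.rmem_max):
 *
 *	int val = 65536;
 *	socklen_t len = sizeof(val);
 *
 *	setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &val, sizeof(val));
 *	getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &val, &len);
 *
 * getsockopt() now reports val == 131072: the kernel stored twice the
 * requested size to cover struct sk_buff and other bookkeeping overhead,
 * and it reports the value it actually used.
 */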
968 
969 static void __sock_set_mark(struct sock *sk, u32 val)
970 {
971 	if (val != sk->sk_mark) {
972 		sk->sk_mark = val;
973 		sk_dst_reset(sk);
974 	}
975 }
976 
977 void sock_set_mark(struct sock *sk, u32 val)
978 {
979 	lock_sock(sk);
980 	__sock_set_mark(sk, val);
981 	release_sock(sk);
982 }
983 EXPORT_SYMBOL(sock_set_mark);
984 
985 static void sock_release_reserved_memory(struct sock *sk, int bytes)
986 {
987 	/* Round down bytes to multiple of pages */
988 	bytes &= ~(SK_MEM_QUANTUM - 1);
989 
990 	WARN_ON(bytes > sk->sk_reserved_mem);
991 	sk->sk_reserved_mem -= bytes;
992 	sk_mem_reclaim(sk);
993 }
994 
995 static int sock_reserve_memory(struct sock *sk, int bytes)
996 {
997 	long allocated;
998 	bool charged;
999 	int pages;
1000 
1001 	if (!mem_cgroup_sockets_enabled || !sk->sk_memcg || !sk_has_account(sk))
1002 		return -EOPNOTSUPP;
1003 
1004 	if (!bytes)
1005 		return 0;
1006 
1007 	pages = sk_mem_pages(bytes);
1008 
1009 	/* pre-charge to memcg */
1010 	charged = mem_cgroup_charge_skmem(sk->sk_memcg, pages,
1011 					  GFP_KERNEL | __GFP_RETRY_MAYFAIL);
1012 	if (!charged)
1013 		return -ENOMEM;
1014 
1015 	/* pre-charge to forward_alloc */
1016 	allocated = sk_memory_allocated_add(sk, pages);
1017 	/* If the system goes into memory pressure with this
1018 	 * precharge, give up and return an error.
1019 	 */
1020 	if (allocated > sk_prot_mem_limits(sk, 1)) {
1021 		sk_memory_allocated_sub(sk, pages);
1022 		mem_cgroup_uncharge_skmem(sk->sk_memcg, pages);
1023 		return -ENOMEM;
1024 	}
1025 	sk->sk_forward_alloc += pages << SK_MEM_QUANTUM_SHIFT;
1026 
1027 	sk->sk_reserved_mem += pages << SK_MEM_QUANTUM_SHIFT;
1028 
1029 	return 0;
1030 }
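
/*
 * sock_reserve_memory() backs the SO_RESERVE_MEM option handled below: a
 * memcg-accounted socket can pre-charge forward_alloc so that subsequent
 * traffic draws on the reserve. Sketch from the userspace side:
 *
 *	int bytes = 1 << 20;
 *
 *	setsockopt(fd, SOL_SOCKET, SO_RESERVE_MEM, &bytes, sizeof(bytes));
 */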
1031 
1032 /*
1033  *	This is meant for all protocols to use and covers goings on
1034  *	at the socket level. Everything here is generic.
1035  */
1036 
1037 int sock_setsockopt(struct socket *sock, int level, int optname,
1038 		    sockptr_t optval, unsigned int optlen)
1039 {
1040 	struct so_timestamping timestamping;
1041 	struct sock_txtime sk_txtime;
1042 	struct sock *sk = sock->sk;
1043 	int val;
1044 	int valbool;
1045 	struct linger ling;
1046 	int ret = 0;
1047 
1048 	/*
1049 	 *	Options that do not take the usual int argument
1050 	 */
1051 
1052 	if (optname == SO_BINDTODEVICE)
1053 		return sock_setbindtodevice(sk, optval, optlen);
1054 
1055 	if (optlen < sizeof(int))
1056 		return -EINVAL;
1057 
1058 	if (copy_from_sockptr(&val, optval, sizeof(val)))
1059 		return -EFAULT;
1060 
1061 	valbool = val ? 1 : 0;
1062 
1063 	lock_sock(sk);
1064 
1065 	switch (optname) {
1066 	case SO_DEBUG:
1067 		if (val && !capable(CAP_NET_ADMIN))
1068 			ret = -EACCES;
1069 		else
1070 			sock_valbool_flag(sk, SOCK_DBG, valbool);
1071 		break;
1072 	case SO_REUSEADDR:
1073 		sk->sk_reuse = (valbool ? SK_CAN_REUSE : SK_NO_REUSE);
1074 		break;
1075 	case SO_REUSEPORT:
1076 		sk->sk_reuseport = valbool;
1077 		break;
1078 	case SO_TYPE:
1079 	case SO_PROTOCOL:
1080 	case SO_DOMAIN:
1081 	case SO_ERROR:
1082 		ret = -ENOPROTOOPT;
1083 		break;
1084 	case SO_DONTROUTE:
1085 		sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
1086 		sk_dst_reset(sk);
1087 		break;
1088 	case SO_BROADCAST:
1089 		sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
1090 		break;
1091 	case SO_SNDBUF:
1092 		/* Don't return an error here; BSD doesn't, and if you
1093 		 * think about it this is right. Otherwise apps have to
1094 		 * play 'guess the biggest size' games. RCVBUF/SNDBUF
1095 		 * are treated in BSD as hints.
1096 		 */
1097 		val = min_t(u32, val, sysctl_wmem_max);
1098 set_sndbuf:
1099 		/* Ensure val * 2 fits into an int, to prevent max_t()
1100 		 * from treating it as a negative value.
1101 		 */
1102 		val = min_t(int, val, INT_MAX / 2);
1103 		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
1104 		WRITE_ONCE(sk->sk_sndbuf,
1105 			   max_t(int, val * 2, SOCK_MIN_SNDBUF));
1106 		/* Wake up sending tasks if we upped the value. */
1107 		sk->sk_write_space(sk);
1108 		break;
1109 
1110 	case SO_SNDBUFFORCE:
1111 		if (!capable(CAP_NET_ADMIN)) {
1112 			ret = -EPERM;
1113 			break;
1114 		}
1115 
1116 		/* No negative values (to prevent underflow, as val will be
1117 		 * multiplied by 2).
1118 		 */
1119 		if (val < 0)
1120 			val = 0;
1121 		goto set_sndbuf;
1122 
1123 	case SO_RCVBUF:
1124 		/* Don't return an error here; BSD doesn't, and if you
1125 		 * think about it this is right. Otherwise apps have to
1126 		 * play 'guess the biggest size' games. RCVBUF/SNDBUF
1127 		 * are treated in BSD as hints.
1128 		 */
1129 		__sock_set_rcvbuf(sk, min_t(u32, val, sysctl_rmem_max));
1130 		break;
1131 
1132 	case SO_RCVBUFFORCE:
1133 		if (!capable(CAP_NET_ADMIN)) {
1134 			ret = -EPERM;
1135 			break;
1136 		}
1137 
1138 		/* No negative values (to prevent underflow, as val will be
1139 		 * multiplied by 2).
1140 		 */
1141 		__sock_set_rcvbuf(sk, max(val, 0));
1142 		break;
1143 
1144 	case SO_KEEPALIVE:
1145 		if (sk->sk_prot->keepalive)
1146 			sk->sk_prot->keepalive(sk, valbool);
1147 		sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
1148 		break;
1149 
1150 	case SO_OOBINLINE:
1151 		sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
1152 		break;
1153 
1154 	case SO_NO_CHECK:
1155 		sk->sk_no_check_tx = valbool;
1156 		break;
1157 
1158 	case SO_PRIORITY:
1159 		if ((val >= 0 && val <= 6) ||
1160 		    ns_capable(sock_net(sk)->user_ns, CAP_NET_RAW) ||
1161 		    ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1162 			sk->sk_priority = val;
1163 		else
1164 			ret = -EPERM;
1165 		break;
1166 
1167 	case SO_LINGER:
1168 		if (optlen < sizeof(ling)) {
1169 			ret = -EINVAL;	/* 1003.1g */
1170 			break;
1171 		}
1172 		if (copy_from_sockptr(&ling, optval, sizeof(ling))) {
1173 			ret = -EFAULT;
1174 			break;
1175 		}
1176 		if (!ling.l_onoff)
1177 			sock_reset_flag(sk, SOCK_LINGER);
1178 		else {
1179 #if (BITS_PER_LONG == 32)
1180 			if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
1181 				sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
1182 			else
1183 #endif
1184 				sk->sk_lingertime = (unsigned int)ling.l_linger * HZ;
1185 			sock_set_flag(sk, SOCK_LINGER);
1186 		}
1187 		break;
1188 
1189 	case SO_BSDCOMPAT:
1190 		break;
1191 
1192 	case SO_PASSCRED:
1193 		if (valbool)
1194 			set_bit(SOCK_PASSCRED, &sock->flags);
1195 		else
1196 			clear_bit(SOCK_PASSCRED, &sock->flags);
1197 		break;
1198 
1199 	case SO_TIMESTAMP_OLD:
1200 	case SO_TIMESTAMP_NEW:
1201 	case SO_TIMESTAMPNS_OLD:
1202 	case SO_TIMESTAMPNS_NEW:
1203 		sock_set_timestamp(sk, optname, valbool);
1204 		break;
1205 
1206 	case SO_TIMESTAMPING_NEW:
1207 	case SO_TIMESTAMPING_OLD:
1208 		if (optlen == sizeof(timestamping)) {
1209 			if (copy_from_sockptr(&timestamping, optval,
1210 					      sizeof(timestamping))) {
1211 				ret = -EFAULT;
1212 				break;
1213 			}
1214 		} else {
1215 			memset(&timestamping, 0, sizeof(timestamping));
1216 			timestamping.flags = val;
1217 		}
1218 		ret = sock_set_timestamping(sk, optname, timestamping);
1219 		break;
1220 
1221 	case SO_RCVLOWAT:
1222 		if (val < 0)
1223 			val = INT_MAX;
1224 		if (sock->ops->set_rcvlowat)
1225 			ret = sock->ops->set_rcvlowat(sk, val);
1226 		else
1227 			WRITE_ONCE(sk->sk_rcvlowat, val ? : 1);
1228 		break;
1229 
1230 	case SO_RCVTIMEO_OLD:
1231 	case SO_RCVTIMEO_NEW:
1232 		ret = sock_set_timeout(&sk->sk_rcvtimeo, optval,
1233 				       optlen, optname == SO_RCVTIMEO_OLD);
1234 		break;
1235 
1236 	case SO_SNDTIMEO_OLD:
1237 	case SO_SNDTIMEO_NEW:
1238 		ret = sock_set_timeout(&sk->sk_sndtimeo, optval,
1239 				       optlen, optname == SO_SNDTIMEO_OLD);
1240 		break;
1241 
1242 	case SO_ATTACH_FILTER: {
1243 		struct sock_fprog fprog;
1244 
1245 		ret = copy_bpf_fprog_from_user(&fprog, optval, optlen);
1246 		if (!ret)
1247 			ret = sk_attach_filter(&fprog, sk);
1248 		break;
1249 	}
1250 	case SO_ATTACH_BPF:
1251 		ret = -EINVAL;
1252 		if (optlen == sizeof(u32)) {
1253 			u32 ufd;
1254 
1255 			ret = -EFAULT;
1256 			if (copy_from_sockptr(&ufd, optval, sizeof(ufd)))
1257 				break;
1258 
1259 			ret = sk_attach_bpf(ufd, sk);
1260 		}
1261 		break;
1262 
1263 	case SO_ATTACH_REUSEPORT_CBPF: {
1264 		struct sock_fprog fprog;
1265 
1266 		ret = copy_bpf_fprog_from_user(&fprog, optval, optlen);
1267 		if (!ret)
1268 			ret = sk_reuseport_attach_filter(&fprog, sk);
1269 		break;
1270 	}
1271 	case SO_ATTACH_REUSEPORT_EBPF:
1272 		ret = -EINVAL;
1273 		if (optlen == sizeof(u32)) {
1274 			u32 ufd;
1275 
1276 			ret = -EFAULT;
1277 			if (copy_from_sockptr(&ufd, optval, sizeof(ufd)))
1278 				break;
1279 
1280 			ret = sk_reuseport_attach_bpf(ufd, sk);
1281 		}
1282 		break;
1283 
1284 	case SO_DETACH_REUSEPORT_BPF:
1285 		ret = reuseport_detach_prog(sk);
1286 		break;
1287 
1288 	case SO_DETACH_FILTER:
1289 		ret = sk_detach_filter(sk);
1290 		break;
1291 
1292 	case SO_LOCK_FILTER:
1293 		if (sock_flag(sk, SOCK_FILTER_LOCKED) && !valbool)
1294 			ret = -EPERM;
1295 		else
1296 			sock_valbool_flag(sk, SOCK_FILTER_LOCKED, valbool);
1297 		break;
1298 
1299 	case SO_PASSSEC:
1300 		if (valbool)
1301 			set_bit(SOCK_PASSSEC, &sock->flags);
1302 		else
1303 			clear_bit(SOCK_PASSSEC, &sock->flags);
1304 		break;
1305 	case SO_MARK:
1306 		if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_RAW) &&
1307 		    !ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) {
1308 			ret = -EPERM;
1309 			break;
1310 		}
1311 
1312 		__sock_set_mark(sk, val);
1313 		break;
1314 
1315 	case SO_RXQ_OVFL:
1316 		sock_valbool_flag(sk, SOCK_RXQ_OVFL, valbool);
1317 		break;
1318 
1319 	case SO_WIFI_STATUS:
1320 		sock_valbool_flag(sk, SOCK_WIFI_STATUS, valbool);
1321 		break;
1322 
1323 	case SO_PEEK_OFF:
1324 		if (sock->ops->set_peek_off)
1325 			ret = sock->ops->set_peek_off(sk, val);
1326 		else
1327 			ret = -EOPNOTSUPP;
1328 		break;
1329 
1330 	case SO_NOFCS:
1331 		sock_valbool_flag(sk, SOCK_NOFCS, valbool);
1332 		break;
1333 
1334 	case SO_SELECT_ERR_QUEUE:
1335 		sock_valbool_flag(sk, SOCK_SELECT_ERR_QUEUE, valbool);
1336 		break;
1337 
1338 #ifdef CONFIG_NET_RX_BUSY_POLL
1339 	case SO_BUSY_POLL:
1340 		/* allow unprivileged users to decrease the value */
1341 		if ((val > sk->sk_ll_usec) && !capable(CAP_NET_ADMIN))
1342 			ret = -EPERM;
1343 		else {
1344 			if (val < 0)
1345 				ret = -EINVAL;
1346 			else
1347 				WRITE_ONCE(sk->sk_ll_usec, val);
1348 		}
1349 		break;
1350 	case SO_PREFER_BUSY_POLL:
1351 		if (valbool && !capable(CAP_NET_ADMIN))
1352 			ret = -EPERM;
1353 		else
1354 			WRITE_ONCE(sk->sk_prefer_busy_poll, valbool);
1355 		break;
1356 	case SO_BUSY_POLL_BUDGET:
1357 		if (val > READ_ONCE(sk->sk_busy_poll_budget) && !capable(CAP_NET_ADMIN)) {
1358 			ret = -EPERM;
1359 		} else {
1360 			if (val < 0 || val > U16_MAX)
1361 				ret = -EINVAL;
1362 			else
1363 				WRITE_ONCE(sk->sk_busy_poll_budget, val);
1364 		}
1365 		break;
1366 #endif
1367 
1368 	case SO_MAX_PACING_RATE:
1369 		{
1370 		unsigned long ulval = (val == ~0U) ? ~0UL : (unsigned int)val;
1371 
1372 		if (sizeof(ulval) != sizeof(val) &&
1373 		    optlen >= sizeof(ulval) &&
1374 		    copy_from_sockptr(&ulval, optval, sizeof(ulval))) {
1375 			ret = -EFAULT;
1376 			break;
1377 		}
1378 		if (ulval != ~0UL)
1379 			cmpxchg(&sk->sk_pacing_status,
1380 				SK_PACING_NONE,
1381 				SK_PACING_NEEDED);
1382 		sk->sk_max_pacing_rate = ulval;
1383 		sk->sk_pacing_rate = min(sk->sk_pacing_rate, ulval);
1384 		break;
1385 		}
1386 	case SO_INCOMING_CPU:
1387 		WRITE_ONCE(sk->sk_incoming_cpu, val);
1388 		break;
1389 
1390 	case SO_CNX_ADVICE:
1391 		if (val == 1)
1392 			dst_negative_advice(sk);
1393 		break;
1394 
1395 	case SO_ZEROCOPY:
1396 		if (sk->sk_family == PF_INET || sk->sk_family == PF_INET6) {
1397 			if (!(sk_is_tcp(sk) ||
1398 			      (sk->sk_type == SOCK_DGRAM &&
1399 			       sk->sk_protocol == IPPROTO_UDP)))
1400 				ret = -EOPNOTSUPP;
1401 		} else if (sk->sk_family != PF_RDS) {
1402 			ret = -EOPNOTSUPP;
1403 		}
1404 		if (!ret) {
1405 			if (val < 0 || val > 1)
1406 				ret = -EINVAL;
1407 			else
1408 				sock_valbool_flag(sk, SOCK_ZEROCOPY, valbool);
1409 		}
1410 		break;
1411 
1412 	case SO_TXTIME:
1413 		if (optlen != sizeof(struct sock_txtime)) {
1414 			ret = -EINVAL;
1415 			break;
1416 		} else if (copy_from_sockptr(&sk_txtime, optval,
1417 			   sizeof(struct sock_txtime))) {
1418 			ret = -EFAULT;
1419 			break;
1420 		} else if (sk_txtime.flags & ~SOF_TXTIME_FLAGS_MASK) {
1421 			ret = -EINVAL;
1422 			break;
1423 		}
1424 		/* CLOCK_MONOTONIC is only used by sch_fq, and this packet
1425 		 * scheduler has enough safeguards.
1426 		 */
1427 		if (sk_txtime.clockid != CLOCK_MONOTONIC &&
1428 		    !ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) {
1429 			ret = -EPERM;
1430 			break;
1431 		}
1432 		sock_valbool_flag(sk, SOCK_TXTIME, true);
1433 		sk->sk_clockid = sk_txtime.clockid;
1434 		sk->sk_txtime_deadline_mode =
1435 			!!(sk_txtime.flags & SOF_TXTIME_DEADLINE_MODE);
1436 		sk->sk_txtime_report_errors =
1437 			!!(sk_txtime.flags & SOF_TXTIME_REPORT_ERRORS);
1438 		break;
1439 
1440 	case SO_BINDTOIFINDEX:
1441 		ret = sock_bindtoindex_locked(sk, val);
1442 		break;
1443 
1444 	case SO_BUF_LOCK:
1445 		if (val & ~SOCK_BUF_LOCK_MASK) {
1446 			ret = -EINVAL;
1447 			break;
1448 		}
1449 		sk->sk_userlocks = val | (sk->sk_userlocks &
1450 					  ~SOCK_BUF_LOCK_MASK);
1451 		break;
1452 
1453 	case SO_RESERVE_MEM:
1454 	{
1455 		int delta;
1456 
1457 		if (val < 0) {
1458 			ret = -EINVAL;
1459 			break;
1460 		}
1461 
1462 		delta = val - sk->sk_reserved_mem;
1463 		if (delta < 0)
1464 			sock_release_reserved_memory(sk, -delta);
1465 		else
1466 			ret = sock_reserve_memory(sk, delta);
1467 		break;
1468 	}
1469 
1470 	case SO_TXREHASH:
1471 		if (val < -1 || val > 1) {
1472 			ret = -EINVAL;
1473 			break;
1474 		}
1475 		/* Paired with READ_ONCE() in tcp_rtx_synack() */
1476 		WRITE_ONCE(sk->sk_txrehash, (u8)val);
1477 		break;
1478 
1479 	default:
1480 		ret = -ENOPROTOOPT;
1481 		break;
1482 	}
1483 	release_sock(sk);
1484 	return ret;
1485 }
1486 EXPORT_SYMBOL(sock_setsockopt);
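
/*
 * Example (userspace view, sketch; the values are illustrative): a few of
 * the options handled above as they are usually exercised:
 *
 *	int one = 1;
 *	struct linger lg = { .l_onoff = 1, .l_linger = 5 };
 *
 *	setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one));
 *	setsockopt(fd, SOL_SOCKET, SO_LINGER, &lg, sizeof(lg));
 *	setsockopt(fd, SOL_SOCKET, SO_ZEROCOPY, &one, sizeof(one));
 */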
1487 
1488 static const struct cred *sk_get_peer_cred(struct sock *sk)
1489 {
1490 	const struct cred *cred;
1491 
1492 	spin_lock(&sk->sk_peer_lock);
1493 	cred = get_cred(sk->sk_peer_cred);
1494 	spin_unlock(&sk->sk_peer_lock);
1495 
1496 	return cred;
1497 }
1498 
1499 static void cred_to_ucred(struct pid *pid, const struct cred *cred,
1500 			  struct ucred *ucred)
1501 {
1502 	ucred->pid = pid_vnr(pid);
1503 	ucred->uid = ucred->gid = -1;
1504 	if (cred) {
1505 		struct user_namespace *current_ns = current_user_ns();
1506 
1507 		ucred->uid = from_kuid_munged(current_ns, cred->euid);
1508 		ucred->gid = from_kgid_munged(current_ns, cred->egid);
1509 	}
1510 }
1511 
1512 static int groups_to_user(gid_t __user *dst, const struct group_info *src)
1513 {
1514 	struct user_namespace *user_ns = current_user_ns();
1515 	int i;
1516 
1517 	for (i = 0; i < src->ngroups; i++)
1518 		if (put_user(from_kgid_munged(user_ns, src->gid[i]), dst + i))
1519 			return -EFAULT;
1520 
1521 	return 0;
1522 }
1523 
1524 int sock_getsockopt(struct socket *sock, int level, int optname,
1525 		    char __user *optval, int __user *optlen)
1526 {
1527 	struct sock *sk = sock->sk;
1528 
1529 	union {
1530 		int val;
1531 		u64 val64;
1532 		unsigned long ulval;
1533 		struct linger ling;
1534 		struct old_timeval32 tm32;
1535 		struct __kernel_old_timeval tm;
1536 		struct  __kernel_sock_timeval stm;
1537 		struct sock_txtime txtime;
1538 		struct so_timestamping timestamping;
1539 	} v;
1540 
1541 	int lv = sizeof(int);
1542 	int len;
1543 
1544 	if (get_user(len, optlen))
1545 		return -EFAULT;
1546 	if (len < 0)
1547 		return -EINVAL;
1548 
1549 	memset(&v, 0, sizeof(v));
1550 
1551 	switch (optname) {
1552 	case SO_DEBUG:
1553 		v.val = sock_flag(sk, SOCK_DBG);
1554 		break;
1555 
1556 	case SO_DONTROUTE:
1557 		v.val = sock_flag(sk, SOCK_LOCALROUTE);
1558 		break;
1559 
1560 	case SO_BROADCAST:
1561 		v.val = sock_flag(sk, SOCK_BROADCAST);
1562 		break;
1563 
1564 	case SO_SNDBUF:
1565 		v.val = sk->sk_sndbuf;
1566 		break;
1567 
1568 	case SO_RCVBUF:
1569 		v.val = sk->sk_rcvbuf;
1570 		break;
1571 
1572 	case SO_REUSEADDR:
1573 		v.val = sk->sk_reuse;
1574 		break;
1575 
1576 	case SO_REUSEPORT:
1577 		v.val = sk->sk_reuseport;
1578 		break;
1579 
1580 	case SO_KEEPALIVE:
1581 		v.val = sock_flag(sk, SOCK_KEEPOPEN);
1582 		break;
1583 
1584 	case SO_TYPE:
1585 		v.val = sk->sk_type;
1586 		break;
1587 
1588 	case SO_PROTOCOL:
1589 		v.val = sk->sk_protocol;
1590 		break;
1591 
1592 	case SO_DOMAIN:
1593 		v.val = sk->sk_family;
1594 		break;
1595 
1596 	case SO_ERROR:
1597 		v.val = -sock_error(sk);
1598 		if (v.val == 0)
1599 			v.val = xchg(&sk->sk_err_soft, 0);
1600 		break;
1601 
1602 	case SO_OOBINLINE:
1603 		v.val = sock_flag(sk, SOCK_URGINLINE);
1604 		break;
1605 
1606 	case SO_NO_CHECK:
1607 		v.val = sk->sk_no_check_tx;
1608 		break;
1609 
1610 	case SO_PRIORITY:
1611 		v.val = sk->sk_priority;
1612 		break;
1613 
1614 	case SO_LINGER:
1615 		lv		= sizeof(v.ling);
1616 		v.ling.l_onoff	= sock_flag(sk, SOCK_LINGER);
1617 		v.ling.l_linger	= sk->sk_lingertime / HZ;
1618 		break;
1619 
1620 	case SO_BSDCOMPAT:
1621 		break;
1622 
1623 	case SO_TIMESTAMP_OLD:
1624 		v.val = sock_flag(sk, SOCK_RCVTSTAMP) &&
1625 				!sock_flag(sk, SOCK_TSTAMP_NEW) &&
1626 				!sock_flag(sk, SOCK_RCVTSTAMPNS);
1627 		break;
1628 
1629 	case SO_TIMESTAMPNS_OLD:
1630 		v.val = sock_flag(sk, SOCK_RCVTSTAMPNS) && !sock_flag(sk, SOCK_TSTAMP_NEW);
1631 		break;
1632 
1633 	case SO_TIMESTAMP_NEW:
1634 		v.val = sock_flag(sk, SOCK_RCVTSTAMP) && sock_flag(sk, SOCK_TSTAMP_NEW);
1635 		break;
1636 
1637 	case SO_TIMESTAMPNS_NEW:
1638 		v.val = sock_flag(sk, SOCK_RCVTSTAMPNS) && sock_flag(sk, SOCK_TSTAMP_NEW);
1639 		break;
1640 
1641 	case SO_TIMESTAMPING_OLD:
1642 		lv = sizeof(v.timestamping);
1643 		v.timestamping.flags = sk->sk_tsflags;
1644 		v.timestamping.bind_phc = sk->sk_bind_phc;
1645 		break;
1646 
1647 	case SO_RCVTIMEO_OLD:
1648 	case SO_RCVTIMEO_NEW:
1649 		lv = sock_get_timeout(sk->sk_rcvtimeo, &v, SO_RCVTIMEO_OLD == optname);
1650 		break;
1651 
1652 	case SO_SNDTIMEO_OLD:
1653 	case SO_SNDTIMEO_NEW:
1654 		lv = sock_get_timeout(sk->sk_sndtimeo, &v, SO_SNDTIMEO_OLD == optname);
1655 		break;
1656 
1657 	case SO_RCVLOWAT:
1658 		v.val = sk->sk_rcvlowat;
1659 		break;
1660 
1661 	case SO_SNDLOWAT:
1662 		v.val = 1;
1663 		break;
1664 
1665 	case SO_PASSCRED:
1666 		v.val = !!test_bit(SOCK_PASSCRED, &sock->flags);
1667 		break;
1668 
1669 	case SO_PEERCRED:
1670 	{
1671 		struct ucred peercred;
1672 		if (len > sizeof(peercred))
1673 			len = sizeof(peercred);
1674 
1675 		spin_lock(&sk->sk_peer_lock);
1676 		cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
1677 		spin_unlock(&sk->sk_peer_lock);
1678 
1679 		if (copy_to_user(optval, &peercred, len))
1680 			return -EFAULT;
1681 		goto lenout;
1682 	}
1683 
1684 	case SO_PEERGROUPS:
1685 	{
1686 		const struct cred *cred;
1687 		int ret, n;
1688 
1689 		cred = sk_get_peer_cred(sk);
1690 		if (!cred)
1691 			return -ENODATA;
1692 
1693 		n = cred->group_info->ngroups;
1694 		if (len < n * sizeof(gid_t)) {
1695 			len = n * sizeof(gid_t);
1696 			put_cred(cred);
1697 			return put_user(len, optlen) ? -EFAULT : -ERANGE;
1698 		}
1699 		len = n * sizeof(gid_t);
1700 
1701 		ret = groups_to_user((gid_t __user *)optval, cred->group_info);
1702 		put_cred(cred);
1703 		if (ret)
1704 			return ret;
1705 		goto lenout;
1706 	}
1707 
1708 	case SO_PEERNAME:
1709 	{
1710 		char address[128];
1711 
1712 		lv = sock->ops->getname(sock, (struct sockaddr *)address, 2);
1713 		if (lv < 0)
1714 			return -ENOTCONN;
1715 		if (lv < len)
1716 			return -EINVAL;
1717 		if (copy_to_user(optval, address, len))
1718 			return -EFAULT;
1719 		goto lenout;
1720 	}
1721 
1722 	/* Dubious BSD thing... Probably nobody even uses it, but
1723 	 * the UNIX standard wants it for whatever reason... -DaveM
1724 	 */
1725 	case SO_ACCEPTCONN:
1726 		v.val = sk->sk_state == TCP_LISTEN;
1727 		break;
1728 
1729 	case SO_PASSSEC:
1730 		v.val = !!test_bit(SOCK_PASSSEC, &sock->flags);
1731 		break;
1732 
1733 	case SO_PEERSEC:
1734 		return security_socket_getpeersec_stream(sock, optval, optlen, len);
1735 
1736 	case SO_MARK:
1737 		v.val = sk->sk_mark;
1738 		break;
1739 
1740 	case SO_RXQ_OVFL:
1741 		v.val = sock_flag(sk, SOCK_RXQ_OVFL);
1742 		break;
1743 
1744 	case SO_WIFI_STATUS:
1745 		v.val = sock_flag(sk, SOCK_WIFI_STATUS);
1746 		break;
1747 
1748 	case SO_PEEK_OFF:
1749 		if (!sock->ops->set_peek_off)
1750 			return -EOPNOTSUPP;
1751 
1752 		v.val = sk->sk_peek_off;
1753 		break;
1754 	case SO_NOFCS:
1755 		v.val = sock_flag(sk, SOCK_NOFCS);
1756 		break;
1757 
1758 	case SO_BINDTODEVICE:
1759 		return sock_getbindtodevice(sk, optval, optlen, len);
1760 
1761 	case SO_GET_FILTER:
1762 		len = sk_get_filter(sk, (struct sock_filter __user *)optval, len);
1763 		if (len < 0)
1764 			return len;
1765 
1766 		goto lenout;
1767 
1768 	case SO_LOCK_FILTER:
1769 		v.val = sock_flag(sk, SOCK_FILTER_LOCKED);
1770 		break;
1771 
1772 	case SO_BPF_EXTENSIONS:
1773 		v.val = bpf_tell_extensions();
1774 		break;
1775 
1776 	case SO_SELECT_ERR_QUEUE:
1777 		v.val = sock_flag(sk, SOCK_SELECT_ERR_QUEUE);
1778 		break;
1779 
1780 #ifdef CONFIG_NET_RX_BUSY_POLL
1781 	case SO_BUSY_POLL:
1782 		v.val = sk->sk_ll_usec;
1783 		break;
1784 	case SO_PREFER_BUSY_POLL:
1785 		v.val = READ_ONCE(sk->sk_prefer_busy_poll);
1786 		break;
1787 #endif
1788 
1789 	case SO_MAX_PACING_RATE:
1790 		if (sizeof(v.ulval) != sizeof(v.val) && len >= sizeof(v.ulval)) {
1791 			lv = sizeof(v.ulval);
1792 			v.ulval = sk->sk_max_pacing_rate;
1793 		} else {
1794 			/* 32bit version */
1795 			v.val = min_t(unsigned long, sk->sk_max_pacing_rate, ~0U);
1796 		}
1797 		break;
1798 
1799 	case SO_INCOMING_CPU:
1800 		v.val = READ_ONCE(sk->sk_incoming_cpu);
1801 		break;
1802 
1803 	case SO_MEMINFO:
1804 	{
1805 		u32 meminfo[SK_MEMINFO_VARS];
1806 
1807 		sk_get_meminfo(sk, meminfo);
1808 
1809 		len = min_t(unsigned int, len, sizeof(meminfo));
1810 		if (copy_to_user(optval, &meminfo, len))
1811 			return -EFAULT;
1812 
1813 		goto lenout;
1814 	}
1815 
1816 #ifdef CONFIG_NET_RX_BUSY_POLL
1817 	case SO_INCOMING_NAPI_ID:
1818 		v.val = READ_ONCE(sk->sk_napi_id);
1819 
1820 		/* aggregate non-NAPI IDs down to 0 */
1821 		if (v.val < MIN_NAPI_ID)
1822 			v.val = 0;
1823 
1824 		break;
1825 #endif
1826 
1827 	case SO_COOKIE:
1828 		lv = sizeof(u64);
1829 		if (len < lv)
1830 			return -EINVAL;
1831 		v.val64 = sock_gen_cookie(sk);
1832 		break;
1833 
1834 	case SO_ZEROCOPY:
1835 		v.val = sock_flag(sk, SOCK_ZEROCOPY);
1836 		break;
1837 
1838 	case SO_TXTIME:
1839 		lv = sizeof(v.txtime);
1840 		v.txtime.clockid = sk->sk_clockid;
1841 		v.txtime.flags |= sk->sk_txtime_deadline_mode ?
1842 				  SOF_TXTIME_DEADLINE_MODE : 0;
1843 		v.txtime.flags |= sk->sk_txtime_report_errors ?
1844 				  SOF_TXTIME_REPORT_ERRORS : 0;
1845 		break;
1846 
1847 	case SO_BINDTOIFINDEX:
1848 		v.val = sk->sk_bound_dev_if;
1849 		break;
1850 
1851 	case SO_NETNS_COOKIE:
1852 		lv = sizeof(u64);
1853 		if (len != lv)
1854 			return -EINVAL;
1855 		v.val64 = sock_net(sk)->net_cookie;
1856 		break;
1857 
1858 	case SO_BUF_LOCK:
1859 		v.val = sk->sk_userlocks & SOCK_BUF_LOCK_MASK;
1860 		break;
1861 
1862 	case SO_RESERVE_MEM:
1863 		v.val = sk->sk_reserved_mem;
1864 		break;
1865 
1866 	case SO_TXREHASH:
1867 		v.val = sk->sk_txrehash;
1868 		break;
1869 
1870 	default:
1871 		/* We implement the SO_SNDLOWAT etc to not be settable
1872 		 * (1003.1g 7).
1873 		 */
1874 		return -ENOPROTOOPT;
1875 	}
1876 
1877 	if (len > lv)
1878 		len = lv;
1879 	if (copy_to_user(optval, &v, len))
1880 		return -EFAULT;
1881 lenout:
1882 	if (put_user(len, optlen))
1883 		return -EFAULT;
1884 	return 0;
1885 }
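
/*
 * Example (userspace view, sketch): reading the peer credentials of a
 * connected AF_UNIX socket goes through the SO_PEERCRED branch above:
 *
 *	struct ucred cr;
 *	socklen_t len = sizeof(cr);
 *
 *	getsockopt(fd, SOL_SOCKET, SO_PEERCRED, &cr, &len);
 */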
1886 
1887 /*
1888  * Initialize an sk_lock.
1889  *
1890  * (We also register the sk_lock with the lock validator.)
1891  */
1892 static inline void sock_lock_init(struct sock *sk)
1893 {
1894 	if (sk->sk_kern_sock)
1895 		sock_lock_init_class_and_name(
1896 			sk,
1897 			af_family_kern_slock_key_strings[sk->sk_family],
1898 			af_family_kern_slock_keys + sk->sk_family,
1899 			af_family_kern_key_strings[sk->sk_family],
1900 			af_family_kern_keys + sk->sk_family);
1901 	else
1902 		sock_lock_init_class_and_name(
1903 			sk,
1904 			af_family_slock_key_strings[sk->sk_family],
1905 			af_family_slock_keys + sk->sk_family,
1906 			af_family_key_strings[sk->sk_family],
1907 			af_family_keys + sk->sk_family);
1908 }
1909 
1910 /*
1911  * Copy all fields from osk to nsk but nsk->sk_refcnt must not change yet,
1912  * even temporarily, because of RCU lookups. sk_node should also be left as is.
1913  * We must not copy fields between sk_dontcopy_begin and sk_dontcopy_end.
1914  */
1915 static void sock_copy(struct sock *nsk, const struct sock *osk)
1916 {
1917 	const struct proto *prot = READ_ONCE(osk->sk_prot);
1918 #ifdef CONFIG_SECURITY_NETWORK
1919 	void *sptr = nsk->sk_security;
1920 #endif
1921 
1922 	/* If we move sk_tx_queue_mapping out of the private section,
1923 	 * we must check if sk_tx_queue_clear() is called after
1924 	 * sock_copy() in sk_clone_lock().
1925 	 */
1926 	BUILD_BUG_ON(offsetof(struct sock, sk_tx_queue_mapping) <
1927 		     offsetof(struct sock, sk_dontcopy_begin) ||
1928 		     offsetof(struct sock, sk_tx_queue_mapping) >=
1929 		     offsetof(struct sock, sk_dontcopy_end));
1930 
1931 	memcpy(nsk, osk, offsetof(struct sock, sk_dontcopy_begin));
1932 
1933 	memcpy(&nsk->sk_dontcopy_end, &osk->sk_dontcopy_end,
1934 	       prot->obj_size - offsetof(struct sock, sk_dontcopy_end));
1935 
1936 #ifdef CONFIG_SECURITY_NETWORK
1937 	nsk->sk_security = sptr;
1938 	security_sk_clone(osk, nsk);
1939 #endif
1940 }
1941 
1942 static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
1943 		int family)
1944 {
1945 	struct sock *sk;
1946 	struct kmem_cache *slab;
1947 
1948 	slab = prot->slab;
1949 	if (slab != NULL) {
1950 		sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO);
1951 		if (!sk)
1952 			return sk;
1953 		if (want_init_on_alloc(priority))
1954 			sk_prot_clear_nulls(sk, prot->obj_size);
1955 	} else
1956 		sk = kmalloc(prot->obj_size, priority);
1957 
1958 	if (sk != NULL) {
1959 		if (security_sk_alloc(sk, family, priority))
1960 			goto out_free;
1961 
1962 		if (!try_module_get(prot->owner))
1963 			goto out_free_sec;
1964 	}
1965 
1966 	return sk;
1967 
1968 out_free_sec:
1969 	security_sk_free(sk);
1970 out_free:
1971 	if (slab != NULL)
1972 		kmem_cache_free(slab, sk);
1973 	else
1974 		kfree(sk);
1975 	return NULL;
1976 }
1977 
1978 static void sk_prot_free(struct proto *prot, struct sock *sk)
1979 {
1980 	struct kmem_cache *slab;
1981 	struct module *owner;
1982 
1983 	owner = prot->owner;
1984 	slab = prot->slab;
1985 
1986 	cgroup_sk_free(&sk->sk_cgrp_data);
1987 	mem_cgroup_sk_free(sk);
1988 	security_sk_free(sk);
1989 	if (slab != NULL)
1990 		kmem_cache_free(slab, sk);
1991 	else
1992 		kfree(sk);
1993 	module_put(owner);
1994 }
1995 
1996 /**
1997  *	sk_alloc - All socket objects are allocated here
1998  *	@net: the applicable net namespace
1999  *	@family: protocol family
2000  *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
2001  *	@prot: struct proto associated with this new sock instance
2002  *	@kern: is this to be a kernel socket?
2003  */
2004 struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
2005 		      struct proto *prot, int kern)
2006 {
2007 	struct sock *sk;
2008 
2009 	sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family);
2010 	if (sk) {
2011 		sk->sk_family = family;
2012 		/*
2013 		 * See comment in struct sock definition to understand
2014 		 * why we need sk_prot_creator -acme
2015 		 */
2016 		sk->sk_prot = sk->sk_prot_creator = prot;
2017 		sk->sk_kern_sock = kern;
2018 		sock_lock_init(sk);
2019 		sk->sk_net_refcnt = kern ? 0 : 1;
2020 		if (likely(sk->sk_net_refcnt)) {
2021 			get_net_track(net, &sk->ns_tracker, priority);
2022 			sock_inuse_add(net, 1);
2023 		}
2024 
2025 		sock_net_set(sk, net);
2026 		refcount_set(&sk->sk_wmem_alloc, 1);
2027 
2028 		mem_cgroup_sk_alloc(sk);
2029 		cgroup_sk_alloc(&sk->sk_cgrp_data);
2030 		sock_update_classid(&sk->sk_cgrp_data);
2031 		sock_update_netprioidx(&sk->sk_cgrp_data);
2032 		sk_tx_queue_clear(sk);
2033 	}
2034 
2035 	return sk;
2036 }
2037 EXPORT_SYMBOL(sk_alloc);
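
/*
 * Example (illustrative sketch; example_create, example_proto and
 * AF_EXAMPLE are hypothetical names): a protocol family's ->create()
 * handler typically pairs sk_alloc() with sock_init_data() below.
 *
 *	static int example_create(struct net *net, struct socket *sock,
 *				  int protocol, int kern)
 *	{
 *		struct sock *sk;
 *
 *		sk = sk_alloc(net, AF_EXAMPLE, GFP_KERNEL, &example_proto, kern);
 *		if (!sk)
 *			return -ENOBUFS;
 *
 *		sock_init_data(sock, sk);
 *		return 0;
 *	}
 */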
2038 
2039 /* Sockets having SOCK_RCU_FREE will call this function after one RCU
2040  * grace period. This is the case for UDP sockets and TCP listeners.
2041  */
2042 static void __sk_destruct(struct rcu_head *head)
2043 {
2044 	struct sock *sk = container_of(head, struct sock, sk_rcu);
2045 	struct sk_filter *filter;
2046 
2047 	if (sk->sk_destruct)
2048 		sk->sk_destruct(sk);
2049 
2050 	filter = rcu_dereference_check(sk->sk_filter,
2051 				       refcount_read(&sk->sk_wmem_alloc) == 0);
2052 	if (filter) {
2053 		sk_filter_uncharge(sk, filter);
2054 		RCU_INIT_POINTER(sk->sk_filter, NULL);
2055 	}
2056 
2057 	sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP);
2058 
2059 #ifdef CONFIG_BPF_SYSCALL
2060 	bpf_sk_storage_free(sk);
2061 #endif
2062 
2063 	if (atomic_read(&sk->sk_omem_alloc))
2064 		pr_debug("%s: optmem leakage (%d bytes) detected\n",
2065 			 __func__, atomic_read(&sk->sk_omem_alloc));
2066 
2067 	if (sk->sk_frag.page) {
2068 		put_page(sk->sk_frag.page);
2069 		sk->sk_frag.page = NULL;
2070 	}
2071 
2072 	/* We do not need to acquire sk->sk_peer_lock; we are the last user. */
2073 	put_cred(sk->sk_peer_cred);
2074 	put_pid(sk->sk_peer_pid);
2075 
2076 	if (likely(sk->sk_net_refcnt))
2077 		put_net_track(sock_net(sk), &sk->ns_tracker);
2078 	sk_prot_free(sk->sk_prot_creator, sk);
2079 }
2080 
2081 void sk_destruct(struct sock *sk)
2082 {
2083 	bool use_call_rcu = sock_flag(sk, SOCK_RCU_FREE);
2084 
2085 	WARN_ON_ONCE(!llist_empty(&sk->defer_list));
2086 	sk_defer_free_flush(sk);
2087 
2088 	if (rcu_access_pointer(sk->sk_reuseport_cb)) {
2089 		reuseport_detach_sock(sk);
2090 		use_call_rcu = true;
2091 	}
2092 
2093 	if (use_call_rcu)
2094 		call_rcu(&sk->sk_rcu, __sk_destruct);
2095 	else
2096 		__sk_destruct(&sk->sk_rcu);
2097 }
2098 
2099 static void __sk_free(struct sock *sk)
2100 {
2101 	if (likely(sk->sk_net_refcnt))
2102 		sock_inuse_add(sock_net(sk), -1);
2103 
2104 	if (unlikely(sk->sk_net_refcnt && sock_diag_has_destroy_listeners(sk)))
2105 		sock_diag_broadcast_destroy(sk);
2106 	else
2107 		sk_destruct(sk);
2108 }
2109 
2110 void sk_free(struct sock *sk)
2111 {
2112 	/*
2113 	 * We subtract one from sk_wmem_alloc, which tells us whether
2114 	 * some packets are still in a tx queue.
2115 	 * If the result is not zero, sock_wfree() will call __sk_free(sk) later
2116 	 */
2117 	if (refcount_dec_and_test(&sk->sk_wmem_alloc))
2118 		__sk_free(sk);
2119 }
2120 EXPORT_SYMBOL(sk_free);
2121 
2122 static void sk_init_common(struct sock *sk)
2123 {
2124 	skb_queue_head_init(&sk->sk_receive_queue);
2125 	skb_queue_head_init(&sk->sk_write_queue);
2126 	skb_queue_head_init(&sk->sk_error_queue);
2127 
2128 	rwlock_init(&sk->sk_callback_lock);
2129 	lockdep_set_class_and_name(&sk->sk_receive_queue.lock,
2130 			af_rlock_keys + sk->sk_family,
2131 			af_family_rlock_key_strings[sk->sk_family]);
2132 	lockdep_set_class_and_name(&sk->sk_write_queue.lock,
2133 			af_wlock_keys + sk->sk_family,
2134 			af_family_wlock_key_strings[sk->sk_family]);
2135 	lockdep_set_class_and_name(&sk->sk_error_queue.lock,
2136 			af_elock_keys + sk->sk_family,
2137 			af_family_elock_key_strings[sk->sk_family]);
2138 	lockdep_set_class_and_name(&sk->sk_callback_lock,
2139 			af_callback_keys + sk->sk_family,
2140 			af_family_clock_key_strings[sk->sk_family]);
2141 }
2142 
2143 /**
2144  *	sk_clone_lock - clone a socket, and lock its clone
2145  *	@sk: the socket to clone
2146  *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
2147  *
2148  *	Caller must unlock socket even in error path (bh_unlock_sock(newsk))
2149  */
2150 struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
2151 {
2152 	struct proto *prot = READ_ONCE(sk->sk_prot);
2153 	struct sk_filter *filter;
2154 	bool is_charged = true;
2155 	struct sock *newsk;
2156 
2157 	newsk = sk_prot_alloc(prot, priority, sk->sk_family);
2158 	if (!newsk)
2159 		goto out;
2160 
2161 	sock_copy(newsk, sk);
2162 
2163 	newsk->sk_prot_creator = prot;
2164 
2165 	/* SANITY */
2166 	if (likely(newsk->sk_net_refcnt)) {
2167 		get_net_track(sock_net(newsk), &newsk->ns_tracker, priority);
2168 		sock_inuse_add(sock_net(newsk), 1);
2169 	}
2170 	sk_node_init(&newsk->sk_node);
2171 	sock_lock_init(newsk);
2172 	bh_lock_sock(newsk);
2173 	newsk->sk_backlog.head	= newsk->sk_backlog.tail = NULL;
2174 	newsk->sk_backlog.len = 0;
2175 
2176 	atomic_set(&newsk->sk_rmem_alloc, 0);
2177 
2178 	/* sk_wmem_alloc set to one (see sk_free() and sock_wfree()) */
2179 	refcount_set(&newsk->sk_wmem_alloc, 1);
2180 
2181 	atomic_set(&newsk->sk_omem_alloc, 0);
2182 	sk_init_common(newsk);
2183 
2184 	newsk->sk_dst_cache	= NULL;
2185 	newsk->sk_dst_pending_confirm = 0;
2186 	newsk->sk_wmem_queued	= 0;
2187 	newsk->sk_forward_alloc = 0;
2188 	newsk->sk_reserved_mem  = 0;
2189 	atomic_set(&newsk->sk_drops, 0);
2190 	newsk->sk_send_head	= NULL;
2191 	newsk->sk_userlocks	= sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
2192 	atomic_set(&newsk->sk_zckey, 0);
2193 
2194 	sock_reset_flag(newsk, SOCK_DONE);
2195 
2196 	/* sk->sk_memcg will be populated at accept() time */
2197 	newsk->sk_memcg = NULL;
2198 
2199 	cgroup_sk_clone(&newsk->sk_cgrp_data);
2200 
2201 	rcu_read_lock();
2202 	filter = rcu_dereference(sk->sk_filter);
2203 	if (filter != NULL)
2204 		/* though it's an empty new sock, the charging may fail
2205 		 * if sysctl_optmem_max was changed between the creation of
2206 		 * the original socket and this clone
2207 		 */
2208 		is_charged = sk_filter_charge(newsk, filter);
2209 	RCU_INIT_POINTER(newsk->sk_filter, filter);
2210 	rcu_read_unlock();
2211 
2212 	if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk, sk))) {
2213 		/* We need to make sure that we don't uncharge the new
2214 		 * socket if we couldn't charge it in the first place
2215 		 * as otherwise we would uncharge the parent's filter.
2216 		 */
2217 		if (!is_charged)
2218 			RCU_INIT_POINTER(newsk->sk_filter, NULL);
2219 		sk_free_unlock_clone(newsk);
2220 		newsk = NULL;
2221 		goto out;
2222 	}
2223 	RCU_INIT_POINTER(newsk->sk_reuseport_cb, NULL);
2224 
2225 	if (bpf_sk_storage_clone(sk, newsk)) {
2226 		sk_free_unlock_clone(newsk);
2227 		newsk = NULL;
2228 		goto out;
2229 	}
2230 
2231 	/* Clear sk_user_data if parent had the pointer tagged
2232 	 * as not suitable for copying when cloning.
2233 	 */
2234 	if (sk_user_data_is_nocopy(newsk))
2235 		newsk->sk_user_data = NULL;
2236 
2237 	newsk->sk_err	   = 0;
2238 	newsk->sk_err_soft = 0;
2239 	newsk->sk_priority = 0;
2240 	newsk->sk_incoming_cpu = raw_smp_processor_id();
2241 
2242 	/* Before updating sk_refcnt, we must commit prior changes to memory
2243 	 * (Documentation/RCU/rculist_nulls.rst for details)
2244 	 */
2245 	smp_wmb();
2246 	refcount_set(&newsk->sk_refcnt, 2);
2247 
2248 	/* Increment the counter in the same struct proto as the master
2249 	 * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
2250 	 * is the same as sk->sk_prot->socks, as this field was copied
2251 	 * with memcpy).
2252 	 *
2253 	 * This _changes_ the previous behaviour, where
2254 	 * tcp_create_openreq_child was always incrementing the
2255 	 * equivalent of tcp_prot->socks (inet_sock_nr), so this has
2256 	 * to be taken into account in all callers. -acme
2257 	 */
2258 	sk_refcnt_debug_inc(newsk);
2259 	sk_set_socket(newsk, NULL);
2260 	sk_tx_queue_clear(newsk);
2261 	RCU_INIT_POINTER(newsk->sk_wq, NULL);
2262 
2263 	if (newsk->sk_prot->sockets_allocated)
2264 		sk_sockets_allocated_inc(newsk);
2265 
2266 	if (sock_needs_netstamp(sk) && newsk->sk_flags & SK_FLAGS_TIMESTAMP)
2267 		net_enable_timestamp();
2268 out:
2269 	return newsk;
2270 }
2271 EXPORT_SYMBOL_GPL(sk_clone_lock);
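
/*
 * Example (illustrative sketch; the setup step is protocol specific): the
 * clone is returned bh-locked, so the caller must drop that lock once its
 * own initialisation of newsk is finished.
 *
 *	struct sock *newsk = sk_clone_lock(sk, GFP_ATOMIC);
 *
 *	if (newsk) {
 *		... protocol specific initialisation of newsk ...
 *		bh_unlock_sock(newsk);
 *	}
 */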
2272 
2273 void sk_free_unlock_clone(struct sock *sk)
2274 {
2275 	/* It is still a raw copy of the parent, so invalidate the
2276 	 * destructor and do a plain sk_free() */
2277 	sk->sk_destruct = NULL;
2278 	bh_unlock_sock(sk);
2279 	sk_free(sk);
2280 }
2281 EXPORT_SYMBOL_GPL(sk_free_unlock_clone);
2282 
2283 void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
2284 {
2285 	u32 max_segs = 1;
2286 
2287 	sk_dst_set(sk, dst);
2288 	sk->sk_route_caps = dst->dev->features;
2289 	if (sk_is_tcp(sk))
2290 		sk->sk_route_caps |= NETIF_F_GSO;
2291 	if (sk->sk_route_caps & NETIF_F_GSO)
2292 		sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
2293 	if (unlikely(sk->sk_gso_disabled))
2294 		sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
2295 	if (sk_can_gso(sk)) {
2296 		if (dst->header_len && !xfrm_dst_offload_ok(dst)) {
2297 			sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
2298 		} else {
2299 			sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
2300 			/* pairs with the WRITE_ONCE() in netif_set_gso_max_size() */
2301 			sk->sk_gso_max_size = READ_ONCE(dst->dev->gso_max_size);
2302 			sk->sk_gso_max_size -= (MAX_TCP_HEADER + 1);
2303 			/* pairs with the WRITE_ONCE() in netif_set_gso_max_segs() */
2304 			max_segs = max_t(u32, READ_ONCE(dst->dev->gso_max_segs), 1);
2305 		}
2306 	}
2307 	sk->sk_gso_max_segs = max_segs;
2308 }
2309 EXPORT_SYMBOL_GPL(sk_setup_caps);
2310 
2311 /*
2312  *	Simple resource managers for sockets.
2313  */
2314 
2316 /*
2317  * Write buffer destructor automatically called from kfree_skb.
2318  */
2319 void sock_wfree(struct sk_buff *skb)
2320 {
2321 	struct sock *sk = skb->sk;
2322 	unsigned int len = skb->truesize;
2323 
2324 	if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) {
2325 		/*
2326 		 * Keep a reference on sk_wmem_alloc; it will be released
2327 		 * after the sk_write_space() call
2328 		 */
2329 		WARN_ON(refcount_sub_and_test(len - 1, &sk->sk_wmem_alloc));
2330 		sk->sk_write_space(sk);
2331 		len = 1;
2332 	}
2333 	/*
2334 	 * if sk_wmem_alloc reaches 0, we must finish what sk_free()
2335 	 * could not do because of in-flight packets
2336 	 */
2337 	if (refcount_sub_and_test(len, &sk->sk_wmem_alloc))
2338 		__sk_free(sk);
2339 }
2340 EXPORT_SYMBOL(sock_wfree);
2341 
2342 /* This variant of sock_wfree() is used by TCP,
2343  * since it sets SOCK_USE_WRITE_QUEUE.
2344  */
2345 void __sock_wfree(struct sk_buff *skb)
2346 {
2347 	struct sock *sk = skb->sk;
2348 
2349 	if (refcount_sub_and_test(skb->truesize, &sk->sk_wmem_alloc))
2350 		__sk_free(sk);
2351 }
2352 
2353 void skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
2354 {
2355 	skb_orphan(skb);
2356 	skb->sk = sk;
2357 #ifdef CONFIG_INET
2358 	if (unlikely(!sk_fullsock(sk))) {
2359 		skb->destructor = sock_edemux;
2360 		sock_hold(sk);
2361 		return;
2362 	}
2363 #endif
2364 	skb->destructor = sock_wfree;
2365 	skb_set_hash_from_sk(skb, sk);
2366 	/*
2367 	 * We used to take a refcount on sk, but the following operation
2368 	 * is enough to guarantee sk_free() won't free this sock until
2369 	 * all in-flight packets are completed
2370 	 */
2371 	refcount_add(skb->truesize, &sk->sk_wmem_alloc);
2372 }
2373 EXPORT_SYMBOL(skb_set_owner_w);
2374 
2375 static bool can_skb_orphan_partial(const struct sk_buff *skb)
2376 {
2377 #ifdef CONFIG_TLS_DEVICE
2378 	/* Drivers depend on in-order delivery for crypto offload;
2379 	 * a partial orphan breaks the out-of-order-OK logic.
2380 	 */
2381 	if (skb->decrypted)
2382 		return false;
2383 #endif
2384 	return (skb->destructor == sock_wfree ||
2385 		(IS_ENABLED(CONFIG_INET) && skb->destructor == tcp_wfree));
2386 }
2387 
2388 /* This helper is used by netem, as it can hold packets in its
2389  * delay queue. We want to allow the owner socket to send more
2390  * packets, as if they were already TX completed by a typical driver.
2391  * But we also want to keep skb->sk set because some packet schedulers
2392  * rely on it (sch_fq for example).
2393  */
2394 void skb_orphan_partial(struct sk_buff *skb)
2395 {
2396 	if (skb_is_tcp_pure_ack(skb))
2397 		return;
2398 
2399 	if (can_skb_orphan_partial(skb) && skb_set_owner_sk_safe(skb, skb->sk))
2400 		return;
2401 
2402 	skb_orphan(skb);
2403 }
2404 EXPORT_SYMBOL(skb_orphan_partial);
2405 
2406 /*
2407  * Read buffer destructor automatically called from kfree_skb.
2408  */
2409 void sock_rfree(struct sk_buff *skb)
2410 {
2411 	struct sock *sk = skb->sk;
2412 	unsigned int len = skb->truesize;
2413 
2414 	atomic_sub(len, &sk->sk_rmem_alloc);
2415 	sk_mem_uncharge(sk, len);
2416 }
2417 EXPORT_SYMBOL(sock_rfree);
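
/*
 * Example (illustrative sketch; skb and sk are assumed locals): receive
 * paths normally attach sock_rfree through skb_set_owner_r(), which also
 * charges sk_rmem_alloc, so the uncharge above balances it on kfree_skb().
 *
 *	skb_set_owner_r(skb, sk);
 *	skb_queue_tail(&sk->sk_receive_queue, skb);
 *	sk->sk_data_ready(sk);
 */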
2418 
2419 /*
2420  * Buffer destructor for skbs that are not used directly in read or write
2421  * path, e.g. for error handler skbs. Automatically called from kfree_skb.
2422  */
2423 void sock_efree(struct sk_buff *skb)
2424 {
2425 	sock_put(skb->sk);
2426 }
2427 EXPORT_SYMBOL(sock_efree);
2428 
2429 /* Buffer destructor for prefetch/receive path where reference count may
2430  * not be held, e.g. for listen sockets.
2431  */
2432 #ifdef CONFIG_INET
2433 void sock_pfree(struct sk_buff *skb)
2434 {
2435 	if (sk_is_refcounted(skb->sk))
2436 		sock_gen_put(skb->sk);
2437 }
2438 EXPORT_SYMBOL(sock_pfree);
2439 #endif /* CONFIG_INET */
2440 
2441 kuid_t sock_i_uid(struct sock *sk)
2442 {
2443 	kuid_t uid;
2444 
2445 	read_lock_bh(&sk->sk_callback_lock);
2446 	uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : GLOBAL_ROOT_UID;
2447 	read_unlock_bh(&sk->sk_callback_lock);
2448 	return uid;
2449 }
2450 EXPORT_SYMBOL(sock_i_uid);
2451 
2452 unsigned long sock_i_ino(struct sock *sk)
2453 {
2454 	unsigned long ino;
2455 
2456 	read_lock_bh(&sk->sk_callback_lock);
2457 	ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
2458 	read_unlock_bh(&sk->sk_callback_lock);
2459 	return ino;
2460 }
2461 EXPORT_SYMBOL(sock_i_ino);
2462 
2463 /*
2464  * Allocate a skb from the socket's send buffer.
2465  */
2466 struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
2467 			     gfp_t priority)
2468 {
2469 	if (force ||
2470 	    refcount_read(&sk->sk_wmem_alloc) < READ_ONCE(sk->sk_sndbuf)) {
2471 		struct sk_buff *skb = alloc_skb(size, priority);
2472 
2473 		if (skb) {
2474 			skb_set_owner_w(skb, sk);
2475 			return skb;
2476 		}
2477 	}
2478 	return NULL;
2479 }
2480 EXPORT_SYMBOL(sock_wmalloc);
2481 
2482 static void sock_ofree(struct sk_buff *skb)
2483 {
2484 	struct sock *sk = skb->sk;
2485 
2486 	atomic_sub(skb->truesize, &sk->sk_omem_alloc);
2487 }
2488 
2489 struct sk_buff *sock_omalloc(struct sock *sk, unsigned long size,
2490 			     gfp_t priority)
2491 {
2492 	struct sk_buff *skb;
2493 
2494 	/* small safe race: SKB_TRUESIZE may differ from final skb->truesize */
2495 	if (atomic_read(&sk->sk_omem_alloc) + SKB_TRUESIZE(size) >
2496 	    sysctl_optmem_max)
2497 		return NULL;
2498 
2499 	skb = alloc_skb(size, priority);
2500 	if (!skb)
2501 		return NULL;
2502 
2503 	atomic_add(skb->truesize, &sk->sk_omem_alloc);
2504 	skb->sk = sk;
2505 	skb->destructor = sock_ofree;
2506 	return skb;
2507 }
2508 
2509 /*
2510  * Allocate a memory block from the socket's option memory buffer.
2511  */
2512 void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
2513 {
2514 	if ((unsigned int)size <= sysctl_optmem_max &&
2515 	    atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
2516 		void *mem;
2517 		/* Do the add first, to avoid a race in case kmalloc
2518 		 * sleeps.
2519 		 */
2520 		atomic_add(size, &sk->sk_omem_alloc);
2521 		mem = kmalloc(size, priority);
2522 		if (mem)
2523 			return mem;
2524 		atomic_sub(size, &sk->sk_omem_alloc);
2525 	}
2526 	return NULL;
2527 }
2528 EXPORT_SYMBOL(sock_kmalloc);
2529 
2530 /* Free an option memory block. Note, we actually want the inline
2531  * here as this allows gcc to detect the nullify and fold away the
2532  * condition entirely.
2533  */
2534 static inline void __sock_kfree_s(struct sock *sk, void *mem, int size,
2535 				  const bool nullify)
2536 {
2537 	if (WARN_ON_ONCE(!mem))
2538 		return;
2539 	if (nullify)
2540 		kfree_sensitive(mem);
2541 	else
2542 		kfree(mem);
2543 	atomic_sub(size, &sk->sk_omem_alloc);
2544 }
2545 
2546 void sock_kfree_s(struct sock *sk, void *mem, int size)
2547 {
2548 	__sock_kfree_s(sk, mem, size, false);
2549 }
2550 EXPORT_SYMBOL(sock_kfree_s);
2551 
2552 void sock_kzfree_s(struct sock *sk, void *mem, int size)
2553 {
2554 	__sock_kfree_s(sk, mem, size, true);
2555 }
2556 EXPORT_SYMBOL(sock_kzfree_s);
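
/*
 * Example (illustrative sketch; struct example_opts is a hypothetical
 * per-socket option blob): memory from sock_kmalloc() must be returned
 * with sock_kfree_s()/sock_kzfree_s() using the same size, so that
 * sk_omem_alloc stays balanced.
 *
 *	struct example_opts *opts;
 *
 *	opts = sock_kmalloc(sk, sizeof(*opts), GFP_KERNEL);
 *	if (!opts)
 *		return -ENOBUFS;
 *	...
 *	sock_kfree_s(sk, opts, sizeof(*opts));
 */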
2557 
2558 /* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
2559  * I think these locks should be removed for datagram sockets.
2560  */
2561 static long sock_wait_for_wmem(struct sock *sk, long timeo)
2562 {
2563 	DEFINE_WAIT(wait);
2564 
2565 	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
2566 	for (;;) {
2567 		if (!timeo)
2568 			break;
2569 		if (signal_pending(current))
2570 			break;
2571 		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
2572 		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
2573 		if (refcount_read(&sk->sk_wmem_alloc) < READ_ONCE(sk->sk_sndbuf))
2574 			break;
2575 		if (sk->sk_shutdown & SEND_SHUTDOWN)
2576 			break;
2577 		if (sk->sk_err)
2578 			break;
2579 		timeo = schedule_timeout(timeo);
2580 	}
2581 	finish_wait(sk_sleep(sk), &wait);
2582 	return timeo;
2583 }
2584 
2585 
2586 /*
2587  *	Generic send/receive buffer handlers
2588  */
2589 
2590 struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
2591 				     unsigned long data_len, int noblock,
2592 				     int *errcode, int max_page_order)
2593 {
2594 	struct sk_buff *skb;
2595 	long timeo;
2596 	int err;
2597 
2598 	timeo = sock_sndtimeo(sk, noblock);
2599 	for (;;) {
2600 		err = sock_error(sk);
2601 		if (err != 0)
2602 			goto failure;
2603 
2604 		err = -EPIPE;
2605 		if (sk->sk_shutdown & SEND_SHUTDOWN)
2606 			goto failure;
2607 
2608 		if (sk_wmem_alloc_get(sk) < READ_ONCE(sk->sk_sndbuf))
2609 			break;
2610 
2611 		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
2612 		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
2613 		err = -EAGAIN;
2614 		if (!timeo)
2615 			goto failure;
2616 		if (signal_pending(current))
2617 			goto interrupted;
2618 		timeo = sock_wait_for_wmem(sk, timeo);
2619 	}
2620 	skb = alloc_skb_with_frags(header_len, data_len, max_page_order,
2621 				   errcode, sk->sk_allocation);
2622 	if (skb)
2623 		skb_set_owner_w(skb, sk);
2624 	return skb;
2625 
2626 interrupted:
2627 	err = sock_intr_errno(timeo);
2628 failure:
2629 	*errcode = err;
2630 	return NULL;
2631 }
2632 EXPORT_SYMBOL(sock_alloc_send_pskb);
2633 
2634 struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
2635 				    int noblock, int *errcode)
2636 {
2637 	return sock_alloc_send_pskb(sk, size, 0, noblock, errcode, 0);
2638 }
2639 EXPORT_SYMBOL(sock_alloc_send_skb);
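
/*
 * Example (illustrative sketch; hlen, len and err are assumed locals): a
 * datagram sendmsg() implementation lets MSG_DONTWAIT and the send
 * timeout decide whether the allocation above blocks.
 *
 *	skb = sock_alloc_send_skb(sk, hlen + len,
 *				  msg->msg_flags & MSG_DONTWAIT, &err);
 *	if (!skb)
 *		goto out;
 *	skb_reserve(skb, hlen);
 */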
2640 
2641 int __sock_cmsg_send(struct sock *sk, struct msghdr *msg, struct cmsghdr *cmsg,
2642 		     struct sockcm_cookie *sockc)
2643 {
2644 	u32 tsflags;
2645 
2646 	switch (cmsg->cmsg_type) {
2647 	case SO_MARK:
2648 		if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_RAW) &&
2649 		    !ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
2650 			return -EPERM;
2651 		if (cmsg->cmsg_len != CMSG_LEN(sizeof(u32)))
2652 			return -EINVAL;
2653 		sockc->mark = *(u32 *)CMSG_DATA(cmsg);
2654 		break;
2655 	case SO_TIMESTAMPING_OLD:
2656 		if (cmsg->cmsg_len != CMSG_LEN(sizeof(u32)))
2657 			return -EINVAL;
2658 
2659 		tsflags = *(u32 *)CMSG_DATA(cmsg);
2660 		if (tsflags & ~SOF_TIMESTAMPING_TX_RECORD_MASK)
2661 			return -EINVAL;
2662 
2663 		sockc->tsflags &= ~SOF_TIMESTAMPING_TX_RECORD_MASK;
2664 		sockc->tsflags |= tsflags;
2665 		break;
2666 	case SCM_TXTIME:
2667 		if (!sock_flag(sk, SOCK_TXTIME))
2668 			return -EINVAL;
2669 		if (cmsg->cmsg_len != CMSG_LEN(sizeof(u64)))
2670 			return -EINVAL;
2671 		sockc->transmit_time = get_unaligned((u64 *)CMSG_DATA(cmsg));
2672 		break;
2673 	/* SCM_RIGHTS and SCM_CREDENTIALS are semantically in SOL_UNIX. */
2674 	case SCM_RIGHTS:
2675 	case SCM_CREDENTIALS:
2676 		break;
2677 	default:
2678 		return -EINVAL;
2679 	}
2680 	return 0;
2681 }
2682 EXPORT_SYMBOL(__sock_cmsg_send);
2683 
2684 int sock_cmsg_send(struct sock *sk, struct msghdr *msg,
2685 		   struct sockcm_cookie *sockc)
2686 {
2687 	struct cmsghdr *cmsg;
2688 	int ret;
2689 
2690 	for_each_cmsghdr(cmsg, msg) {
2691 		if (!CMSG_OK(msg, cmsg))
2692 			return -EINVAL;
2693 		if (cmsg->cmsg_level != SOL_SOCKET)
2694 			continue;
2695 		ret = __sock_cmsg_send(sk, msg, cmsg, sockc);
2696 		if (ret)
2697 			return ret;
2698 	}
2699 	return 0;
2700 }
2701 EXPORT_SYMBOL(sock_cmsg_send);
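
/*
 * Example (illustrative sketch, assuming the sockcm_init() helper from
 * include/net/sock.h): a sendmsg() implementation seeds the cookie from
 * the socket and then lets SOL_SOCKET control messages override it.
 *
 *	struct sockcm_cookie sockc;
 *
 *	sockcm_init(&sockc, sk);
 *	if (msg->msg_controllen) {
 *		err = sock_cmsg_send(sk, msg, &sockc);
 *		if (unlikely(err))
 *			return err;
 *	}
 */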
2702 
2703 static void sk_enter_memory_pressure(struct sock *sk)
2704 {
2705 	if (!sk->sk_prot->enter_memory_pressure)
2706 		return;
2707 
2708 	sk->sk_prot->enter_memory_pressure(sk);
2709 }
2710 
2711 static void sk_leave_memory_pressure(struct sock *sk)
2712 {
2713 	if (sk->sk_prot->leave_memory_pressure) {
2714 		sk->sk_prot->leave_memory_pressure(sk);
2715 	} else {
2716 		unsigned long *memory_pressure = sk->sk_prot->memory_pressure;
2717 
2718 		if (memory_pressure && READ_ONCE(*memory_pressure))
2719 			WRITE_ONCE(*memory_pressure, 0);
2720 	}
2721 }
2722 
2723 DEFINE_STATIC_KEY_FALSE(net_high_order_alloc_disable_key);
2724 
2725 /**
2726  * skb_page_frag_refill - check that a page_frag contains enough room
2727  * @sz: minimum size of the fragment we want to get
2728  * @pfrag: pointer to page_frag
2729  * @gfp: priority for memory allocation
2730  *
2731  * Note: While this allocator tries to use high order pages, there is
2732  * no guarantee that allocations succeed. Therefore, @sz MUST be
2733  * less than or equal to PAGE_SIZE.
2734  */
2735 bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t gfp)
2736 {
2737 	if (pfrag->page) {
2738 		if (page_ref_count(pfrag->page) == 1) {
2739 			pfrag->offset = 0;
2740 			return true;
2741 		}
2742 		if (pfrag->offset + sz <= pfrag->size)
2743 			return true;
2744 		put_page(pfrag->page);
2745 	}
2746 
2747 	pfrag->offset = 0;
2748 	if (SKB_FRAG_PAGE_ORDER &&
2749 	    !static_branch_unlikely(&net_high_order_alloc_disable_key)) {
2750 		/* Avoid direct reclaim but allow kswapd to wake */
2751 		pfrag->page = alloc_pages((gfp & ~__GFP_DIRECT_RECLAIM) |
2752 					  __GFP_COMP | __GFP_NOWARN |
2753 					  __GFP_NORETRY,
2754 					  SKB_FRAG_PAGE_ORDER);
2755 		if (likely(pfrag->page)) {
2756 			pfrag->size = PAGE_SIZE << SKB_FRAG_PAGE_ORDER;
2757 			return true;
2758 		}
2759 	}
2760 	pfrag->page = alloc_page(gfp);
2761 	if (likely(pfrag->page)) {
2762 		pfrag->size = PAGE_SIZE;
2763 		return true;
2764 	}
2765 	return false;
2766 }
2767 EXPORT_SYMBOL(skb_page_frag_refill);
2768 
2769 bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
2770 {
2771 	if (likely(skb_page_frag_refill(32U, pfrag, sk->sk_allocation)))
2772 		return true;
2773 
2774 	sk_enter_memory_pressure(sk);
2775 	sk_stream_moderate_sndbuf(sk);
2776 	return false;
2777 }
2778 EXPORT_SYMBOL(sk_page_frag_refill);
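
/*
 * Example (illustrative sketch; "copy" is an assumed local): a stream
 * sendmsg() path refills the per-socket fragment via sk_page_frag(),
 * copies payload at pfrag->offset and then advances the offset.
 *
 *	struct page_frag *pfrag = sk_page_frag(sk);
 *
 *	if (!sk_page_frag_refill(sk, pfrag))
 *		goto wait_for_space;
 *
 *	copy = min_t(int, copy, pfrag->size - pfrag->offset);
 *	... copy "copy" bytes into pfrag->page at pfrag->offset ...
 *	pfrag->offset += copy;
 */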
2779 
2780 void __lock_sock(struct sock *sk)
2781 	__releases(&sk->sk_lock.slock)
2782 	__acquires(&sk->sk_lock.slock)
2783 {
2784 	DEFINE_WAIT(wait);
2785 
2786 	for (;;) {
2787 		prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
2788 					TASK_UNINTERRUPTIBLE);
2789 		spin_unlock_bh(&sk->sk_lock.slock);
2790 		schedule();
2791 		spin_lock_bh(&sk->sk_lock.slock);
2792 		if (!sock_owned_by_user(sk))
2793 			break;
2794 	}
2795 	finish_wait(&sk->sk_lock.wq, &wait);
2796 }
2797 
2798 void __release_sock(struct sock *sk)
2799 	__releases(&sk->sk_lock.slock)
2800 	__acquires(&sk->sk_lock.slock)
2801 {
2802 	struct sk_buff *skb, *next;
2803 
2804 	while ((skb = sk->sk_backlog.head) != NULL) {
2805 		sk->sk_backlog.head = sk->sk_backlog.tail = NULL;
2806 
2807 		spin_unlock_bh(&sk->sk_lock.slock);
2808 
2809 		do {
2810 			next = skb->next;
2811 			prefetch(next);
2812 			WARN_ON_ONCE(skb_dst_is_noref(skb));
2813 			skb_mark_not_on_list(skb);
2814 			sk_backlog_rcv(sk, skb);
2815 
2816 			cond_resched();
2817 
2818 			skb = next;
2819 		} while (skb != NULL);
2820 
2821 		spin_lock_bh(&sk->sk_lock.slock);
2822 	}
2823 
2824 	/*
2825 	 * Doing the zeroing here guarantees we cannot loop forever
2826 	 * while a wild producer attempts to flood us.
2827 	 */
2828 	sk->sk_backlog.len = 0;
2829 }
2830 
2831 void __sk_flush_backlog(struct sock *sk)
2832 {
2833 	spin_lock_bh(&sk->sk_lock.slock);
2834 	__release_sock(sk);
2835 	spin_unlock_bh(&sk->sk_lock.slock);
2836 }
2837 
2838 /**
2839  * sk_wait_data - wait for data to arrive at sk_receive_queue
2840  * @sk:    sock to wait on
2841  * @timeo: for how long
2842  * @skb:   last skb seen on sk_receive_queue
2843  *
2844  * Now socket state including sk->sk_err is changed only under lock,
2845  * hence we may omit checks after joining the wait queue.
2846  * We check the receive queue before schedule() only as an optimization;
2847  * it is very likely that release_sock() added new data.
2848  */
2849 int sk_wait_data(struct sock *sk, long *timeo, const struct sk_buff *skb)
2850 {
2851 	DEFINE_WAIT_FUNC(wait, woken_wake_function);
2852 	int rc;
2853 
2854 	add_wait_queue(sk_sleep(sk), &wait);
2855 	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
2856 	rc = sk_wait_event(sk, timeo, skb_peek_tail(&sk->sk_receive_queue) != skb, &wait);
2857 	sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
2858 	remove_wait_queue(sk_sleep(sk), &wait);
2859 	return rc;
2860 }
2861 EXPORT_SYMBOL(sk_wait_data);
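
/*
 * Example (illustrative sketch; flags is the recvmsg() flags argument):
 * with the socket lock held, a recvmsg() implementation typically loops
 * until data arrives or the timeout/signal ends the wait.
 *
 *	long timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
 *
 *	while (skb_queue_empty(&sk->sk_receive_queue)) {
 *		if (!timeo || signal_pending(current))
 *			break;
 *		sk_wait_data(sk, &timeo, NULL);
 *	}
 */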
2862 
2863 /**
2864  *	__sk_mem_raise_allocated - increase memory_allocated
2865  *	@sk: socket
2866  *	@size: memory size to allocate
2867  *	@amt: pages to allocate
2868  *	@kind: allocation type
2869  *
2870  *	Similar to __sk_mem_schedule(), but does not update sk_forward_alloc
2871  */
2872 int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind)
2873 {
2874 	struct proto *prot = sk->sk_prot;
2875 	long allocated = sk_memory_allocated_add(sk, amt);
2876 	bool memcg_charge = mem_cgroup_sockets_enabled && sk->sk_memcg;
2877 	bool charged = true;
2878 
2879 	if (memcg_charge &&
2880 	    !(charged = mem_cgroup_charge_skmem(sk->sk_memcg, amt,
2881 						gfp_memcg_charge())))
2882 		goto suppress_allocation;
2883 
2884 	/* Under limit. */
2885 	if (allocated <= sk_prot_mem_limits(sk, 0)) {
2886 		sk_leave_memory_pressure(sk);
2887 		return 1;
2888 	}
2889 
2890 	/* Under pressure. */
2891 	if (allocated > sk_prot_mem_limits(sk, 1))
2892 		sk_enter_memory_pressure(sk);
2893 
2894 	/* Over hard limit. */
2895 	if (allocated > sk_prot_mem_limits(sk, 2))
2896 		goto suppress_allocation;
2897 
2898 	/* guarantee minimum buffer size under pressure */
2899 	if (kind == SK_MEM_RECV) {
2900 		if (atomic_read(&sk->sk_rmem_alloc) < sk_get_rmem0(sk, prot))
2901 			return 1;
2902 
2903 	} else { /* SK_MEM_SEND */
2904 		int wmem0 = sk_get_wmem0(sk, prot);
2905 
2906 		if (sk->sk_type == SOCK_STREAM) {
2907 			if (sk->sk_wmem_queued < wmem0)
2908 				return 1;
2909 		} else if (refcount_read(&sk->sk_wmem_alloc) < wmem0) {
2910 			return 1;
2911 		}
2912 	}
2913 
2914 	if (sk_has_memory_pressure(sk)) {
2915 		u64 alloc;
2916 
2917 		if (!sk_under_memory_pressure(sk))
2918 			return 1;
2919 		alloc = sk_sockets_allocated_read_positive(sk);
2920 		if (sk_prot_mem_limits(sk, 2) > alloc *
2921 		    sk_mem_pages(sk->sk_wmem_queued +
2922 				 atomic_read(&sk->sk_rmem_alloc) +
2923 				 sk->sk_forward_alloc))
2924 			return 1;
2925 	}
2926 
2927 suppress_allocation:
2928 
2929 	if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) {
2930 		sk_stream_moderate_sndbuf(sk);
2931 
2932 		/* Fail only if socket is _under_ its sndbuf.
2933 		 * In this case we cannot block, so we have to fail.
2934 		 */
2935 		if (sk->sk_wmem_queued + size >= sk->sk_sndbuf) {
2936 			/* Force charge with __GFP_NOFAIL */
2937 			if (memcg_charge && !charged) {
2938 				mem_cgroup_charge_skmem(sk->sk_memcg, amt,
2939 					gfp_memcg_charge() | __GFP_NOFAIL);
2940 			}
2941 			return 1;
2942 		}
2943 	}
2944 
2945 	if (kind == SK_MEM_SEND || (kind == SK_MEM_RECV && charged))
2946 		trace_sock_exceed_buf_limit(sk, prot, allocated, kind);
2947 
2948 	sk_memory_allocated_sub(sk, amt);
2949 
2950 	if (memcg_charge && charged)
2951 		mem_cgroup_uncharge_skmem(sk->sk_memcg, amt);
2952 
2953 	return 0;
2954 }
2955 EXPORT_SYMBOL(__sk_mem_raise_allocated);
2956 
2957 /**
2958  *	__sk_mem_schedule - increase sk_forward_alloc and memory_allocated
2959  *	@sk: socket
2960  *	@size: memory size to allocate
2961  *	@kind: allocation type
2962  *
2963  *	If kind is SK_MEM_SEND, it means wmem allocation. Otherwise it means
2964  *	rmem allocation. This function assumes that protocols which have
2965  *	memory_pressure use sk_wmem_queued as write buffer accounting.
2966  */
2967 int __sk_mem_schedule(struct sock *sk, int size, int kind)
2968 {
2969 	int ret, amt = sk_mem_pages(size);
2970 
2971 	sk->sk_forward_alloc += amt << SK_MEM_QUANTUM_SHIFT;
2972 	ret = __sk_mem_raise_allocated(sk, size, amt, kind);
2973 	if (!ret)
2974 		sk->sk_forward_alloc -= amt << SK_MEM_QUANTUM_SHIFT;
2975 	return ret;
2976 }
2977 EXPORT_SYMBOL(__sk_mem_schedule);
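
/*
 * Example (illustrative sketch, assuming the sk_wmem_schedule() and
 * sk_mem_charge() helpers from include/net/sock.h): callers usually go
 * through the wrappers, which only call __sk_mem_schedule() when
 * sk_forward_alloc is too small for the request.
 *
 *	if (!sk_wmem_schedule(sk, skb->truesize))
 *		goto wait_for_memory;
 *	sk_mem_charge(sk, skb->truesize);
 */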
2978 
2979 /**
2980  *	__sk_mem_reduce_allocated - reclaim memory_allocated
2981  *	@sk: socket
2982  *	@amount: number of quanta
2983  *
2984  *	Similar to __sk_mem_reclaim(), but does not update sk_forward_alloc
2985  */
2986 void __sk_mem_reduce_allocated(struct sock *sk, int amount)
2987 {
2988 	sk_memory_allocated_sub(sk, amount);
2989 
2990 	if (mem_cgroup_sockets_enabled && sk->sk_memcg)
2991 		mem_cgroup_uncharge_skmem(sk->sk_memcg, amount);
2992 
2993 	if (sk_under_memory_pressure(sk) &&
2994 	    (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)))
2995 		sk_leave_memory_pressure(sk);
2996 }
2997 EXPORT_SYMBOL(__sk_mem_reduce_allocated);
2998 
2999 /**
3000  *	__sk_mem_reclaim - reclaim sk_forward_alloc and memory_allocated
3001  *	@sk: socket
3002  *	@amount: number of bytes (rounded down to a SK_MEM_QUANTUM multiple)
3003  */
3004 void __sk_mem_reclaim(struct sock *sk, int amount)
3005 {
3006 	amount >>= SK_MEM_QUANTUM_SHIFT;
3007 	sk->sk_forward_alloc -= amount << SK_MEM_QUANTUM_SHIFT;
3008 	__sk_mem_reduce_allocated(sk, amount);
3009 }
3010 EXPORT_SYMBOL(__sk_mem_reclaim);
3011 
3012 int sk_set_peek_off(struct sock *sk, int val)
3013 {
3014 	sk->sk_peek_off = val;
3015 	return 0;
3016 }
3017 EXPORT_SYMBOL_GPL(sk_set_peek_off);
3018 
3019 /*
3020  * Set of default routines for initialising struct proto_ops when
3021  * the protocol does not support a particular function. In certain
3022  * cases where it makes no sense for a protocol to have a "do nothing"
3023  * function, some default processing is provided.
3024  */
3025 
3026 int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len)
3027 {
3028 	return -EOPNOTSUPP;
3029 }
3030 EXPORT_SYMBOL(sock_no_bind);
3031 
3032 int sock_no_connect(struct socket *sock, struct sockaddr *saddr,
3033 		    int len, int flags)
3034 {
3035 	return -EOPNOTSUPP;
3036 }
3037 EXPORT_SYMBOL(sock_no_connect);
3038 
3039 int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
3040 {
3041 	return -EOPNOTSUPP;
3042 }
3043 EXPORT_SYMBOL(sock_no_socketpair);
3044 
3045 int sock_no_accept(struct socket *sock, struct socket *newsock, int flags,
3046 		   bool kern)
3047 {
3048 	return -EOPNOTSUPP;
3049 }
3050 EXPORT_SYMBOL(sock_no_accept);
3051 
3052 int sock_no_getname(struct socket *sock, struct sockaddr *saddr,
3053 		    int peer)
3054 {
3055 	return -EOPNOTSUPP;
3056 }
3057 EXPORT_SYMBOL(sock_no_getname);
3058 
3059 int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
3060 {
3061 	return -EOPNOTSUPP;
3062 }
3063 EXPORT_SYMBOL(sock_no_ioctl);
3064 
3065 int sock_no_listen(struct socket *sock, int backlog)
3066 {
3067 	return -EOPNOTSUPP;
3068 }
3069 EXPORT_SYMBOL(sock_no_listen);
3070 
3071 int sock_no_shutdown(struct socket *sock, int how)
3072 {
3073 	return -EOPNOTSUPP;
3074 }
3075 EXPORT_SYMBOL(sock_no_shutdown);
3076 
3077 int sock_no_sendmsg(struct socket *sock, struct msghdr *m, size_t len)
3078 {
3079 	return -EOPNOTSUPP;
3080 }
3081 EXPORT_SYMBOL(sock_no_sendmsg);
3082 
3083 int sock_no_sendmsg_locked(struct sock *sk, struct msghdr *m, size_t len)
3084 {
3085 	return -EOPNOTSUPP;
3086 }
3087 EXPORT_SYMBOL(sock_no_sendmsg_locked);
3088 
3089 int sock_no_recvmsg(struct socket *sock, struct msghdr *m, size_t len,
3090 		    int flags)
3091 {
3092 	return -EOPNOTSUPP;
3093 }
3094 EXPORT_SYMBOL(sock_no_recvmsg);
3095 
3096 int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
3097 {
3098 	/* Mirror missing mmap method error code */
3099 	return -ENODEV;
3100 }
3101 EXPORT_SYMBOL(sock_no_mmap);
3102 
3103 /*
3104  * When a file is received (via SCM_RIGHTS, etc), we must bump the
3105  * various sock-based usage counts.
3106  */
3107 void __receive_sock(struct file *file)
3108 {
3109 	struct socket *sock;
3110 
3111 	sock = sock_from_file(file);
3112 	if (sock) {
3113 		sock_update_netprioidx(&sock->sk->sk_cgrp_data);
3114 		sock_update_classid(&sock->sk->sk_cgrp_data);
3115 	}
3116 }
3117 
3118 ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
3119 {
3120 	ssize_t res;
3121 	struct msghdr msg = {.msg_flags = flags};
3122 	struct kvec iov;
3123 	char *kaddr = kmap(page);
3124 	iov.iov_base = kaddr + offset;
3125 	iov.iov_len = size;
3126 	res = kernel_sendmsg(sock, &msg, &iov, 1, size);
3127 	kunmap(page);
3128 	return res;
3129 }
3130 EXPORT_SYMBOL(sock_no_sendpage);
3131 
3132 ssize_t sock_no_sendpage_locked(struct sock *sk, struct page *page,
3133 				int offset, size_t size, int flags)
3134 {
3135 	ssize_t res;
3136 	struct msghdr msg = {.msg_flags = flags};
3137 	struct kvec iov;
3138 	char *kaddr = kmap(page);
3139 
3140 	iov.iov_base = kaddr + offset;
3141 	iov.iov_len = size;
3142 	res = kernel_sendmsg_locked(sk, &msg, &iov, 1, size);
3143 	kunmap(page);
3144 	return res;
3145 }
3146 EXPORT_SYMBOL(sock_no_sendpage_locked);
3147 
3148 /*
3149  *	Default Socket Callbacks
3150  */
3151 
3152 static void sock_def_wakeup(struct sock *sk)
3153 {
3154 	struct socket_wq *wq;
3155 
3156 	rcu_read_lock();
3157 	wq = rcu_dereference(sk->sk_wq);
3158 	if (skwq_has_sleeper(wq))
3159 		wake_up_interruptible_all(&wq->wait);
3160 	rcu_read_unlock();
3161 }
3162 
3163 static void sock_def_error_report(struct sock *sk)
3164 {
3165 	struct socket_wq *wq;
3166 
3167 	rcu_read_lock();
3168 	wq = rcu_dereference(sk->sk_wq);
3169 	if (skwq_has_sleeper(wq))
3170 		wake_up_interruptible_poll(&wq->wait, EPOLLERR);
3171 	sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
3172 	rcu_read_unlock();
3173 }
3174 
3175 void sock_def_readable(struct sock *sk)
3176 {
3177 	struct socket_wq *wq;
3178 
3179 	rcu_read_lock();
3180 	wq = rcu_dereference(sk->sk_wq);
3181 	if (skwq_has_sleeper(wq))
3182 		wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN | EPOLLPRI |
3183 						EPOLLRDNORM | EPOLLRDBAND);
3184 	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
3185 	rcu_read_unlock();
3186 }
3187 
3188 static void sock_def_write_space(struct sock *sk)
3189 {
3190 	struct socket_wq *wq;
3191 
3192 	rcu_read_lock();
3193 
3194 	/* Do not wake up a writer until he can make "significant"
3195 	 * progress.  --DaveM
3196 	 */
3197 	if ((refcount_read(&sk->sk_wmem_alloc) << 1) <= READ_ONCE(sk->sk_sndbuf)) {
3198 		wq = rcu_dereference(sk->sk_wq);
3199 		if (skwq_has_sleeper(wq))
3200 			wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT |
3201 						EPOLLWRNORM | EPOLLWRBAND);
3202 
3203 		/* Should agree with poll, otherwise some programs break */
3204 		if (sock_writeable(sk))
3205 			sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
3206 	}
3207 
3208 	rcu_read_unlock();
3209 }
3210 
3211 static void sock_def_destruct(struct sock *sk)
3212 {
3213 }
3214 
3215 void sk_send_sigurg(struct sock *sk)
3216 {
3217 	if (sk->sk_socket && sk->sk_socket->file)
3218 		if (send_sigurg(&sk->sk_socket->file->f_owner))
3219 			sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI);
3220 }
3221 EXPORT_SYMBOL(sk_send_sigurg);
3222 
3223 void sk_reset_timer(struct sock *sk, struct timer_list* timer,
3224 		    unsigned long expires)
3225 {
3226 	if (!mod_timer(timer, expires))
3227 		sock_hold(sk);
3228 }
3229 EXPORT_SYMBOL(sk_reset_timer);
3230 
3231 void sk_stop_timer(struct sock *sk, struct timer_list* timer)
3232 {
3233 	if (del_timer(timer))
3234 		__sock_put(sk);
3235 }
3236 EXPORT_SYMBOL(sk_stop_timer);
3237 
3238 void sk_stop_timer_sync(struct sock *sk, struct timer_list *timer)
3239 {
3240 	if (del_timer_sync(timer))
3241 		__sock_put(sk);
3242 }
3243 EXPORT_SYMBOL(sk_stop_timer_sync);
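
/*
 * Example (illustrative sketch; "delay" is an assumed local): protocols
 * arm sk->sk_timer with sk_reset_timer(), which takes a sock reference
 * only if the timer was not already pending; the matching sk_stop_timer()
 * drops it again.
 *
 *	sk_reset_timer(sk, &sk->sk_timer, jiffies + delay);
 *	...
 *	sk_stop_timer(sk, &sk->sk_timer);
 */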
3244 
3245 void sock_init_data(struct socket *sock, struct sock *sk)
3246 {
3247 	sk_init_common(sk);
3248 	sk->sk_send_head	=	NULL;
3249 
3250 	timer_setup(&sk->sk_timer, NULL, 0);
3251 
3252 	sk->sk_allocation	=	GFP_KERNEL;
3253 	sk->sk_rcvbuf		=	sysctl_rmem_default;
3254 	sk->sk_sndbuf		=	sysctl_wmem_default;
3255 	sk->sk_state		=	TCP_CLOSE;
3256 	sk_set_socket(sk, sock);
3257 
3258 	sock_set_flag(sk, SOCK_ZAPPED);
3259 
3260 	if (sock) {
3261 		sk->sk_type	=	sock->type;
3262 		RCU_INIT_POINTER(sk->sk_wq, &sock->wq);
3263 		sock->sk	=	sk;
3264 		sk->sk_uid	=	SOCK_INODE(sock)->i_uid;
3265 	} else {
3266 		RCU_INIT_POINTER(sk->sk_wq, NULL);
3267 		sk->sk_uid	=	make_kuid(sock_net(sk)->user_ns, 0);
3268 	}
3269 
3270 	rwlock_init(&sk->sk_callback_lock);
3271 	if (sk->sk_kern_sock)
3272 		lockdep_set_class_and_name(
3273 			&sk->sk_callback_lock,
3274 			af_kern_callback_keys + sk->sk_family,
3275 			af_family_kern_clock_key_strings[sk->sk_family]);
3276 	else
3277 		lockdep_set_class_and_name(
3278 			&sk->sk_callback_lock,
3279 			af_callback_keys + sk->sk_family,
3280 			af_family_clock_key_strings[sk->sk_family]);
3281 
3282 	sk->sk_state_change	=	sock_def_wakeup;
3283 	sk->sk_data_ready	=	sock_def_readable;
3284 	sk->sk_write_space	=	sock_def_write_space;
3285 	sk->sk_error_report	=	sock_def_error_report;
3286 	sk->sk_destruct		=	sock_def_destruct;
3287 
3288 	sk->sk_frag.page	=	NULL;
3289 	sk->sk_frag.offset	=	0;
3290 	sk->sk_peek_off		=	-1;
3291 
3292 	sk->sk_peer_pid 	=	NULL;
3293 	sk->sk_peer_cred	=	NULL;
3294 	spin_lock_init(&sk->sk_peer_lock);
3295 
3296 	sk->sk_write_pending	=	0;
3297 	sk->sk_rcvlowat		=	1;
3298 	sk->sk_rcvtimeo		=	MAX_SCHEDULE_TIMEOUT;
3299 	sk->sk_sndtimeo		=	MAX_SCHEDULE_TIMEOUT;
3300 
3301 	sk->sk_stamp = SK_DEFAULT_STAMP;
3302 #if BITS_PER_LONG==32
3303 	seqlock_init(&sk->sk_stamp_seq);
3304 #endif
3305 	atomic_set(&sk->sk_zckey, 0);
3306 
3307 #ifdef CONFIG_NET_RX_BUSY_POLL
3308 	sk->sk_napi_id		=	0;
3309 	sk->sk_ll_usec		=	sysctl_net_busy_read;
3310 #endif
3311 
3312 	sk->sk_max_pacing_rate = ~0UL;
3313 	sk->sk_pacing_rate = ~0UL;
3314 	WRITE_ONCE(sk->sk_pacing_shift, 10);
3315 	sk->sk_incoming_cpu = -1;
3316 	sk->sk_txrehash = SOCK_TXREHASH_DEFAULT;
3317 
3318 	sk_rx_queue_clear(sk);
3319 	/*
3320 	 * Before updating sk_refcnt, we must commit prior changes to memory
3321 	 * (Documentation/RCU/rculist_nulls.rst for details)
3322 	 */
3323 	smp_wmb();
3324 	refcount_set(&sk->sk_refcnt, 1);
3325 	atomic_set(&sk->sk_drops, 0);
3326 }
3327 EXPORT_SYMBOL(sock_init_data);
3328 
3329 void lock_sock_nested(struct sock *sk, int subclass)
3330 {
3331 	/* The sk_lock has mutex_lock() semantics here. */
3332 	mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
3333 
3334 	might_sleep();
3335 	spin_lock_bh(&sk->sk_lock.slock);
3336 	if (sock_owned_by_user_nocheck(sk))
3337 		__lock_sock(sk);
3338 	sk->sk_lock.owned = 1;
3339 	spin_unlock_bh(&sk->sk_lock.slock);
3340 }
3341 EXPORT_SYMBOL(lock_sock_nested);
3342 
3343 void release_sock(struct sock *sk)
3344 {
3345 	spin_lock_bh(&sk->sk_lock.slock);
3346 	if (sk->sk_backlog.tail)
3347 		__release_sock(sk);
3348 
3349 	/* Warning: release_cb() might need to release sk ownership,
3350 	 * i.e. call sock_release_ownership(sk) before us.
3351 	 */
3352 	if (sk->sk_prot->release_cb)
3353 		sk->sk_prot->release_cb(sk);
3354 
3355 	sock_release_ownership(sk);
3356 	if (waitqueue_active(&sk->sk_lock.wq))
3357 		wake_up(&sk->sk_lock.wq);
3358 	spin_unlock_bh(&sk->sk_lock.slock);
3359 }
3360 EXPORT_SYMBOL(release_sock);
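
/*
 * Example (illustrative sketch): the usual process-context pattern around
 * socket state that the softirq path may also touch; packets backlogged
 * while the lock was owned are processed by release_sock().
 *
 *	lock_sock(sk);
 *	... update socket state ...
 *	release_sock(sk);
 */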
3361 
3362 bool __lock_sock_fast(struct sock *sk) __acquires(&sk->sk_lock.slock)
3363 {
3364 	might_sleep();
3365 	spin_lock_bh(&sk->sk_lock.slock);
3366 
3367 	if (!sock_owned_by_user_nocheck(sk)) {
3368 		/*
3369 		 * Fast path return with bottom halves disabled and
3370 		 * sock::sk_lock.slock held.
3371 		 *
3372 		 * The 'mutex' is not contended and holding
3373 		 * sock::sk_lock.slock prevents all other lockers from
3374 		 * proceeding, so the corresponding unlock_sock_fast() can
3375 		 * avoid the slow path of release_sock() completely and
3376 		 * just release slock.
3377 		 *
3378 		 * From a semantic POV this is equivalent to 'acquiring'
3379 		 * the 'mutex', hence the corresponding lockdep
3380 		 * mutex_release() has to happen in the fast path of
3381 		 * unlock_sock_fast().
3382 		 */
3383 		return false;
3384 	}
3385 
3386 	__lock_sock(sk);
3387 	sk->sk_lock.owned = 1;
3388 	__acquire(&sk->sk_lock.slock);
3389 	spin_unlock_bh(&sk->sk_lock.slock);
3390 	return true;
3391 }
3392 EXPORT_SYMBOL(__lock_sock_fast);
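
/*
 * Example (illustrative sketch, assuming the lock_sock_fast() and
 * unlock_sock_fast() wrappers from include/net/sock.h): short critical
 * sections remember whether the slow path was taken.
 *
 *	bool slow = lock_sock_fast(sk);
 *
 *	... short critical section ...
 *	unlock_sock_fast(sk, slow);
 */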
3393 
3394 int sock_gettstamp(struct socket *sock, void __user *userstamp,
3395 		   bool timeval, bool time32)
3396 {
3397 	struct sock *sk = sock->sk;
3398 	struct timespec64 ts;
3399 
3400 	sock_enable_timestamp(sk, SOCK_TIMESTAMP);
3401 	ts = ktime_to_timespec64(sock_read_timestamp(sk));
3402 	if (ts.tv_sec == -1)
3403 		return -ENOENT;
3404 	if (ts.tv_sec == 0) {
3405 		ktime_t kt = ktime_get_real();
3406 		sock_write_timestamp(sk, kt);
3407 		ts = ktime_to_timespec64(kt);
3408 	}
3409 
3410 	if (timeval)
3411 		ts.tv_nsec /= 1000;
3412 
3413 #ifdef CONFIG_COMPAT_32BIT_TIME
3414 	if (time32)
3415 		return put_old_timespec32(&ts, userstamp);
3416 #endif
3417 #ifdef CONFIG_SPARC64
3418 	/* beware of padding in sparc64 timeval */
3419 	if (timeval && !in_compat_syscall()) {
3420 		struct __kernel_old_timeval __user tv = {
3421 			.tv_sec = ts.tv_sec,
3422 			.tv_usec = ts.tv_nsec,
3423 		};
3424 		if (copy_to_user(userstamp, &tv, sizeof(tv)))
3425 			return -EFAULT;
3426 		return 0;
3427 	}
3428 #endif
3429 	return put_timespec64(&ts, userstamp);
3430 }
3431 EXPORT_SYMBOL(sock_gettstamp);
3432 
3433 void sock_enable_timestamp(struct sock *sk, enum sock_flags flag)
3434 {
3435 	if (!sock_flag(sk, flag)) {
3436 		unsigned long previous_flags = sk->sk_flags;
3437 
3438 		sock_set_flag(sk, flag);
3439 		/*
3440 		 * we just set one of the two flags which require net
3441 		 * time stamping, but time stamping might have been on
3442 		 * already because of the other one
3443 		 */
3444 		if (sock_needs_netstamp(sk) &&
3445 		    !(previous_flags & SK_FLAGS_TIMESTAMP))
3446 			net_enable_timestamp();
3447 	}
3448 }
3449 
3450 int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
3451 		       int level, int type)
3452 {
3453 	struct sock_exterr_skb *serr;
3454 	struct sk_buff *skb;
3455 	int copied, err;
3456 
3457 	err = -EAGAIN;
3458 	skb = sock_dequeue_err_skb(sk);
3459 	if (skb == NULL)
3460 		goto out;
3461 
3462 	copied = skb->len;
3463 	if (copied > len) {
3464 		msg->msg_flags |= MSG_TRUNC;
3465 		copied = len;
3466 	}
3467 	err = skb_copy_datagram_msg(skb, 0, msg, copied);
3468 	if (err)
3469 		goto out_free_skb;
3470 
3471 	sock_recv_timestamp(msg, sk, skb);
3472 
3473 	serr = SKB_EXT_ERR(skb);
3474 	put_cmsg(msg, level, type, sizeof(serr->ee), &serr->ee);
3475 
3476 	msg->msg_flags |= MSG_ERRQUEUE;
3477 	err = copied;
3478 
3479 out_free_skb:
3480 	kfree_skb(skb);
3481 out:
3482 	return err;
3483 }
3484 EXPORT_SYMBOL(sock_recv_errqueue);
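
/*
 * Example (illustrative sketch; SOL_EXAMPLE and EXAMPLE_RECVERR are
 * hypothetical level/type values): a protocol without its own error
 * queue handling can service MSG_ERRQUEUE in recvmsg() like this.
 *
 *	if (flags & MSG_ERRQUEUE)
 *		return sock_recv_errqueue(sk, msg, len,
 *					  SOL_EXAMPLE, EXAMPLE_RECVERR);
 */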
3485 
3486 /*
3487  *	Get a socket option on a socket.
3488  *
3489  *	FIX: POSIX 1003.1g is very ambiguous here. It states that
3490  *	asynchronous errors should be reported by getsockopt. We assume
3491  *	this means if you specify SO_ERROR (otherwise what's the point of it).
3492  */
3493 int sock_common_getsockopt(struct socket *sock, int level, int optname,
3494 			   char __user *optval, int __user *optlen)
3495 {
3496 	struct sock *sk = sock->sk;
3497 
3498 	return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
3499 }
3500 EXPORT_SYMBOL(sock_common_getsockopt);
3501 
3502 int sock_common_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
3503 			int flags)
3504 {
3505 	struct sock *sk = sock->sk;
3506 	int addr_len = 0;
3507 	int err;
3508 
3509 	err = sk->sk_prot->recvmsg(sk, msg, size, flags, &addr_len);
3510 	if (err >= 0)
3511 		msg->msg_namelen = addr_len;
3512 	return err;
3513 }
3514 EXPORT_SYMBOL(sock_common_recvmsg);
3515 
3516 /*
3517  *	Set socket options on a socket.
3518  */
3519 int sock_common_setsockopt(struct socket *sock, int level, int optname,
3520 			   sockptr_t optval, unsigned int optlen)
3521 {
3522 	struct sock *sk = sock->sk;
3523 
3524 	return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
3525 }
3526 EXPORT_SYMBOL(sock_common_setsockopt);
3527 
3528 void sk_common_release(struct sock *sk)
3529 {
3530 	if (sk->sk_prot->destroy)
3531 		sk->sk_prot->destroy(sk);
3532 
3533 	/*
3534 	 * Observation: when sk_common_release is called, processes have
3535 	 * no access to the socket. But the network stack still does.
3536 	 * Step one, detach it from networking:
3537 	 *
3538 	 * A. Remove from hash tables.
3539 	 */
3540 
3541 	sk->sk_prot->unhash(sk);
3542 
3543 	/*
3544 	 * At this point the socket cannot receive new packets, but it is possible
3545 	 * that some packets are in flight because some CPU runs the receiver and
3546 	 * did the hash table lookup before we unhashed the socket. They will reach
3547 	 * the receive queue and will be purged by the socket destructor.
3548 	 *
3549 	 * Also we still have packets pending on the receive queue and probably our
3550 	 * own packets waiting in device queues. sock_destroy will drain the receive
3551 	 * queue, but transmitted packets will delay socket destruction until the
3552 	 * last reference is released.
3553 	 */
3554 
3555 	sock_orphan(sk);
3556 
3557 	xfrm_sk_free_policy(sk);
3558 
3559 	sk_refcnt_debug_release(sk);
3560 
3561 	sock_put(sk);
3562 }
3563 EXPORT_SYMBOL(sk_common_release);
3564 
3565 void sk_get_meminfo(const struct sock *sk, u32 *mem)
3566 {
3567 	memset(mem, 0, sizeof(*mem) * SK_MEMINFO_VARS);
3568 
3569 	mem[SK_MEMINFO_RMEM_ALLOC] = sk_rmem_alloc_get(sk);
3570 	mem[SK_MEMINFO_RCVBUF] = READ_ONCE(sk->sk_rcvbuf);
3571 	mem[SK_MEMINFO_WMEM_ALLOC] = sk_wmem_alloc_get(sk);
3572 	mem[SK_MEMINFO_SNDBUF] = READ_ONCE(sk->sk_sndbuf);
3573 	mem[SK_MEMINFO_FWD_ALLOC] = sk->sk_forward_alloc;
3574 	mem[SK_MEMINFO_WMEM_QUEUED] = READ_ONCE(sk->sk_wmem_queued);
3575 	mem[SK_MEMINFO_OPTMEM] = atomic_read(&sk->sk_omem_alloc);
3576 	mem[SK_MEMINFO_BACKLOG] = READ_ONCE(sk->sk_backlog.len);
3577 	mem[SK_MEMINFO_DROPS] = atomic_read(&sk->sk_drops);
3578 }
3579 
3580 #ifdef CONFIG_PROC_FS
3581 static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR);
3582 
3583 int sock_prot_inuse_get(struct net *net, struct proto *prot)
3584 {
3585 	int cpu, idx = prot->inuse_idx;
3586 	int res = 0;
3587 
3588 	for_each_possible_cpu(cpu)
3589 		res += per_cpu_ptr(net->core.prot_inuse, cpu)->val[idx];
3590 
3591 	return res >= 0 ? res : 0;
3592 }
3593 EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
3594 
3595 int sock_inuse_get(struct net *net)
3596 {
3597 	int cpu, res = 0;
3598 
3599 	for_each_possible_cpu(cpu)
3600 		res += per_cpu_ptr(net->core.prot_inuse, cpu)->all;
3601 
3602 	return res;
3603 }
3605 EXPORT_SYMBOL_GPL(sock_inuse_get);
3606 
3607 static int __net_init sock_inuse_init_net(struct net *net)
3608 {
3609 	net->core.prot_inuse = alloc_percpu(struct prot_inuse);
3610 	if (net->core.prot_inuse == NULL)
3611 		return -ENOMEM;
3612 	return 0;
3613 }
3614 
3615 static void __net_exit sock_inuse_exit_net(struct net *net)
3616 {
3617 	free_percpu(net->core.prot_inuse);
3618 }
3619 
3620 static struct pernet_operations net_inuse_ops = {
3621 	.init = sock_inuse_init_net,
3622 	.exit = sock_inuse_exit_net,
3623 };
3624 
3625 static __init int net_inuse_init(void)
3626 {
3627 	if (register_pernet_subsys(&net_inuse_ops))
3628 		panic("Cannot initialize net inuse counters");
3629 
3630 	return 0;
3631 }
3632 
3633 core_initcall(net_inuse_init);
3634 
3635 static int assign_proto_idx(struct proto *prot)
3636 {
3637 	prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR);
3638 
3639 	if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) {
3640 		pr_err("PROTO_INUSE_NR exhausted\n");
3641 		return -ENOSPC;
3642 	}
3643 
3644 	set_bit(prot->inuse_idx, proto_inuse_idx);
3645 	return 0;
3646 }
3647 
3648 static void release_proto_idx(struct proto *prot)
3649 {
3650 	if (prot->inuse_idx != PROTO_INUSE_NR - 1)
3651 		clear_bit(prot->inuse_idx, proto_inuse_idx);
3652 }
3653 #else
3654 static inline int assign_proto_idx(struct proto *prot)
3655 {
3656 	return 0;
3657 }
3658 
3659 static inline void release_proto_idx(struct proto *prot)
3660 {
3661 }
3662 
3663 #endif
3664 
3665 static void tw_prot_cleanup(struct timewait_sock_ops *twsk_prot)
3666 {
3667 	if (!twsk_prot)
3668 		return;
3669 	kfree(twsk_prot->twsk_slab_name);
3670 	twsk_prot->twsk_slab_name = NULL;
3671 	kmem_cache_destroy(twsk_prot->twsk_slab);
3672 	twsk_prot->twsk_slab = NULL;
3673 }
3674 
3675 static int tw_prot_init(const struct proto *prot)
3676 {
3677 	struct timewait_sock_ops *twsk_prot = prot->twsk_prot;
3678 
3679 	if (!twsk_prot)
3680 		return 0;
3681 
3682 	twsk_prot->twsk_slab_name = kasprintf(GFP_KERNEL, "tw_sock_%s",
3683 					      prot->name);
3684 	if (!twsk_prot->twsk_slab_name)
3685 		return -ENOMEM;
3686 
3687 	twsk_prot->twsk_slab =
3688 		kmem_cache_create(twsk_prot->twsk_slab_name,
3689 				  twsk_prot->twsk_obj_size, 0,
3690 				  SLAB_ACCOUNT | prot->slab_flags,
3691 				  NULL);
3692 	if (!twsk_prot->twsk_slab) {
3693 		pr_crit("%s: Can't create timewait sock SLAB cache!\n",
3694 			prot->name);
3695 		return -ENOMEM;
3696 	}
3697 
3698 	return 0;
3699 }
3700 
3701 static void req_prot_cleanup(struct request_sock_ops *rsk_prot)
3702 {
3703 	if (!rsk_prot)
3704 		return;
3705 	kfree(rsk_prot->slab_name);
3706 	rsk_prot->slab_name = NULL;
3707 	kmem_cache_destroy(rsk_prot->slab);
3708 	rsk_prot->slab = NULL;
3709 }
3710 
3711 static int req_prot_init(const struct proto *prot)
3712 {
3713 	struct request_sock_ops *rsk_prot = prot->rsk_prot;
3714 
3715 	if (!rsk_prot)
3716 		return 0;
3717 
3718 	rsk_prot->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s",
3719 					prot->name);
3720 	if (!rsk_prot->slab_name)
3721 		return -ENOMEM;
3722 
3723 	rsk_prot->slab = kmem_cache_create(rsk_prot->slab_name,
3724 					   rsk_prot->obj_size, 0,
3725 					   SLAB_ACCOUNT | prot->slab_flags,
3726 					   NULL);
3727 
3728 	if (!rsk_prot->slab) {
3729 		pr_crit("%s: Can't create request sock SLAB cache!\n",
3730 			prot->name);
3731 		return -ENOMEM;
3732 	}
3733 	return 0;
3734 }
3735 
3736 int proto_register(struct proto *prot, int alloc_slab)
3737 {
3738 	int ret = -ENOBUFS;
3739 
3740 	if (prot->memory_allocated && !prot->sysctl_mem) {
3741 		pr_err("%s: missing sysctl_mem\n", prot->name);
3742 		return -EINVAL;
3743 	}
3744 	if (alloc_slab) {
3745 		prot->slab = kmem_cache_create_usercopy(prot->name,
3746 					prot->obj_size, 0,
3747 					SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT |
3748 					prot->slab_flags,
3749 					prot->useroffset, prot->usersize,
3750 					NULL);
3751 
3752 		if (prot->slab == NULL) {
3753 			pr_crit("%s: Can't create sock SLAB cache!\n",
3754 				prot->name);
3755 			goto out;
3756 		}
3757 
3758 		if (req_prot_init(prot))
3759 			goto out_free_request_sock_slab;
3760 
3761 		if (tw_prot_init(prot))
3762 			goto out_free_timewait_sock_slab;
3763 	}
3764 
3765 	mutex_lock(&proto_list_mutex);
3766 	ret = assign_proto_idx(prot);
3767 	if (ret) {
3768 		mutex_unlock(&proto_list_mutex);
3769 		goto out_free_timewait_sock_slab;
3770 	}
3771 	list_add(&prot->node, &proto_list);
3772 	mutex_unlock(&proto_list_mutex);
3773 	return ret;
3774 
3775 out_free_timewait_sock_slab:
3776 	if (alloc_slab)
3777 		tw_prot_cleanup(prot->twsk_prot);
3778 out_free_request_sock_slab:
3779 	if (alloc_slab) {
3780 		req_prot_cleanup(prot->rsk_prot);
3781 
3782 		kmem_cache_destroy(prot->slab);
3783 		prot->slab = NULL;
3784 	}
3785 out:
3786 	return ret;
3787 }
3788 EXPORT_SYMBOL(proto_register);
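
/*
 * Example (illustrative sketch; example_proto and struct example_sock are
 * hypothetical): a protocol registers its struct proto once at module
 * init, requesting a dedicated slab, and unregisters it again on exit.
 *
 *	static struct proto example_proto = {
 *		.name	  = "EXAMPLE",
 *		.owner	  = THIS_MODULE,
 *		.obj_size = sizeof(struct example_sock),
 *	};
 *
 *	err = proto_register(&example_proto, 1);
 *	if (err)
 *		return err;
 *	...
 *	proto_unregister(&example_proto);
 */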
3789 
3790 void proto_unregister(struct proto *prot)
3791 {
3792 	mutex_lock(&proto_list_mutex);
3793 	release_proto_idx(prot);
3794 	list_del(&prot->node);
3795 	mutex_unlock(&proto_list_mutex);
3796 
3797 	kmem_cache_destroy(prot->slab);
3798 	prot->slab = NULL;
3799 
3800 	req_prot_cleanup(prot->rsk_prot);
3801 	tw_prot_cleanup(prot->twsk_prot);
3802 }
3803 EXPORT_SYMBOL(proto_unregister);
3804 
3805 int sock_load_diag_module(int family, int protocol)
3806 {
3807 	if (!protocol) {
3808 		if (!sock_is_registered(family))
3809 			return -ENOENT;
3810 
3811 		return request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
3812 				      NETLINK_SOCK_DIAG, family);
3813 	}
3814 
3815 #ifdef CONFIG_INET
3816 	if (family == AF_INET &&
3817 	    protocol != IPPROTO_RAW &&
3818 	    protocol < MAX_INET_PROTOS &&
3819 	    !rcu_access_pointer(inet_protos[protocol]))
3820 		return -ENOENT;
3821 #endif
3822 
3823 	return request_module("net-pf-%d-proto-%d-type-%d-%d", PF_NETLINK,
3824 			      NETLINK_SOCK_DIAG, family, protocol);
3825 }
3826 EXPORT_SYMBOL(sock_load_diag_module);
3827 
3828 #ifdef CONFIG_PROC_FS
3829 static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
3830 	__acquires(proto_list_mutex)
3831 {
3832 	mutex_lock(&proto_list_mutex);
3833 	return seq_list_start_head(&proto_list, *pos);
3834 }
3835 
3836 static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3837 {
3838 	return seq_list_next(v, &proto_list, pos);
3839 }
3840 
3841 static void proto_seq_stop(struct seq_file *seq, void *v)
3842 	__releases(proto_list_mutex)
3843 {
3844 	mutex_unlock(&proto_list_mutex);
3845 }
3846 
3847 static char proto_method_implemented(const void *method)
3848 {
3849 	return method == NULL ? 'n' : 'y';
3850 }
3851 static long sock_prot_memory_allocated(struct proto *proto)
3852 {
3853 	return proto->memory_allocated != NULL ? proto_memory_allocated(proto) : -1L;
3854 }
3855 
3856 static const char *sock_prot_memory_pressure(struct proto *proto)
3857 {
3858 	return proto->memory_pressure != NULL ?
3859 	proto_memory_pressure(proto) ? "yes" : "no" : "NI";
3860 }
3861 
3862 static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
3863 {
3864 
3865 	seq_printf(seq, "%-9s %4u %6d  %6ld   %-3s %6u   %-3s  %-10s "
3866 			"%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
3867 		   proto->name,
3868 		   proto->obj_size,
3869 		   sock_prot_inuse_get(seq_file_net(seq), proto),
3870 		   sock_prot_memory_allocated(proto),
3871 		   sock_prot_memory_pressure(proto),
3872 		   proto->max_header,
3873 		   proto->slab == NULL ? "no" : "yes",
3874 		   module_name(proto->owner),
3875 		   proto_method_implemented(proto->close),
3876 		   proto_method_implemented(proto->connect),
3877 		   proto_method_implemented(proto->disconnect),
3878 		   proto_method_implemented(proto->accept),
3879 		   proto_method_implemented(proto->ioctl),
3880 		   proto_method_implemented(proto->init),
3881 		   proto_method_implemented(proto->destroy),
3882 		   proto_method_implemented(proto->shutdown),
3883 		   proto_method_implemented(proto->setsockopt),
3884 		   proto_method_implemented(proto->getsockopt),
3885 		   proto_method_implemented(proto->sendmsg),
3886 		   proto_method_implemented(proto->recvmsg),
3887 		   proto_method_implemented(proto->sendpage),
3888 		   proto_method_implemented(proto->bind),
3889 		   proto_method_implemented(proto->backlog_rcv),
3890 		   proto_method_implemented(proto->hash),
3891 		   proto_method_implemented(proto->unhash),
3892 		   proto_method_implemented(proto->get_port),
3893 		   proto_method_implemented(proto->enter_memory_pressure));
3894 }
3895 
3896 static int proto_seq_show(struct seq_file *seq, void *v)
3897 {
3898 	if (v == &proto_list)
3899 		seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s",
3900 			   "protocol",
3901 			   "size",
3902 			   "sockets",
3903 			   "memory",
3904 			   "press",
3905 			   "maxhdr",
3906 			   "slab",
3907 			   "module",
3908 			   "cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\n");
3909 	else
3910 		proto_seq_printf(seq, list_entry(v, struct proto, node));
3911 	return 0;
3912 }
3913 
3914 static const struct seq_operations proto_seq_ops = {
3915 	.start  = proto_seq_start,
3916 	.next   = proto_seq_next,
3917 	.stop   = proto_seq_stop,
3918 	.show   = proto_seq_show,
3919 };
3920 
3921 static __net_init int proto_init_net(struct net *net)
3922 {
3923 	if (!proc_create_net("protocols", 0444, net->proc_net, &proto_seq_ops,
3924 			sizeof(struct seq_net_private)))
3925 		return -ENOMEM;
3926 
3927 	return 0;
3928 }
3929 
3930 static __net_exit void proto_exit_net(struct net *net)
3931 {
3932 	remove_proc_entry("protocols", net->proc_net);
3933 }
3934 
3936 static __net_initdata struct pernet_operations proto_net_ops = {
3937 	.init = proto_init_net,
3938 	.exit = proto_exit_net,
3939 };
3940 
3941 static int __init proto_init(void)
3942 {
3943 	return register_pernet_subsys(&proto_net_ops);
3944 }
3945 
3946 subsys_initcall(proto_init);
3947 
3948 #endif /* PROC_FS */
3949 
3950 #ifdef CONFIG_NET_RX_BUSY_POLL
3951 bool sk_busy_loop_end(void *p, unsigned long start_time)
3952 {
3953 	struct sock *sk = p;
3954 
3955 	return !skb_queue_empty_lockless(&sk->sk_receive_queue) ||
3956 	       sk_busy_loop_timeout(sk, start_time);
3957 }
3958 EXPORT_SYMBOL(sk_busy_loop_end);
3959 #endif /* CONFIG_NET_RX_BUSY_POLL */
3960 
3961 int sock_bind_add(struct sock *sk, struct sockaddr *addr, int addr_len)
3962 {
3963 	if (!sk->sk_prot->bind_add)
3964 		return -EOPNOTSUPP;
3965 	return sk->sk_prot->bind_add(sk, addr, addr_len);
3966 }
3967 EXPORT_SYMBOL(sock_bind_add);
3968