/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic socket support routines. Memory allocators, socket lock/release
 *		handler for protocols to use and generic option handler.
 *
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Alan Cox, <A.Cox@swansea.ac.uk>
 *
 * Fixes:
 *		Alan Cox	: 	Numerous verify_area() problems
 *		Alan Cox	:	Connecting on a connecting socket
 *					now returns an error for tcp.
 *		Alan Cox	:	sock->protocol is set correctly.
 *					and is not sometimes left as 0.
 *		Alan Cox	:	connect handles icmp errors on a
 *					connect properly. Unfortunately there
 *					is a restart syscall nasty there. I
 *					can't match BSD without hacking the C
 *					library. Ideas urgently sought!
 *		Alan Cox	:	Disallow bind() to addresses that are
 *					not ours - especially broadcast ones!!
 *		Alan Cox	:	Socket 1024 _IS_ ok for users. (fencepost)
 *		Alan Cox	:	sock_wfree/sock_rfree don't destroy sockets,
 *					instead they leave that for the DESTROY timer.
 *		Alan Cox	:	Clean up error flag in accept
 *		Alan Cox	:	TCP ack handling is buggy, the DESTROY timer
 *					was buggy. Put a remove_sock() in the handler
 *					for memory when we hit 0. Also altered the timer
 *					code. The ACK stuff can wait and needs major
 *					TCP layer surgery.
 *		Alan Cox	:	Fixed TCP ack bug, removed remove sock
 *					and fixed timer/inet_bh race.
 *		Alan Cox	:	Added zapped flag for TCP
 *		Alan Cox	:	Move kfree_skb into skbuff.c and tidied up surplus code
 *		Alan Cox	:	for new sk_buff allocations wmalloc/rmalloc now call alloc_skb
 *		Alan Cox	:	kfree_s calls now are kfree_skbmem so we can track skb resources
 *		Alan Cox	:	Supports socket option broadcast now as does udp. Packet and raw need fixing.
 *		Alan Cox	:	Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so...
 *		Rick Sladkey	:	Relaxed UDP rules for matching packets.
 *		C.E.Hawkins	:	IFF_PROMISC/SIOCGHWADDR support
 *	Pauline Middelink	:	identd support
 *		Alan Cox	:	Fixed connect() taking signals I think.
 *		Alan Cox	:	SO_LINGER supported
 *		Alan Cox	:	Error reporting fixes
 *		Anonymous	:	inet_create tidied up (sk->reuse setting)
 *		Alan Cox	:	inet sockets don't set sk->type!
 *		Alan Cox	:	Split socket option code
 *		Alan Cox	:	Callbacks
 *		Alan Cox	:	Nagle flag for Charles & Johannes stuff
 *		Alex		:	Removed restriction on inet fioctl
 *		Alan Cox	:	Splitting INET from NET core
 *		Alan Cox	:	Fixed bogus SO_TYPE handling in getsockopt()
 *		Adam Caldwell	:	Missing return in SO_DONTROUTE/SO_DEBUG code
 *		Alan Cox	:	Split IP from generic code
 *		Alan Cox	:	New kfree_skbmem()
 *		Alan Cox	:	Make SO_DEBUG superuser only.
 *		Alan Cox	:	Allow anyone to clear SO_DEBUG
 *					(compatibility fix)
 *		Alan Cox	:	Added optimistic memory grabbing for AF_UNIX throughput.
 *		Alan Cox	:	Allocator for a socket is settable.
 *		Alan Cox	:	SO_ERROR includes soft errors.
 *		Alan Cox	:	Allow NULL arguments on some SO_ opts
 *		Alan Cox	: 	Generic socket allocation to make hooks
 *					easier (suggested by Craig Metz).
 *		Michael Pall	:	SO_ERROR returns positive errno again
 *              Steve Whitehouse:       Added default destructor to free
 *                                      protocol private data.
 *              Steve Whitehouse:       Added various other default routines
 *                                      common to several socket families.
 *              Chris Evans     :       Call suser() check last on F_SETOWN
 *		Jay Schulist	:	Added SO_ATTACH_FILTER and SO_DETACH_FILTER.
 *		Andi Kleen	:	Add sock_kmalloc()/sock_kfree_s()
 *		Andi Kleen	:	Fix write_space callback
 *		Chris Evans	:	Security fixes - signedness again
 *		Arnaldo C. Melo :       cleanups, use skb_queue_purge
 *
 * To Fix:
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/errqueue.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/poll.h>
#include <linux/tcp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/user_namespace.h>
#include <linux/static_key.h>
#include <linux/memcontrol.h>
#include <linux/prefetch.h>

#include <linux/uaccess.h>

#include <linux/netdevice.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/sock.h>
#include <linux/net_tstamp.h>
#include <net/xfrm.h>
#include <linux/ipsec.h>
#include <net/cls_cgroup.h>
#include <net/netprio_cgroup.h>
#include <linux/sock_diag.h>

#include <linux/filter.h>
#include <net/sock_reuseport.h>

#include <trace/events/sock.h>

#ifdef CONFIG_INET
#include <net/tcp.h>
#endif

#include <net/busy_poll.h>

static DEFINE_MUTEX(proto_list_mutex);
static LIST_HEAD(proto_list);

/**
 * sk_ns_capable - General socket capability test
 * @sk: Socket to use a capability on or through
 * @user_ns: The user namespace of the capability to use
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket had the capability @cap when
 * the socket was created and the current process has it in the user
 * namespace @user_ns.
 */
bool sk_ns_capable(const struct sock *sk,
		   struct user_namespace *user_ns, int cap)
{
	return file_ns_capable(sk->sk_socket->file, user_ns, cap) &&
		ns_capable(user_ns, cap);
}
EXPORT_SYMBOL(sk_ns_capable);

/**
 * sk_capable - Socket global capability test
 * @sk: Socket to use a capability on or through
 * @cap: The global capability to use
 *
 * Test to see if the opener of the socket had the capability @cap when
 * the socket was created and the current process has it in all user
 * namespaces.
 */
bool sk_capable(const struct sock *sk, int cap)
{
	return sk_ns_capable(sk, &init_user_ns, cap);
}
EXPORT_SYMBOL(sk_capable);

/**
 * sk_net_capable - Network namespace socket capability test
 * @sk: Socket to use a capability on or through
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket had the capability @cap when
 * the socket was created and the current process has it over the
 * network namespace the socket is a member of.
 */
bool sk_net_capable(const struct sock *sk, int cap)
{
	return sk_ns_capable(sk, sock_net(sk)->user_ns, cap);
}
EXPORT_SYMBOL(sk_net_capable);

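/*
 * Editor's illustrative sketch (not part of the original file): a
 * protocol-level handler might use these helpers to gate a privileged
 * option on both the socket opener and the caller.
 * some_proto_setsockopt() is a hypothetical name.
 *
 *	static int some_proto_setsockopt(struct sock *sk, int val)
 *	{
 *		if (!sk_net_capable(sk, CAP_NET_ADMIN))
 *			return -EPERM;
 *		...
 *	}
 */
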
/*
 * Each address family might have different locking rules, so we have
 * one slock key per address family:
 */
static struct lock_class_key af_family_keys[AF_MAX];
static struct lock_class_key af_family_slock_keys[AF_MAX];

/*
 * Make lock validator output more readable. (We pre-construct these
 * strings at build time, so that runtime initialization of socket
 * locks is fast):
 */
static const char *const af_family_key_strings[AF_MAX+1] = {
  "sk_lock-AF_UNSPEC", "sk_lock-AF_UNIX"     , "sk_lock-AF_INET"     ,
  "sk_lock-AF_AX25"  , "sk_lock-AF_IPX"      , "sk_lock-AF_APPLETALK",
  "sk_lock-AF_NETROM", "sk_lock-AF_BRIDGE"   , "sk_lock-AF_ATMPVC"   ,
  "sk_lock-AF_X25"   , "sk_lock-AF_INET6"    , "sk_lock-AF_ROSE"     ,
  "sk_lock-AF_DECnet", "sk_lock-AF_NETBEUI"  , "sk_lock-AF_SECURITY" ,
  "sk_lock-AF_KEY"   , "sk_lock-AF_NETLINK"  , "sk_lock-AF_PACKET"   ,
  "sk_lock-AF_ASH"   , "sk_lock-AF_ECONET"   , "sk_lock-AF_ATMSVC"   ,
  "sk_lock-AF_RDS"   , "sk_lock-AF_SNA"      , "sk_lock-AF_IRDA"     ,
  "sk_lock-AF_PPPOX" , "sk_lock-AF_WANPIPE"  , "sk_lock-AF_LLC"      ,
  "sk_lock-27"       , "sk_lock-28"          , "sk_lock-AF_CAN"      ,
  "sk_lock-AF_TIPC"  , "sk_lock-AF_BLUETOOTH", "sk_lock-AF_IUCV"     ,
  "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN"     , "sk_lock-AF_PHONET"   ,
  "sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" , "sk_lock-AF_ALG"      ,
  "sk_lock-AF_NFC"   , "sk_lock-AF_VSOCK"    , "sk_lock-AF_KCM"      ,
  "sk_lock-AF_QIPCRTR", "sk_lock-AF_SMC"     , "sk_lock-AF_MAX"
};
static const char *const af_family_slock_key_strings[AF_MAX+1] = {
  "slock-AF_UNSPEC", "slock-AF_UNIX"     , "slock-AF_INET"     ,
  "slock-AF_AX25"  , "slock-AF_IPX"      , "slock-AF_APPLETALK",
  "slock-AF_NETROM", "slock-AF_BRIDGE"   , "slock-AF_ATMPVC"   ,
  "slock-AF_X25"   , "slock-AF_INET6"    , "slock-AF_ROSE"     ,
  "slock-AF_DECnet", "slock-AF_NETBEUI"  , "slock-AF_SECURITY" ,
  "slock-AF_KEY"   , "slock-AF_NETLINK"  , "slock-AF_PACKET"   ,
  "slock-AF_ASH"   , "slock-AF_ECONET"   , "slock-AF_ATMSVC"   ,
  "slock-AF_RDS"   , "slock-AF_SNA"      , "slock-AF_IRDA"     ,
  "slock-AF_PPPOX" , "slock-AF_WANPIPE"  , "slock-AF_LLC"      ,
  "slock-27"       , "slock-28"          , "slock-AF_CAN"      ,
  "slock-AF_TIPC"  , "slock-AF_BLUETOOTH", "slock-AF_IUCV"     ,
  "slock-AF_RXRPC" , "slock-AF_ISDN"     , "slock-AF_PHONET"   ,
  "slock-AF_IEEE802154", "slock-AF_CAIF" , "slock-AF_ALG"      ,
  "slock-AF_NFC"   , "slock-AF_VSOCK"    , "slock-AF_KCM"      ,
  "slock-AF_QIPCRTR", "slock-AF_SMC"     , "slock-AF_MAX"
};
static const char *const af_family_clock_key_strings[AF_MAX+1] = {
  "clock-AF_UNSPEC", "clock-AF_UNIX"     , "clock-AF_INET"     ,
  "clock-AF_AX25"  , "clock-AF_IPX"      , "clock-AF_APPLETALK",
  "clock-AF_NETROM", "clock-AF_BRIDGE"   , "clock-AF_ATMPVC"   ,
  "clock-AF_X25"   , "clock-AF_INET6"    , "clock-AF_ROSE"     ,
  "clock-AF_DECnet", "clock-AF_NETBEUI"  , "clock-AF_SECURITY" ,
  "clock-AF_KEY"   , "clock-AF_NETLINK"  , "clock-AF_PACKET"   ,
  "clock-AF_ASH"   , "clock-AF_ECONET"   , "clock-AF_ATMSVC"   ,
  "clock-AF_RDS"   , "clock-AF_SNA"      , "clock-AF_IRDA"     ,
  "clock-AF_PPPOX" , "clock-AF_WANPIPE"  , "clock-AF_LLC"      ,
  "clock-27"       , "clock-28"          , "clock-AF_CAN"      ,
  "clock-AF_TIPC"  , "clock-AF_BLUETOOTH", "clock-AF_IUCV"     ,
  "clock-AF_RXRPC" , "clock-AF_ISDN"     , "clock-AF_PHONET"   ,
  "clock-AF_IEEE802154", "clock-AF_CAIF" , "clock-AF_ALG"      ,
  "clock-AF_NFC"   , "clock-AF_VSOCK"    , "clock-AF_KCM"      ,
  "clock-AF_QIPCRTR", "clock-AF_SMC"     , "clock-AF_MAX"
};

/*
 * sk_callback_lock locking rules are per-address-family,
 * so split the lock classes by using a per-AF key:
 */
static struct lock_class_key af_callback_keys[AF_MAX];

/* Take into consideration the size of the struct sk_buff overhead in the
 * determination of these values, since that is non-constant across
 * platforms.  This makes socket queueing behavior and performance
 * not depend upon such differences.
 */
#define _SK_MEM_PACKETS		256
#define _SK_MEM_OVERHEAD	SKB_TRUESIZE(256)
#define SK_WMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
#define SK_RMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)

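/*
 * Worked example (editor's note; figures are platform dependent): on
 * 64-bit x86, SKB_TRUESIZE(256) typically comes out at 832 bytes once
 * struct sk_buff and skb_shared_info overhead is added, so
 *
 *	SK_WMEM_MAX = SK_RMEM_MAX = 832 * 256 = 212992 bytes (~208 KiB)
 *
 * which is the familiar net.core.rmem_default value on such systems.
 */
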
/* Run time adjustable parameters. */
__u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX;
EXPORT_SYMBOL(sysctl_wmem_max);
__u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
EXPORT_SYMBOL(sysctl_rmem_max);
__u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
__u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;

/* Maximal space eaten by iovec or ancillary data plus some space */
int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
EXPORT_SYMBOL(sysctl_optmem_max);

int sysctl_tstamp_allow_data __read_mostly = 1;

struct static_key memalloc_socks = STATIC_KEY_INIT_FALSE;
EXPORT_SYMBOL_GPL(memalloc_socks);

/**
 * sk_set_memalloc - sets %SOCK_MEMALLOC
 * @sk: socket to set it on
 *
 * Set %SOCK_MEMALLOC on a socket for access to emergency reserves.
 * It's the responsibility of the admin to adjust min_free_kbytes
 * to meet the requirements.
 */
void sk_set_memalloc(struct sock *sk)
{
	sock_set_flag(sk, SOCK_MEMALLOC);
	sk->sk_allocation |= __GFP_MEMALLOC;
	static_key_slow_inc(&memalloc_socks);
}
EXPORT_SYMBOL_GPL(sk_set_memalloc);

void sk_clear_memalloc(struct sock *sk)
{
	sock_reset_flag(sk, SOCK_MEMALLOC);
	sk->sk_allocation &= ~__GFP_MEMALLOC;
	static_key_slow_dec(&memalloc_socks);

	/*
	 * SOCK_MEMALLOC is allowed to ignore rmem limits to ensure forward
	 * progress of swapping. SOCK_MEMALLOC may be cleared while
	 * it has rmem allocations due to the last swapfile being deactivated
	 * but there is a risk that the socket is unusable due to exceeding
	 * the rmem limits. Reclaim the reserves and obey rmem limits again.
	 */
	sk_mem_reclaim(sk);
}
EXPORT_SYMBOL_GPL(sk_clear_memalloc);

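/*
 * Editor's illustrative sketch (hypothetical caller): a swap-over-network
 * transport would flip the flag on its socket so transmissions during
 * writeout may dip into the emergency reserves, and clear it when the
 * swap file goes away. xs_sock is a made-up name.
 *
 *	sk_set_memalloc(xs_sock);
 *	...perform swap I/O over xs_sock...
 *	sk_clear_memalloc(xs_sock);
 */
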
int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	int ret;
	unsigned long pflags = current->flags;

	/* these should have been dropped before queueing */
	BUG_ON(!sock_flag(sk, SOCK_MEMALLOC));

	current->flags |= PF_MEMALLOC;
	ret = sk->sk_backlog_rcv(sk, skb);
	tsk_restore_flags(current, pflags, PF_MEMALLOC);

	return ret;
}
EXPORT_SYMBOL(__sk_backlog_rcv);

static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
{
	struct timeval tv;

	if (optlen < sizeof(tv))
		return -EINVAL;
	if (copy_from_user(&tv, optval, sizeof(tv)))
		return -EFAULT;
	if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC)
		return -EDOM;

	if (tv.tv_sec < 0) {
		static int warned __read_mostly;

		*timeo_p = 0;
		if (warned < 10 && net_ratelimit()) {
			warned++;
			pr_info("%s: `%s' (pid %d) tries to set negative timeout\n",
				__func__, current->comm, task_pid_nr(current));
		}
		return 0;
	}
	*timeo_p = MAX_SCHEDULE_TIMEOUT;
	if (tv.tv_sec == 0 && tv.tv_usec == 0)
		return 0;
	if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT/HZ - 1))
		*timeo_p = tv.tv_sec*HZ + (tv.tv_usec+(1000000/HZ-1))/(1000000/HZ);
	return 0;
}

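/*
 * Worked example (editor's note): with HZ == 100, a user-supplied
 * timeout of { .tv_sec = 1, .tv_usec = 500000 } becomes
 *
 *	*timeo_p = 1 * 100 + (500000 + (10000 - 1)) / 10000 = 150 jiffies
 *
 * i.e. the microsecond part is rounded up to whole jiffies, while a
 * zero timeout keeps MAX_SCHEDULE_TIMEOUT (block forever).
 */
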
static void sock_warn_obsolete_bsdism(const char *name)
{
	static int warned;
	static char warncomm[TASK_COMM_LEN];
	if (strcmp(warncomm, current->comm) && warned < 5) {
		strcpy(warncomm,  current->comm);
		pr_warn("process `%s' is using obsolete %s SO_BSDCOMPAT\n",
			warncomm, name);
		warned++;
	}
}

static bool sock_needs_netstamp(const struct sock *sk)
{
	switch (sk->sk_family) {
	case AF_UNSPEC:
	case AF_UNIX:
		return false;
	default:
		return true;
	}
}

static void sock_disable_timestamp(struct sock *sk, unsigned long flags)
{
	if (sk->sk_flags & flags) {
		sk->sk_flags &= ~flags;
		if (sock_needs_netstamp(sk) &&
		    !(sk->sk_flags & SK_FLAGS_TIMESTAMP))
			net_disable_timestamp();
	}
}


int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	unsigned long flags;
	struct sk_buff_head *list = &sk->sk_receive_queue;

	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
		atomic_inc(&sk->sk_drops);
		trace_sock_rcvqueue_full(sk, skb);
		return -ENOMEM;
	}

	if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
		atomic_inc(&sk->sk_drops);
		return -ENOBUFS;
	}

	skb->dev = NULL;
	skb_set_owner_r(skb, sk);

	/* We escape from the RCU-protected region here, so make sure we
	 * don't leak an unrefcounted dst.
	 */
	skb_dst_force(skb);

	spin_lock_irqsave(&list->lock, flags);
	sock_skb_set_dropcount(sk, skb);
	__skb_queue_tail(list, skb);
	spin_unlock_irqrestore(&list->lock, flags);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk);
	return 0;
}
EXPORT_SYMBOL(__sock_queue_rcv_skb);

int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int err;

	err = sk_filter(sk, skb);
	if (err)
		return err;

	return __sock_queue_rcv_skb(sk, skb);
}
EXPORT_SYMBOL(sock_queue_rcv_skb);

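/*
 * Editor's illustrative sketch (my_proto_rcv() is hypothetical): a
 * datagram protocol's receive path typically ends like this; on error
 * the skb is still owned by the caller and must be freed.
 *
 *	static int my_proto_rcv(struct sock *sk, struct sk_buff *skb)
 *	{
 *		if (sock_queue_rcv_skb(sk, skb) < 0) {
 *			kfree_skb(skb);
 *			return NET_RX_DROP;
 *		}
 *		return NET_RX_SUCCESS;
 *	}
 */
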
int __sk_receive_skb(struct sock *sk, struct sk_buff *skb,
		     const int nested, unsigned int trim_cap, bool refcounted)
{
	int rc = NET_RX_SUCCESS;

	if (sk_filter_trim_cap(sk, skb, trim_cap))
		goto discard_and_relse;

	skb->dev = NULL;

	if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
		atomic_inc(&sk->sk_drops);
		goto discard_and_relse;
	}
	if (nested)
		bh_lock_sock_nested(sk);
	else
		bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		/*
		 * trylock + unlock semantics:
		 */
		mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);

		rc = sk_backlog_rcv(sk, skb);

		mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
	} else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
		bh_unlock_sock(sk);
		atomic_inc(&sk->sk_drops);
		goto discard_and_relse;
	}

	bh_unlock_sock(sk);
out:
	if (refcounted)
		sock_put(sk);
	return rc;
discard_and_relse:
	kfree_skb(skb);
	goto out;
}
EXPORT_SYMBOL(__sk_receive_skb);

struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = __sk_dst_get(sk);

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk_tx_queue_clear(sk);
		RCU_INIT_POINTER(sk->sk_dst_cache, NULL);
		dst_release(dst);
		return NULL;
	}

	return dst;
}
EXPORT_SYMBOL(__sk_dst_check);

struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = sk_dst_get(sk);

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk_dst_reset(sk);
		dst_release(dst);
		return NULL;
	}

	return dst;
}
EXPORT_SYMBOL(sk_dst_check);

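/*
 * Editor's illustrative sketch (schematic, not from the original
 * file): an output path revalidates the cached route before use and
 * falls back to a fresh lookup when it has gone stale.
 *
 *	dst = sk_dst_check(sk, 0);
 *	if (!dst)
 *		dst = ...do a route lookup, then sk_dst_set(sk, dst)...;
 */
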
static int sock_setbindtodevice(struct sock *sk, char __user *optval,
				int optlen)
{
	int ret = -ENOPROTOOPT;
#ifdef CONFIG_NETDEVICES
	struct net *net = sock_net(sk);
	char devname[IFNAMSIZ];
	int index;

	/* Sorry... */
	ret = -EPERM;
	if (!ns_capable(net->user_ns, CAP_NET_RAW))
		goto out;

	ret = -EINVAL;
	if (optlen < 0)
		goto out;

	/* Bind this socket to a particular device like "eth0",
	 * as specified in the passed interface name. If the
	 * name is "" or the option length is zero the socket
	 * is not bound.
	 */
	if (optlen > IFNAMSIZ - 1)
		optlen = IFNAMSIZ - 1;
	memset(devname, 0, sizeof(devname));

	ret = -EFAULT;
	if (copy_from_user(devname, optval, optlen))
		goto out;

	index = 0;
	if (devname[0] != '\0') {
		struct net_device *dev;

		rcu_read_lock();
		dev = dev_get_by_name_rcu(net, devname);
		if (dev)
			index = dev->ifindex;
		rcu_read_unlock();
		ret = -ENODEV;
		if (!dev)
			goto out;
	}

	lock_sock(sk);
	sk->sk_bound_dev_if = index;
	sk_dst_reset(sk);
	release_sock(sk);

	ret = 0;

out:
#endif

	return ret;
}

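/*
 * Editor's illustrative userspace sketch (assumes fd is a socket and
 * the caller has CAP_NET_RAW): binding to "eth0"; an empty name or a
 * zero option length unbinds.
 *
 *	const char ifname[] = "eth0";
 *	if (setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE,
 *		       ifname, sizeof(ifname)) < 0)
 *		perror("SO_BINDTODEVICE");
 */
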
static int sock_getbindtodevice(struct sock *sk, char __user *optval,
				int __user *optlen, int len)
{
	int ret = -ENOPROTOOPT;
#ifdef CONFIG_NETDEVICES
	struct net *net = sock_net(sk);
	char devname[IFNAMSIZ];

	if (sk->sk_bound_dev_if == 0) {
		len = 0;
		goto zero;
	}

	ret = -EINVAL;
	if (len < IFNAMSIZ)
		goto out;

	ret = netdev_get_name(net, devname, sk->sk_bound_dev_if);
	if (ret)
		goto out;

	len = strlen(devname) + 1;

	ret = -EFAULT;
	if (copy_to_user(optval, devname, len))
		goto out;

zero:
	ret = -EFAULT;
	if (put_user(len, optlen))
		goto out;

	ret = 0;

out:
#endif

	return ret;
}

static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
{
	if (valbool)
		sock_set_flag(sk, bit);
	else
		sock_reset_flag(sk, bit);
}

bool sk_mc_loop(struct sock *sk)
{
	if (dev_recursion_level())
		return false;
	if (!sk)
		return true;
	switch (sk->sk_family) {
	case AF_INET:
		return inet_sk(sk)->mc_loop;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		return inet6_sk(sk)->mc_loop;
#endif
	}
	WARN_ON(1);
	return true;
}
EXPORT_SYMBOL(sk_mc_loop);

/*
 *	This is meant for all protocols to use and covers everything
 *	that happens at the socket level. Everything here is generic.
 */

int sock_setsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	int val;
	int valbool;
	struct linger ling;
	int ret = 0;

	/*
	 *	Options without arguments
	 */

	if (optname == SO_BINDTODEVICE)
		return sock_setbindtodevice(sk, optval, optlen);

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	valbool = val ? 1 : 0;

	lock_sock(sk);

	switch (optname) {
	case SO_DEBUG:
		if (val && !capable(CAP_NET_ADMIN))
			ret = -EACCES;
		else
			sock_valbool_flag(sk, SOCK_DBG, valbool);
		break;
	case SO_REUSEADDR:
		sk->sk_reuse = (valbool ? SK_CAN_REUSE : SK_NO_REUSE);
		break;
	case SO_REUSEPORT:
		sk->sk_reuseport = valbool;
		break;
	case SO_TYPE:
	case SO_PROTOCOL:
	case SO_DOMAIN:
	case SO_ERROR:
		ret = -ENOPROTOOPT;
		break;
	case SO_DONTROUTE:
		sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
		break;
	case SO_BROADCAST:
		sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
		break;
	case SO_SNDBUF:
		/* Don't return an error here; BSD doesn't, and if you
		 * think about it this is right. Otherwise apps would have
		 * to play 'guess the biggest size' games. RCVBUF/SNDBUF
		 * are treated in BSD as hints.
		 */
		val = min_t(u32, val, sysctl_wmem_max);
set_sndbuf:
		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
		sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF);
		/* Wake up sending tasks if we upped the value. */
		sk->sk_write_space(sk);
		break;

	case SO_SNDBUFFORCE:
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}
		goto set_sndbuf;

	case SO_RCVBUF:
		/* Don't return an error here; BSD doesn't, and if you
		 * think about it this is right. Otherwise apps would have
		 * to play 'guess the biggest size' games. RCVBUF/SNDBUF
		 * are treated in BSD as hints.
		 */
		val = min_t(u32, val, sysctl_rmem_max);
set_rcvbuf:
		sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
		/*
		 * We double it on the way in to account for
		 * "struct sk_buff" etc. overhead.   Applications
		 * assume that the SO_RCVBUF setting they make will
		 * allow that much actual data to be received on that
		 * socket.
		 *
		 * Applications are unaware that "struct sk_buff" and
		 * other overheads allocate from the receive buffer
		 * during socket buffer allocation.
		 *
		 * And after considering the possible alternatives,
		 * returning the value we actually used in getsockopt
		 * is the most desirable behavior.
		 */
		sk->sk_rcvbuf = max_t(int, val * 2, SOCK_MIN_RCVBUF);
		break;

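/*
 * Editor's illustrative userspace sketch: because of the doubling
 * above, reading the option back returns twice the requested value
 * (assuming net.core.rmem_max permits it).
 *
 *	int val = 65536, out;
 *	socklen_t len = sizeof(out);
 *	setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &val, sizeof(val));
 *	getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &out, &len);
 *	// out is now 131072
 */
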
	case SO_RCVBUFFORCE:
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}
		goto set_rcvbuf;

	case SO_KEEPALIVE:
		if (sk->sk_prot->keepalive)
			sk->sk_prot->keepalive(sk, valbool);
		sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
		break;

	case SO_OOBINLINE:
		sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
		break;

	case SO_NO_CHECK:
		sk->sk_no_check_tx = valbool;
		break;

	case SO_PRIORITY:
		if ((val >= 0 && val <= 6) ||
		    ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
			sk->sk_priority = val;
		else
			ret = -EPERM;
		break;

	case SO_LINGER:
		if (optlen < sizeof(ling)) {
			ret = -EINVAL;	/* 1003.1g */
			break;
		}
		if (copy_from_user(&ling, optval, sizeof(ling))) {
			ret = -EFAULT;
			break;
		}
		if (!ling.l_onoff)
			sock_reset_flag(sk, SOCK_LINGER);
		else {
#if (BITS_PER_LONG == 32)
			if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
				sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
			else
#endif
				sk->sk_lingertime = (unsigned int)ling.l_linger * HZ;
			sock_set_flag(sk, SOCK_LINGER);
		}
		break;
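
/*
 * Editor's illustrative userspace sketch: a five second linger on
 * close; the kernel stores the value in jiffies via the conversion
 * above.
 *
 *	struct linger ling = { .l_onoff = 1, .l_linger = 5 };
 *	setsockopt(fd, SOL_SOCKET, SO_LINGER, &ling, sizeof(ling));
 */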

	case SO_BSDCOMPAT:
		sock_warn_obsolete_bsdism("setsockopt");
		break;

	case SO_PASSCRED:
		if (valbool)
			set_bit(SOCK_PASSCRED, &sock->flags);
		else
			clear_bit(SOCK_PASSCRED, &sock->flags);
		break;

	case SO_TIMESTAMP:
	case SO_TIMESTAMPNS:
		if (valbool)  {
			if (optname == SO_TIMESTAMP)
				sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
			else
				sock_set_flag(sk, SOCK_RCVTSTAMPNS);
			sock_set_flag(sk, SOCK_RCVTSTAMP);
			sock_enable_timestamp(sk, SOCK_TIMESTAMP);
		} else {
			sock_reset_flag(sk, SOCK_RCVTSTAMP);
			sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
		}
		break;

	case SO_TIMESTAMPING:
		if (val & ~SOF_TIMESTAMPING_MASK) {
			ret = -EINVAL;
			break;
		}

		if (val & SOF_TIMESTAMPING_OPT_ID &&
		    !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)) {
			if (sk->sk_protocol == IPPROTO_TCP &&
			    sk->sk_type == SOCK_STREAM) {
				if ((1 << sk->sk_state) &
				    (TCPF_CLOSE | TCPF_LISTEN)) {
					ret = -EINVAL;
					break;
				}
				sk->sk_tskey = tcp_sk(sk)->snd_una;
			} else {
				sk->sk_tskey = 0;
			}
		}

		if (val & SOF_TIMESTAMPING_OPT_STATS &&
		    !(val & SOF_TIMESTAMPING_OPT_TSONLY)) {
			ret = -EINVAL;
			break;
		}

		sk->sk_tsflags = val;
		if (val & SOF_TIMESTAMPING_RX_SOFTWARE)
			sock_enable_timestamp(sk,
					      SOCK_TIMESTAMPING_RX_SOFTWARE);
		else
			sock_disable_timestamp(sk,
					       (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE));
		break;
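
/*
 * Editor's illustrative userspace sketch: requesting software TX
 * timestamps with ID tagging, as validated above; flags outside
 * SOF_TIMESTAMPING_MASK are rejected with -EINVAL.
 *
 *	unsigned int flags = SOF_TIMESTAMPING_TX_SOFTWARE |
 *			     SOF_TIMESTAMPING_SOFTWARE |
 *			     SOF_TIMESTAMPING_OPT_ID;
 *	setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &flags, sizeof(flags));
 */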

	case SO_RCVLOWAT:
		if (val < 0)
			val = INT_MAX;
		sk->sk_rcvlowat = val ? : 1;
		break;

	case SO_RCVTIMEO:
		ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen);
		break;

	case SO_SNDTIMEO:
		ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen);
		break;

	case SO_ATTACH_FILTER:
		ret = -EINVAL;
		if (optlen == sizeof(struct sock_fprog)) {
			struct sock_fprog fprog;

			ret = -EFAULT;
			if (copy_from_user(&fprog, optval, sizeof(fprog)))
				break;

			ret = sk_attach_filter(&fprog, sk);
		}
		break;

	case SO_ATTACH_BPF:
		ret = -EINVAL;
		if (optlen == sizeof(u32)) {
			u32 ufd;

			ret = -EFAULT;
			if (copy_from_user(&ufd, optval, sizeof(ufd)))
				break;

			ret = sk_attach_bpf(ufd, sk);
		}
		break;

	case SO_ATTACH_REUSEPORT_CBPF:
		ret = -EINVAL;
		if (optlen == sizeof(struct sock_fprog)) {
			struct sock_fprog fprog;

			ret = -EFAULT;
			if (copy_from_user(&fprog, optval, sizeof(fprog)))
				break;

			ret = sk_reuseport_attach_filter(&fprog, sk);
		}
		break;

	case SO_ATTACH_REUSEPORT_EBPF:
		ret = -EINVAL;
		if (optlen == sizeof(u32)) {
			u32 ufd;

			ret = -EFAULT;
			if (copy_from_user(&ufd, optval, sizeof(ufd)))
				break;

			ret = sk_reuseport_attach_bpf(ufd, sk);
		}
		break;

	case SO_DETACH_FILTER:
		ret = sk_detach_filter(sk);
		break;

	case SO_LOCK_FILTER:
		if (sock_flag(sk, SOCK_FILTER_LOCKED) && !valbool)
			ret = -EPERM;
		else
			sock_valbool_flag(sk, SOCK_FILTER_LOCKED, valbool);
		break;

	case SO_PASSSEC:
		if (valbool)
			set_bit(SOCK_PASSSEC, &sock->flags);
		else
			clear_bit(SOCK_PASSSEC, &sock->flags);
		break;
	case SO_MARK:
		if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
			ret = -EPERM;
		else
			sk->sk_mark = val;
		break;

	case SO_RXQ_OVFL:
		sock_valbool_flag(sk, SOCK_RXQ_OVFL, valbool);
		break;

	case SO_WIFI_STATUS:
		sock_valbool_flag(sk, SOCK_WIFI_STATUS, valbool);
		break;

	case SO_PEEK_OFF:
		if (sock->ops->set_peek_off)
			ret = sock->ops->set_peek_off(sk, val);
		else
			ret = -EOPNOTSUPP;
		break;

	case SO_NOFCS:
		sock_valbool_flag(sk, SOCK_NOFCS, valbool);
		break;

	case SO_SELECT_ERR_QUEUE:
		sock_valbool_flag(sk, SOCK_SELECT_ERR_QUEUE, valbool);
		break;

#ifdef CONFIG_NET_RX_BUSY_POLL
	case SO_BUSY_POLL:
		/* allow unprivileged users to decrease the value */
		if ((val > sk->sk_ll_usec) && !capable(CAP_NET_ADMIN))
			ret = -EPERM;
		else {
			if (val < 0)
				ret = -EINVAL;
			else
				sk->sk_ll_usec = val;
		}
		break;
#endif

	case SO_MAX_PACING_RATE:
		sk->sk_max_pacing_rate = val;
		sk->sk_pacing_rate = min(sk->sk_pacing_rate,
					 sk->sk_max_pacing_rate);
		break;

	case SO_INCOMING_CPU:
		sk->sk_incoming_cpu = val;
		break;

	case SO_CNX_ADVICE:
		if (val == 1)
			dst_negative_advice(sk);
		break;
	default:
		ret = -ENOPROTOOPT;
		break;
	}
	release_sock(sk);
	return ret;
}
EXPORT_SYMBOL(sock_setsockopt);


static void cred_to_ucred(struct pid *pid, const struct cred *cred,
			  struct ucred *ucred)
{
	ucred->pid = pid_vnr(pid);
	ucred->uid = ucred->gid = -1;
	if (cred) {
		struct user_namespace *current_ns = current_user_ns();

		ucred->uid = from_kuid_munged(current_ns, cred->euid);
		ucred->gid = from_kgid_munged(current_ns, cred->egid);
	}
}

int sock_getsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	union {
		int val;
		struct linger ling;
		struct timeval tm;
	} v;

	int lv = sizeof(int);
	int len;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	memset(&v, 0, sizeof(v));

	switch (optname) {
	case SO_DEBUG:
		v.val = sock_flag(sk, SOCK_DBG);
		break;

	case SO_DONTROUTE:
		v.val = sock_flag(sk, SOCK_LOCALROUTE);
		break;

	case SO_BROADCAST:
		v.val = sock_flag(sk, SOCK_BROADCAST);
		break;

	case SO_SNDBUF:
		v.val = sk->sk_sndbuf;
		break;

	case SO_RCVBUF:
		v.val = sk->sk_rcvbuf;
		break;

	case SO_REUSEADDR:
		v.val = sk->sk_reuse;
		break;

	case SO_REUSEPORT:
		v.val = sk->sk_reuseport;
		break;

	case SO_KEEPALIVE:
		v.val = sock_flag(sk, SOCK_KEEPOPEN);
		break;

	case SO_TYPE:
		v.val = sk->sk_type;
		break;

	case SO_PROTOCOL:
		v.val = sk->sk_protocol;
		break;

	case SO_DOMAIN:
		v.val = sk->sk_family;
		break;

	case SO_ERROR:
		v.val = -sock_error(sk);
		if (v.val == 0)
			v.val = xchg(&sk->sk_err_soft, 0);
		break;

	case SO_OOBINLINE:
		v.val = sock_flag(sk, SOCK_URGINLINE);
		break;

	case SO_NO_CHECK:
		v.val = sk->sk_no_check_tx;
		break;

	case SO_PRIORITY:
		v.val = sk->sk_priority;
		break;

	case SO_LINGER:
		lv		= sizeof(v.ling);
		v.ling.l_onoff	= sock_flag(sk, SOCK_LINGER);
		v.ling.l_linger	= sk->sk_lingertime / HZ;
		break;

	case SO_BSDCOMPAT:
		sock_warn_obsolete_bsdism("getsockopt");
		break;

	case SO_TIMESTAMP:
		v.val = sock_flag(sk, SOCK_RCVTSTAMP) &&
				!sock_flag(sk, SOCK_RCVTSTAMPNS);
		break;

	case SO_TIMESTAMPNS:
		v.val = sock_flag(sk, SOCK_RCVTSTAMPNS);
		break;

	case SO_TIMESTAMPING:
		v.val = sk->sk_tsflags;
		break;

	case SO_RCVTIMEO:
		lv = sizeof(struct timeval);
		if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) {
			v.tm.tv_sec = 0;
			v.tm.tv_usec = 0;
		} else {
			v.tm.tv_sec = sk->sk_rcvtimeo / HZ;
			v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000000) / HZ;
		}
		break;

	case SO_SNDTIMEO:
		lv = sizeof(struct timeval);
		if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) {
			v.tm.tv_sec = 0;
			v.tm.tv_usec = 0;
		} else {
			v.tm.tv_sec = sk->sk_sndtimeo / HZ;
			v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * 1000000) / HZ;
		}
		break;

	case SO_RCVLOWAT:
		v.val = sk->sk_rcvlowat;
		break;

	case SO_SNDLOWAT:
		v.val = 1;
		break;

	case SO_PASSCRED:
		v.val = !!test_bit(SOCK_PASSCRED, &sock->flags);
		break;

	case SO_PEERCRED:
	{
		struct ucred peercred;
		if (len > sizeof(peercred))
			len = sizeof(peercred);
		cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
		if (copy_to_user(optval, &peercred, len))
			return -EFAULT;
		goto lenout;
	}

	case SO_PEERNAME:
	{
		char address[128];

		if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
			return -ENOTCONN;
		if (lv < len)
			return -EINVAL;
		if (copy_to_user(optval, address, len))
			return -EFAULT;
		goto lenout;
	}

	/* Dubious BSD thing... Probably nobody even uses it, but
	 * the UNIX standard wants it for whatever reason... -DaveM
	 */
	case SO_ACCEPTCONN:
		v.val = sk->sk_state == TCP_LISTEN;
		break;

	case SO_PASSSEC:
		v.val = !!test_bit(SOCK_PASSSEC, &sock->flags);
		break;

	case SO_PEERSEC:
		return security_socket_getpeersec_stream(sock, optval, optlen, len);

	case SO_MARK:
		v.val = sk->sk_mark;
		break;

	case SO_RXQ_OVFL:
		v.val = sock_flag(sk, SOCK_RXQ_OVFL);
		break;

	case SO_WIFI_STATUS:
		v.val = sock_flag(sk, SOCK_WIFI_STATUS);
		break;

	case SO_PEEK_OFF:
		if (!sock->ops->set_peek_off)
			return -EOPNOTSUPP;

		v.val = sk->sk_peek_off;
		break;
	case SO_NOFCS:
		v.val = sock_flag(sk, SOCK_NOFCS);
		break;

	case SO_BINDTODEVICE:
		return sock_getbindtodevice(sk, optval, optlen, len);

	case SO_GET_FILTER:
		len = sk_get_filter(sk, (struct sock_filter __user *)optval, len);
		if (len < 0)
			return len;

		goto lenout;

	case SO_LOCK_FILTER:
		v.val = sock_flag(sk, SOCK_FILTER_LOCKED);
		break;

	case SO_BPF_EXTENSIONS:
		v.val = bpf_tell_extensions();
		break;

	case SO_SELECT_ERR_QUEUE:
		v.val = sock_flag(sk, SOCK_SELECT_ERR_QUEUE);
		break;

#ifdef CONFIG_NET_RX_BUSY_POLL
	case SO_BUSY_POLL:
		v.val = sk->sk_ll_usec;
		break;
#endif

	case SO_MAX_PACING_RATE:
		v.val = sk->sk_max_pacing_rate;
		break;

	case SO_INCOMING_CPU:
		v.val = sk->sk_incoming_cpu;
		break;

	default:
		/* We implement the SO_SNDLOWAT etc to not be settable
		 * (1003.1g 7).
		 */
		return -ENOPROTOOPT;
	}

	if (len > lv)
		len = lv;
	if (copy_to_user(optval, &v, len))
		return -EFAULT;
lenout:
	if (put_user(len, optlen))
		return -EFAULT;
	return 0;
}

/*
 * Initialize an sk_lock.
 *
 * (We also register the sk_lock with the lock validator.)
 */
static inline void sock_lock_init(struct sock *sk)
{
	sock_lock_init_class_and_name(sk,
			af_family_slock_key_strings[sk->sk_family],
			af_family_slock_keys + sk->sk_family,
			af_family_key_strings[sk->sk_family],
			af_family_keys + sk->sk_family);
}

/*
 * Copy all fields from osk to nsk but nsk->sk_refcnt must not change yet,
 * even temporarily, because of RCU lookups. sk_node should also be left as-is.
 * We must not copy fields between sk_dontcopy_begin and sk_dontcopy_end.
 */
static void sock_copy(struct sock *nsk, const struct sock *osk)
{
#ifdef CONFIG_SECURITY_NETWORK
	void *sptr = nsk->sk_security;
#endif
	memcpy(nsk, osk, offsetof(struct sock, sk_dontcopy_begin));

	memcpy(&nsk->sk_dontcopy_end, &osk->sk_dontcopy_end,
	       osk->sk_prot->obj_size - offsetof(struct sock, sk_dontcopy_end));

#ifdef CONFIG_SECURITY_NETWORK
	nsk->sk_security = sptr;
	security_sk_clone(osk, nsk);
#endif
}

static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
		int family)
{
	struct sock *sk;
	struct kmem_cache *slab;

	slab = prot->slab;
	if (slab != NULL) {
		sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO);
		if (!sk)
			return sk;
		if (priority & __GFP_ZERO)
			sk_prot_clear_nulls(sk, prot->obj_size);
	} else
		sk = kmalloc(prot->obj_size, priority);

	if (sk != NULL) {
		kmemcheck_annotate_bitfield(sk, flags);

		if (security_sk_alloc(sk, family, priority))
			goto out_free;

		if (!try_module_get(prot->owner))
			goto out_free_sec;
		sk_tx_queue_clear(sk);
	}

	return sk;

out_free_sec:
	security_sk_free(sk);
out_free:
	if (slab != NULL)
		kmem_cache_free(slab, sk);
	else
		kfree(sk);
	return NULL;
}

static void sk_prot_free(struct proto *prot, struct sock *sk)
{
	struct kmem_cache *slab;
	struct module *owner;

	owner = prot->owner;
	slab = prot->slab;

	cgroup_sk_free(&sk->sk_cgrp_data);
	mem_cgroup_sk_free(sk);
	security_sk_free(sk);
	if (slab != NULL)
		kmem_cache_free(slab, sk);
	else
		kfree(sk);
	module_put(owner);
}

/**
 *	sk_alloc - All socket objects are allocated here
 *	@net: the applicable net namespace
 *	@family: protocol family
 *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
 *	@prot: struct proto associated with this new sock instance
 *	@kern: is this to be a kernel socket?
 */
struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
		      struct proto *prot, int kern)
{
	struct sock *sk;

	sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family);
	if (sk) {
		sk->sk_family = family;
		/*
		 * See comment in struct sock definition to understand
		 * why we need sk_prot_creator -acme
		 */
		sk->sk_prot = sk->sk_prot_creator = prot;
		sock_lock_init(sk);
		sk->sk_net_refcnt = kern ? 0 : 1;
		if (likely(sk->sk_net_refcnt))
			get_net(net);
		sock_net_set(sk, net);
		atomic_set(&sk->sk_wmem_alloc, 1);

		mem_cgroup_sk_alloc(sk);
		cgroup_sk_alloc(&sk->sk_cgrp_data);
		sock_update_classid(&sk->sk_cgrp_data);
		sock_update_netprioidx(&sk->sk_cgrp_data);
	}

	return sk;
}
EXPORT_SYMBOL(sk_alloc);
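
/*
 * Editor's illustrative sketch (PF_MYFAM and my_proto are made-up
 * names): a family's create() hook typically pairs sk_alloc() with
 * sock_init_data().
 *
 *	sk = sk_alloc(net, PF_MYFAM, GFP_KERNEL, &my_proto, kern);
 *	if (!sk)
 *		return -ENOMEM;
 *	sock_init_data(sock, sk);
 */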

/* Sockets having SOCK_RCU_FREE will call this function after one RCU
 * grace period. This is the case for UDP sockets and TCP listeners.
 */
static void __sk_destruct(struct rcu_head *head)
{
	struct sock *sk = container_of(head, struct sock, sk_rcu);
	struct sk_filter *filter;

	if (sk->sk_destruct)
		sk->sk_destruct(sk);

	filter = rcu_dereference_check(sk->sk_filter,
				       atomic_read(&sk->sk_wmem_alloc) == 0);
	if (filter) {
		sk_filter_uncharge(sk, filter);
		RCU_INIT_POINTER(sk->sk_filter, NULL);
	}
	if (rcu_access_pointer(sk->sk_reuseport_cb))
		reuseport_detach_sock(sk);

	sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP);

	if (atomic_read(&sk->sk_omem_alloc))
		pr_debug("%s: optmem leakage (%d bytes) detected\n",
			 __func__, atomic_read(&sk->sk_omem_alloc));

	if (sk->sk_peer_cred)
		put_cred(sk->sk_peer_cred);
	put_pid(sk->sk_peer_pid);
	if (likely(sk->sk_net_refcnt))
		put_net(sock_net(sk));
	sk_prot_free(sk->sk_prot_creator, sk);
}

void sk_destruct(struct sock *sk)
{
	if (sock_flag(sk, SOCK_RCU_FREE))
		call_rcu(&sk->sk_rcu, __sk_destruct);
	else
		__sk_destruct(&sk->sk_rcu);
}

static void __sk_free(struct sock *sk)
{
	if (unlikely(sock_diag_has_destroy_listeners(sk) && sk->sk_net_refcnt))
		sock_diag_broadcast_destroy(sk);
	else
		sk_destruct(sk);
}

void sk_free(struct sock *sk)
{
	/*
	 * We subtract one from sk_wmem_alloc to learn whether some
	 * packets are still in a tx queue.
	 * If the result is not zero, sock_wfree() will call __sk_free(sk) later.
	 */
	if (atomic_dec_and_test(&sk->sk_wmem_alloc))
		__sk_free(sk);
}
EXPORT_SYMBOL(sk_free);
/**
 *	sk_clone_lock - clone a socket, and lock its clone
 *	@sk: the socket to clone
 *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
 *
 *	Caller must unlock socket even in error path (bh_unlock_sock(newsk))
 */
struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
{
	struct sock *newsk;
	bool is_charged = true;

	newsk = sk_prot_alloc(sk->sk_prot, priority, sk->sk_family);
	if (newsk != NULL) {
		struct sk_filter *filter;

		sock_copy(newsk, sk);

		/* SANITY */
		if (likely(newsk->sk_net_refcnt))
			get_net(sock_net(newsk));
		sk_node_init(&newsk->sk_node);
		sock_lock_init(newsk);
		bh_lock_sock(newsk);
		newsk->sk_backlog.head	= newsk->sk_backlog.tail = NULL;
		newsk->sk_backlog.len = 0;

		atomic_set(&newsk->sk_rmem_alloc, 0);
		/*
		 * sk_wmem_alloc set to one (see sk_free() and sock_wfree())
		 */
		atomic_set(&newsk->sk_wmem_alloc, 1);
		atomic_set(&newsk->sk_omem_alloc, 0);
		skb_queue_head_init(&newsk->sk_receive_queue);
		skb_queue_head_init(&newsk->sk_write_queue);

		rwlock_init(&newsk->sk_callback_lock);
		lockdep_set_class_and_name(&newsk->sk_callback_lock,
				af_callback_keys + newsk->sk_family,
				af_family_clock_key_strings[newsk->sk_family]);

		newsk->sk_dst_cache	= NULL;
		newsk->sk_wmem_queued	= 0;
		newsk->sk_forward_alloc = 0;
		atomic_set(&newsk->sk_drops, 0);
		newsk->sk_send_head	= NULL;
		newsk->sk_userlocks	= sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;

		sock_reset_flag(newsk, SOCK_DONE);
		skb_queue_head_init(&newsk->sk_error_queue);

		filter = rcu_dereference_protected(newsk->sk_filter, 1);
		if (filter != NULL)
			/* Though it's an empty new sock, the charging may fail
			 * if sysctl_optmem_max was changed between the creation
			 * of the original socket and this cloning.
			 */
			is_charged = sk_filter_charge(newsk, filter);

		if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk, sk))) {
			/* It is still a raw copy of the parent, so invalidate
			 * the destructor and do a plain sk_free(). */
			newsk->sk_destruct = NULL;
			bh_unlock_sock(newsk);
			sk_free(newsk);
			newsk = NULL;
			goto out;
		}
		RCU_INIT_POINTER(newsk->sk_reuseport_cb, NULL);

		newsk->sk_err	   = 0;
		newsk->sk_err_soft = 0;
		newsk->sk_priority = 0;
		newsk->sk_incoming_cpu = raw_smp_processor_id();
		atomic64_set(&newsk->sk_cookie, 0);

		mem_cgroup_sk_alloc(newsk);
		cgroup_sk_alloc(&newsk->sk_cgrp_data);

		/*
		 * Before updating sk_refcnt, we must commit prior changes to memory
		 * (Documentation/RCU/rculist_nulls.txt for details)
		 */
		smp_wmb();
		atomic_set(&newsk->sk_refcnt, 2);

		/*
		 * Increment the counter in the same struct proto as the master
		 * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
		 * is the same as sk->sk_prot->socks, as this field was copied
		 * with memcpy).
		 *
		 * This _changes_ the previous behaviour, where
		 * tcp_create_openreq_child always was incrementing the
		 * equivalent to tcp_prot->socks (inet_sock_nr), so this has
		 * to be taken into account in all callers. -acme
		 */
		sk_refcnt_debug_inc(newsk);
		sk_set_socket(newsk, NULL);
		newsk->sk_wq = NULL;

		if (newsk->sk_prot->sockets_allocated)
			sk_sockets_allocated_inc(newsk);

		if (sock_needs_netstamp(sk) &&
		    newsk->sk_flags & SK_FLAGS_TIMESTAMP)
			net_enable_timestamp();
	}
out:
	return newsk;
}
EXPORT_SYMBOL_GPL(sk_clone_lock);

void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
{
	u32 max_segs = 1;

	sk_dst_set(sk, dst);
	sk->sk_route_caps = dst->dev->features;
	if (sk->sk_route_caps & NETIF_F_GSO)
		sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
	sk->sk_route_caps &= ~sk->sk_route_nocaps;
	if (sk_can_gso(sk)) {
		if (dst->header_len) {
			sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
		} else {
			sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
			sk->sk_gso_max_size = dst->dev->gso_max_size;
			max_segs = max_t(u32, dst->dev->gso_max_segs, 1);
		}
	}
	sk->sk_gso_max_segs = max_segs;
}
EXPORT_SYMBOL_GPL(sk_setup_caps);

/*
 *	Simple resource managers for sockets.
 */


/*
 * Write buffer destructor automatically called from kfree_skb.
 */
void sock_wfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	unsigned int len = skb->truesize;

	if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) {
		/*
		 * Keep a reference on sk_wmem_alloc, this will be released
		 * after sk_write_space() call
		 */
		atomic_sub(len - 1, &sk->sk_wmem_alloc);
		sk->sk_write_space(sk);
		len = 1;
	}
	/*
	 * if sk_wmem_alloc reaches 0, we must finish what sk_free()
	 * could not do because of in-flight packets
	 */
	if (atomic_sub_and_test(len, &sk->sk_wmem_alloc))
		__sk_free(sk);
}
EXPORT_SYMBOL(sock_wfree);

/* This variant of sock_wfree() is used by TCP,
 * since it sets SOCK_USE_WRITE_QUEUE.
 */
void __sock_wfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;

	if (atomic_sub_and_test(skb->truesize, &sk->sk_wmem_alloc))
		__sk_free(sk);
}

void skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
{
	skb_orphan(skb);
	skb->sk = sk;
#ifdef CONFIG_INET
	if (unlikely(!sk_fullsock(sk))) {
		skb->destructor = sock_edemux;
		sock_hold(sk);
		return;
	}
#endif
	skb->destructor = sock_wfree;
	skb_set_hash_from_sk(skb, sk);
	/*
	 * We used to take a refcount on sk, but the following operation
	 * is enough to guarantee sk_free() won't free this sock until
	 * all in-flight packets are completed.
	 */
	atomic_add(skb->truesize, &sk->sk_wmem_alloc);
}
EXPORT_SYMBOL(skb_set_owner_w);

/* This helper is used by netem, as it can hold packets in its
 * delay queue. We want to allow the owner socket to send more
 * packets, as if they were already TX completed by a typical driver.
 * But we also want to keep skb->sk set because some packet schedulers
 * rely on it (sch_fq for example). So we set skb->truesize to a small
 * amount (1) and decrease sk_wmem_alloc accordingly.
 */
void skb_orphan_partial(struct sk_buff *skb)
{
	/* If this skb is a TCP pure ACK or has already been here,
	 * we have nothing to do. 2 is already a very small truesize.
	 */
	if (skb->truesize <= 2)
		return;

	/* The TCP stack sets skb->ooo_okay based on sk_wmem_alloc,
	 * so we do not completely orphan the skb, but transfer all
	 * accounted bytes but one, to avoid unexpected reorders.
	 */
	if (skb->destructor == sock_wfree
#ifdef CONFIG_INET
	    || skb->destructor == tcp_wfree
#endif
		) {
		atomic_sub(skb->truesize - 1, &skb->sk->sk_wmem_alloc);
		skb->truesize = 1;
	} else {
		skb_orphan(skb);
	}
}
EXPORT_SYMBOL(skb_orphan_partial);

/*
 * Read buffer destructor automatically called from kfree_skb.
 */
void sock_rfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	unsigned int len = skb->truesize;

	atomic_sub(len, &sk->sk_rmem_alloc);
	sk_mem_uncharge(sk, len);
}
EXPORT_SYMBOL(sock_rfree);

/*
 * Buffer destructor for skbs that are not used directly in read or write
 * path, e.g. for error handler skbs. Automatically called from kfree_skb.
 */
void sock_efree(struct sk_buff *skb)
{
	sock_put(skb->sk);
}
EXPORT_SYMBOL(sock_efree);

kuid_t sock_i_uid(struct sock *sk)
{
	kuid_t uid;

	read_lock_bh(&sk->sk_callback_lock);
	uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : GLOBAL_ROOT_UID;
	read_unlock_bh(&sk->sk_callback_lock);
	return uid;
}
EXPORT_SYMBOL(sock_i_uid);

unsigned long sock_i_ino(struct sock *sk)
{
	unsigned long ino;

	read_lock_bh(&sk->sk_callback_lock);
	ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
	read_unlock_bh(&sk->sk_callback_lock);
	return ino;
}
EXPORT_SYMBOL(sock_i_ino);

/*
 * Allocate an skb from the socket's send buffer.
 */
struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
			     gfp_t priority)
{
	if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
		struct sk_buff *skb = alloc_skb(size, priority);
		if (skb) {
			skb_set_owner_w(skb, sk);
			return skb;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(sock_wmalloc);

/*
 * Allocate a memory block from the socket's option memory buffer.
 */
void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
{
	if ((unsigned int)size <= sysctl_optmem_max &&
	    atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
		void *mem;
		/* First do the add, to avoid the race if kmalloc
		 * might sleep.
		 */
		atomic_add(size, &sk->sk_omem_alloc);
		mem = kmalloc(size, priority);
		if (mem)
			return mem;
		atomic_sub(size, &sk->sk_omem_alloc);
	}
	return NULL;
}
EXPORT_SYMBOL(sock_kmalloc);
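
/*
 * Editor's illustrative sketch (struct my_opt is hypothetical):
 * option-memory allocations are charged to sk_omem_alloc and must be
 * released with sock_kfree_s() using the same size.
 *
 *	struct my_opt *opt = sock_kmalloc(sk, sizeof(*opt), GFP_KERNEL);
 *	if (!opt)
 *		return -ENOBUFS;
 *	...
 *	sock_kfree_s(sk, opt, sizeof(*opt));
 */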

/* Free an option memory block. Note, we actually want the inline
 * here as this allows gcc to detect the nullify and fold away the
 * condition entirely.
 */
static inline void __sock_kfree_s(struct sock *sk, void *mem, int size,
				  const bool nullify)
{
	if (WARN_ON_ONCE(!mem))
		return;
	if (nullify)
		kzfree(mem);
	else
		kfree(mem);
	atomic_sub(size, &sk->sk_omem_alloc);
}

void sock_kfree_s(struct sock *sk, void *mem, int size)
{
	__sock_kfree_s(sk, mem, size, false);
}
EXPORT_SYMBOL(sock_kfree_s);

void sock_kzfree_s(struct sock *sk, void *mem, int size)
{
	__sock_kfree_s(sk, mem, size, true);
}
EXPORT_SYMBOL(sock_kzfree_s);

/* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
 * I think these locks should be removed for datagram sockets.
 */
static long sock_wait_for_wmem(struct sock *sk, long timeo)
{
	DEFINE_WAIT(wait);

	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
	for (;;) {
		if (!timeo)
			break;
		if (signal_pending(current))
			break;
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
			break;
		if (sk->sk_shutdown & SEND_SHUTDOWN)
			break;
		if (sk->sk_err)
			break;
		timeo = schedule_timeout(timeo);
	}
	finish_wait(sk_sleep(sk), &wait);
	return timeo;
}


/*
 *	Generic send/receive buffer handlers
 */

struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
				     unsigned long data_len, int noblock,
				     int *errcode, int max_page_order)
{
	struct sk_buff *skb;
	long timeo;
	int err;

	timeo = sock_sndtimeo(sk, noblock);
	for (;;) {
		err = sock_error(sk);
		if (err != 0)
			goto failure;

		err = -EPIPE;
		if (sk->sk_shutdown & SEND_SHUTDOWN)
			goto failure;

		if (sk_wmem_alloc_get(sk) < sk->sk_sndbuf)
			break;

		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		err = -EAGAIN;
		if (!timeo)
			goto failure;
		if (signal_pending(current))
			goto interrupted;
		timeo = sock_wait_for_wmem(sk, timeo);
	}
	skb = alloc_skb_with_frags(header_len, data_len, max_page_order,
				   errcode, sk->sk_allocation);
	if (skb)
		skb_set_owner_w(skb, sk);
	return skb;

interrupted:
	err = sock_intr_errno(timeo);
failure:
	*errcode = err;
	return NULL;
}
EXPORT_SYMBOL(sock_alloc_send_pskb);

struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
				    int noblock, int *errcode)
{
	return sock_alloc_send_pskb(sk, size, 0, noblock, errcode, 0);
}
EXPORT_SYMBOL(sock_alloc_send_skb);

1905 int __sock_cmsg_send(struct sock *sk, struct msghdr *msg, struct cmsghdr *cmsg,
1906 		     struct sockcm_cookie *sockc)
1907 {
1908 	u32 tsflags;
1909 
1910 	switch (cmsg->cmsg_type) {
1911 	case SO_MARK:
1912 		if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1913 			return -EPERM;
1914 		if (cmsg->cmsg_len != CMSG_LEN(sizeof(u32)))
1915 			return -EINVAL;
1916 		sockc->mark = *(u32 *)CMSG_DATA(cmsg);
1917 		break;
1918 	case SO_TIMESTAMPING:
1919 		if (cmsg->cmsg_len != CMSG_LEN(sizeof(u32)))
1920 			return -EINVAL;
1921 
1922 		tsflags = *(u32 *)CMSG_DATA(cmsg);
1923 		if (tsflags & ~SOF_TIMESTAMPING_TX_RECORD_MASK)
1924 			return -EINVAL;
1925 
1926 		sockc->tsflags &= ~SOF_TIMESTAMPING_TX_RECORD_MASK;
1927 		sockc->tsflags |= tsflags;
1928 		break;
1929 	/* SCM_RIGHTS and SCM_CREDENTIALS are semantically in SOL_UNIX. */
1930 	case SCM_RIGHTS:
1931 	case SCM_CREDENTIALS:
1932 		break;
1933 	default:
1934 		return -EINVAL;
1935 	}
1936 	return 0;
1937 }
1938 EXPORT_SYMBOL(__sock_cmsg_send);
1939 
1940 int sock_cmsg_send(struct sock *sk, struct msghdr *msg,
1941 		   struct sockcm_cookie *sockc)
1942 {
1943 	struct cmsghdr *cmsg;
1944 	int ret;
1945 
1946 	for_each_cmsghdr(cmsg, msg) {
1947 		if (!CMSG_OK(msg, cmsg))
1948 			return -EINVAL;
1949 		if (cmsg->cmsg_level != SOL_SOCKET)
1950 			continue;
1951 		ret = __sock_cmsg_send(sk, msg, cmsg, sockc);
1952 		if (ret)
1953 			return ret;
1954 	}
1955 	return 0;
1956 }
1957 EXPORT_SYMBOL(sock_cmsg_send);
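
/* Example (editorial, not in the original file): the userspace counterpart
 * of the SOL_SOCKET control-message parsing above -- attaching a per-packet
 * SO_MARK (CAP_NET_ADMIN required) to a single sendmsg() call.
 */
#include <string.h>
#include <stdint.h>
#include <sys/socket.h>

static ssize_t send_with_mark(int fd, const void *buf, size_t len,
			      uint32_t mark)
{
	char control[CMSG_SPACE(sizeof(uint32_t))];
	struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
	struct msghdr msg = {
		.msg_iov = &iov,
		.msg_iovlen = 1,
		.msg_control = control,
		.msg_controllen = sizeof(control),
	};
	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);

	cmsg->cmsg_level = SOL_SOCKET;
	cmsg->cmsg_type = SO_MARK;
	cmsg->cmsg_len = CMSG_LEN(sizeof(uint32_t));
	memcpy(CMSG_DATA(cmsg), &mark, sizeof(mark));
	return sendmsg(fd, &msg, 0);
}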
1958 
1959 /* On 32bit arches, an skb frag is limited to 2^15 bytes */
1960 #define SKB_FRAG_PAGE_ORDER	get_order(32768)
1961 
1962 /**
1963  * skb_page_frag_refill - check that a page_frag contains enough room
1964  * @sz: minimum size of the fragment we want to get
1965  * @pfrag: pointer to page_frag
1966  * @gfp: priority for memory allocation
1967  *
1968  * Note: While this allocator tries to use high order pages, there is
1969  * no guarantee that allocations succeed. Therefore, @sz MUST be
1970  * less than or equal to PAGE_SIZE.
1971  */
1972 bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t gfp)
1973 {
1974 	if (pfrag->page) {
1975 		if (page_ref_count(pfrag->page) == 1) {
1976 			pfrag->offset = 0;
1977 			return true;
1978 		}
1979 		if (pfrag->offset + sz <= pfrag->size)
1980 			return true;
1981 		put_page(pfrag->page);
1982 	}
1983 
1984 	pfrag->offset = 0;
1985 	if (SKB_FRAG_PAGE_ORDER) {
1986 		/* Avoid direct reclaim but allow kswapd to wake */
1987 		pfrag->page = alloc_pages((gfp & ~__GFP_DIRECT_RECLAIM) |
1988 					  __GFP_COMP | __GFP_NOWARN |
1989 					  __GFP_NORETRY,
1990 					  SKB_FRAG_PAGE_ORDER);
1991 		if (likely(pfrag->page)) {
1992 			pfrag->size = PAGE_SIZE << SKB_FRAG_PAGE_ORDER;
1993 			return true;
1994 		}
1995 	}
1996 	pfrag->page = alloc_page(gfp);
1997 	if (likely(pfrag->page)) {
1998 		pfrag->size = PAGE_SIZE;
1999 		return true;
2000 	}
2001 	return false;
2002 }
2003 EXPORT_SYMBOL(skb_page_frag_refill);
2004 
2005 bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
2006 {
2007 	if (likely(skb_page_frag_refill(32U, pfrag, sk->sk_allocation)))
2008 		return true;
2009 
2010 	sk_enter_memory_pressure(sk);
2011 	sk_stream_moderate_sndbuf(sk);
2012 	return false;
2013 }
2014 EXPORT_SYMBOL(sk_page_frag_refill);
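
/* Example (editorial sketch, not part of the original file): the usual
 * refill-then-copy pattern a stream send path uses with the per-socket
 * page_frag; error handling is reduced to the bare minimum.
 */
static int example_append_frag(struct sock *sk, struct sk_buff *skb,
			       struct msghdr *msg, int copy)
{
	struct page_frag *pfrag = sk_page_frag(sk);

	if (!sk_page_frag_refill(sk, pfrag))
		return -EAGAIN;	/* under pressure: caller waits for memory */

	if (copy_page_from_iter(pfrag->page, pfrag->offset, copy,
				&msg->msg_iter) != copy)
		return -EFAULT;

	skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
			   pfrag->page, pfrag->offset, copy);
	get_page(pfrag->page);	/* the frag now holds its own page reference */
	pfrag->offset += copy;
	return copy;
}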
2015 
2016 static void __lock_sock(struct sock *sk)
2017 	__releases(&sk->sk_lock.slock)
2018 	__acquires(&sk->sk_lock.slock)
2019 {
2020 	DEFINE_WAIT(wait);
2021 
2022 	for (;;) {
2023 		prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
2024 					TASK_UNINTERRUPTIBLE);
2025 		spin_unlock_bh(&sk->sk_lock.slock);
2026 		schedule();
2027 		spin_lock_bh(&sk->sk_lock.slock);
2028 		if (!sock_owned_by_user(sk))
2029 			break;
2030 	}
2031 	finish_wait(&sk->sk_lock.wq, &wait);
2032 }
2033 
2034 static void __release_sock(struct sock *sk)
2035 	__releases(&sk->sk_lock.slock)
2036 	__acquires(&sk->sk_lock.slock)
2037 {
2038 	struct sk_buff *skb, *next;
2039 
2040 	while ((skb = sk->sk_backlog.head) != NULL) {
2041 		sk->sk_backlog.head = sk->sk_backlog.tail = NULL;
2042 
2043 		spin_unlock_bh(&sk->sk_lock.slock);
2044 
2045 		do {
2046 			next = skb->next;
2047 			prefetch(next);
2048 			WARN_ON_ONCE(skb_dst_is_noref(skb));
2049 			skb->next = NULL;
2050 			sk_backlog_rcv(sk, skb);
2051 
2052 			cond_resched();
2053 
2054 			skb = next;
2055 		} while (skb != NULL);
2056 
2057 		spin_lock_bh(&sk->sk_lock.slock);
2058 	}
2059 
2060 	/*
2061 	 * Doing the zeroing here guarantees that we cannot loop forever
2062 	 * while a wild producer attempts to flood us.
2063 	 */
2064 	sk->sk_backlog.len = 0;
2065 }
2066 
2067 void __sk_flush_backlog(struct sock *sk)
2068 {
2069 	spin_lock_bh(&sk->sk_lock.slock);
2070 	__release_sock(sk);
2071 	spin_unlock_bh(&sk->sk_lock.slock);
2072 }
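
/* Example (editorial, not in the original file): the producer side of the
 * backlog drained above.  A softirq input path processes the packet
 * directly when nobody owns the socket and queues it to the backlog
 * otherwise; __release_sock() replays it through sk_backlog_rcv() later.
 */
static int example_input(struct sock *sk, struct sk_buff *skb)
{
	int ret = 0;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk))
		ret = sk_backlog_rcv(sk, skb);
	else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf))
		ret = -ENOBUFS;		/* backlog limit hit: drop */
	bh_unlock_sock(sk);
	return ret;
}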
2073 
2074 /**
2075  * sk_wait_data - wait for data to arrive at sk_receive_queue
2076  * @sk:    sock to wait on
2077  * @timeo: for how long
2078  * @skb:   last skb seen on sk_receive_queue
2079  *
2080  * Now socket state, including sk->sk_err, is changed only under the lock,
2081  * hence we may omit checks after joining the wait queue.
2082  * We check the receive queue before schedule() only as an optimization;
2083  * it is very likely that release_sock() added new data.
2084  */
2085 int sk_wait_data(struct sock *sk, long *timeo, const struct sk_buff *skb)
2086 {
2087 	DEFINE_WAIT_FUNC(wait, woken_wake_function);
2088 	int rc;
2089 
2090 	add_wait_queue(sk_sleep(sk), &wait);
2091 	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
2092 	rc = sk_wait_event(sk, timeo, skb_peek_tail(&sk->sk_receive_queue) != skb, &wait);
2093 	sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
2094 	remove_wait_queue(sk_sleep(sk), &wait);
2095 	return rc;
2096 }
2097 EXPORT_SYMBOL(sk_wait_data);
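
/* Example (editorial sketch, not part of the original file): the receive
 * loop sk_wait_data() is written for -- the caller holds the socket lock,
 * and sk_wait_data() drops and re-takes it around the actual sleep.
 * Passing NULL for @skb means "wait until the queue is non-empty".
 */
static struct sk_buff *example_wait_for_skb(struct sock *sk, long *timeo,
					    int *err)
{
	struct sk_buff *skb;

	/* caller holds the socket lock */
	while (!(skb = skb_peek(&sk->sk_receive_queue))) {
		*err = sock_error(sk);
		if (*err)
			return NULL;
		if (!*timeo) {
			*err = -EAGAIN;
			return NULL;
		}
		sk_wait_data(sk, timeo, NULL);
		if (signal_pending(current)) {
			*err = sock_intr_errno(*timeo);
			return NULL;
		}
	}
	return skb;
}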
2098 
2099 /**
2100  *	__sk_mem_raise_allocated - increase memory_allocated
2101  *	@sk: socket
2102  *	@size: memory size to allocate
2103  *	@amt: pages to allocate
2104  *	@kind: allocation type
2105  *
2106  *	Similar to __sk_mem_schedule(), but does not update sk_forward_alloc
2107  */
2108 int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind)
2109 {
2110 	struct proto *prot = sk->sk_prot;
2111 	long allocated = sk_memory_allocated_add(sk, amt);
2112 
2113 	if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
2114 	    !mem_cgroup_charge_skmem(sk->sk_memcg, amt))
2115 		goto suppress_allocation;
2116 
2117 	/* Under limit. */
2118 	if (allocated <= sk_prot_mem_limits(sk, 0)) {
2119 		sk_leave_memory_pressure(sk);
2120 		return 1;
2121 	}
2122 
2123 	/* Under pressure. */
2124 	if (allocated > sk_prot_mem_limits(sk, 1))
2125 		sk_enter_memory_pressure(sk);
2126 
2127 	/* Over hard limit. */
2128 	if (allocated > sk_prot_mem_limits(sk, 2))
2129 		goto suppress_allocation;
2130 
2131 	/* guarantee minimum buffer size under pressure */
2132 	if (kind == SK_MEM_RECV) {
2133 		if (atomic_read(&sk->sk_rmem_alloc) < prot->sysctl_rmem[0])
2134 			return 1;
2135 
2136 	} else { /* SK_MEM_SEND */
2137 		if (sk->sk_type == SOCK_STREAM) {
2138 			if (sk->sk_wmem_queued < prot->sysctl_wmem[0])
2139 				return 1;
2140 		} else if (atomic_read(&sk->sk_wmem_alloc) <
2141 			   prot->sysctl_wmem[0])
2142 				return 1;
2143 	}
2144 
2145 	if (sk_has_memory_pressure(sk)) {
2146 		int alloc;
2147 
2148 		if (!sk_under_memory_pressure(sk))
2149 			return 1;
2150 		alloc = sk_sockets_allocated_read_positive(sk);
2151 		if (sk_prot_mem_limits(sk, 2) > alloc *
2152 		    sk_mem_pages(sk->sk_wmem_queued +
2153 				 atomic_read(&sk->sk_rmem_alloc) +
2154 				 sk->sk_forward_alloc))
2155 			return 1;
2156 	}
2157 
2158 suppress_allocation:
2159 
2160 	if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) {
2161 		sk_stream_moderate_sndbuf(sk);
2162 
2163 		/* Fail only if the socket is _under_ its sndbuf.
2164 		 * In this case we cannot block, so we have to fail.
2165 		 */
2166 		if (sk->sk_wmem_queued + size >= sk->sk_sndbuf)
2167 			return 1;
2168 	}
2169 
2170 	trace_sock_exceed_buf_limit(sk, prot, allocated);
2171 
2172 	sk_memory_allocated_sub(sk, amt);
2173 
2174 	if (mem_cgroup_sockets_enabled && sk->sk_memcg)
2175 		mem_cgroup_uncharge_skmem(sk->sk_memcg, amt);
2176 
2177 	return 0;
2178 }
2179 EXPORT_SYMBOL(__sk_mem_raise_allocated);
2180 
2181 /**
2182  *	__sk_mem_schedule - increase sk_forward_alloc and memory_allocated
2183  *	@sk: socket
2184  *	@size: memory size to allocate
2185  *	@kind: allocation type
2186  *
2187  *	If kind is SK_MEM_SEND, it means wmem allocation. Otherwise it means
2188  *	rmem allocation. This function assumes that protocols which have
2189  *	memory_pressure use sk_wmem_queued as write buffer accounting.
2190  */
2191 int __sk_mem_schedule(struct sock *sk, int size, int kind)
2192 {
2193 	int ret, amt = sk_mem_pages(size);
2194 
2195 	sk->sk_forward_alloc += amt << SK_MEM_QUANTUM_SHIFT;
2196 	ret = __sk_mem_raise_allocated(sk, size, amt, kind);
2197 	if (!ret)
2198 		sk->sk_forward_alloc -= amt << SK_MEM_QUANTUM_SHIFT;
2199 	return ret;
2200 }
2201 EXPORT_SYMBOL(__sk_mem_schedule);
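
/* Example (editorial, not in the original file): how a receive path
 * typically charges an skb against the limits enforced above, through the
 * sk_rmem_schedule() wrapper around __sk_mem_schedule().
 */
static int example_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >
	    (unsigned int)sk->sk_rcvbuf)
		return -ENOMEM;			/* receive buffer full */
	if (!sk_rmem_schedule(sk, skb, skb->truesize))
		return -ENOBUFS;		/* over protocol memory limits */

	skb_set_owner_r(skb, sk);		/* charge truesize to the socket */
	skb_queue_tail(&sk->sk_receive_queue, skb);
	sk->sk_data_ready(sk);
	return 0;
}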
2202 
2203 /**
2204  *	__sk_mem_reduce_allocated - reclaim memory_allocated
2205  *	@sk: socket
2206  *	@amount: number of quanta
2207  *
2208  *	Similar to __sk_mem_reclaim(), but does not update sk_forward_alloc
2209  */
2210 void __sk_mem_reduce_allocated(struct sock *sk, int amount)
2211 {
2212 	sk_memory_allocated_sub(sk, amount);
2213 
2214 	if (mem_cgroup_sockets_enabled && sk->sk_memcg)
2215 		mem_cgroup_uncharge_skmem(sk->sk_memcg, amount);
2216 
2217 	if (sk_under_memory_pressure(sk) &&
2218 	    (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)))
2219 		sk_leave_memory_pressure(sk);
2220 }
2221 EXPORT_SYMBOL(__sk_mem_reduce_allocated);
2222 
2223 /**
2224  *	__sk_mem_reclaim - reclaim sk_forward_alloc and memory_allocated
2225  *	@sk: socket
2226  *	@amount: number of bytes (rounded down to a SK_MEM_QUANTUM multiple)
2227  */
2228 void __sk_mem_reclaim(struct sock *sk, int amount)
2229 {
2230 	amount >>= SK_MEM_QUANTUM_SHIFT;
2231 	sk->sk_forward_alloc -= amount << SK_MEM_QUANTUM_SHIFT;
2232 	__sk_mem_reduce_allocated(sk, amount);
2233 }
2234 EXPORT_SYMBOL(__sk_mem_reclaim);
2235 
2236 int sk_set_peek_off(struct sock *sk, int val)
2237 {
2238 	if (val < 0)
2239 		return -EINVAL;
2240 
2241 	sk->sk_peek_off = val;
2242 	return 0;
2243 }
2244 EXPORT_SYMBOL_GPL(sk_set_peek_off);
2245 
2246 /*
2247  * Set of default routines for initialising struct proto_ops when
2248  * the protocol does not support a particular function. In certain
2249  * cases where it makes no sense for a protocol to have a "do nothing"
2250  * function, some default processing is provided.
2251  */
2252 
2253 int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len)
2254 {
2255 	return -EOPNOTSUPP;
2256 }
2257 EXPORT_SYMBOL(sock_no_bind);
2258 
2259 int sock_no_connect(struct socket *sock, struct sockaddr *saddr,
2260 		    int len, int flags)
2261 {
2262 	return -EOPNOTSUPP;
2263 }
2264 EXPORT_SYMBOL(sock_no_connect);
2265 
2266 int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
2267 {
2268 	return -EOPNOTSUPP;
2269 }
2270 EXPORT_SYMBOL(sock_no_socketpair);
2271 
2272 int sock_no_accept(struct socket *sock, struct socket *newsock, int flags)
2273 {
2274 	return -EOPNOTSUPP;
2275 }
2276 EXPORT_SYMBOL(sock_no_accept);
2277 
2278 int sock_no_getname(struct socket *sock, struct sockaddr *saddr,
2279 		    int *len, int peer)
2280 {
2281 	return -EOPNOTSUPP;
2282 }
2283 EXPORT_SYMBOL(sock_no_getname);
2284 
2285 unsigned int sock_no_poll(struct file *file, struct socket *sock, poll_table *pt)
2286 {
2287 	return 0;
2288 }
2289 EXPORT_SYMBOL(sock_no_poll);
2290 
2291 int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
2292 {
2293 	return -EOPNOTSUPP;
2294 }
2295 EXPORT_SYMBOL(sock_no_ioctl);
2296 
2297 int sock_no_listen(struct socket *sock, int backlog)
2298 {
2299 	return -EOPNOTSUPP;
2300 }
2301 EXPORT_SYMBOL(sock_no_listen);
2302 
2303 int sock_no_shutdown(struct socket *sock, int how)
2304 {
2305 	return -EOPNOTSUPP;
2306 }
2307 EXPORT_SYMBOL(sock_no_shutdown);
2308 
2309 int sock_no_setsockopt(struct socket *sock, int level, int optname,
2310 		    char __user *optval, unsigned int optlen)
2311 {
2312 	return -EOPNOTSUPP;
2313 }
2314 EXPORT_SYMBOL(sock_no_setsockopt);
2315 
2316 int sock_no_getsockopt(struct socket *sock, int level, int optname,
2317 		    char __user *optval, int __user *optlen)
2318 {
2319 	return -EOPNOTSUPP;
2320 }
2321 EXPORT_SYMBOL(sock_no_getsockopt);
2322 
2323 int sock_no_sendmsg(struct socket *sock, struct msghdr *m, size_t len)
2324 {
2325 	return -EOPNOTSUPP;
2326 }
2327 EXPORT_SYMBOL(sock_no_sendmsg);
2328 
2329 int sock_no_recvmsg(struct socket *sock, struct msghdr *m, size_t len,
2330 		    int flags)
2331 {
2332 	return -EOPNOTSUPP;
2333 }
2334 EXPORT_SYMBOL(sock_no_recvmsg);
2335 
2336 int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
2337 {
2338 	/* Mirror missing mmap method error code */
2339 	return -ENODEV;
2340 }
2341 EXPORT_SYMBOL(sock_no_mmap);
2342 
2343 ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
2344 {
2345 	ssize_t res;
2346 	struct msghdr msg = {.msg_flags = flags};
2347 	struct kvec iov;
2348 	char *kaddr = kmap(page);
2349 	iov.iov_base = kaddr + offset;
2350 	iov.iov_len = size;
2351 	res = kernel_sendmsg(sock, &msg, &iov, 1, size);
2352 	kunmap(page);
2353 	return res;
2354 }
2355 EXPORT_SYMBOL(sock_no_sendpage);
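
/* Example (editorial, not in the original file): how the sock_no_* stubs
 * above are typically consumed -- a connectionless protocol wiring the
 * operations it does not support into its proto_ops.  The example_*
 * handlers and the family value are hypothetical.
 */
static const struct proto_ops example_dgram_ops = {
	.family		= PF_PACKET,		/* illustrative only */
	.owner		= THIS_MODULE,
	.release	= example_release,
	.bind		= example_bind,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.getname	= example_getname,
	.poll		= datagram_poll,
	.ioctl		= sock_no_ioctl,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= sock_no_setsockopt,
	.getsockopt	= sock_no_getsockopt,
	.sendmsg	= example_sendmsg,
	.recvmsg	= example_recvmsg,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage,
};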
2356 
2357 /*
2358  *	Default Socket Callbacks
2359  */
2360 
2361 static void sock_def_wakeup(struct sock *sk)
2362 {
2363 	struct socket_wq *wq;
2364 
2365 	rcu_read_lock();
2366 	wq = rcu_dereference(sk->sk_wq);
2367 	if (skwq_has_sleeper(wq))
2368 		wake_up_interruptible_all(&wq->wait);
2369 	rcu_read_unlock();
2370 }
2371 
2372 static void sock_def_error_report(struct sock *sk)
2373 {
2374 	struct socket_wq *wq;
2375 
2376 	rcu_read_lock();
2377 	wq = rcu_dereference(sk->sk_wq);
2378 	if (skwq_has_sleeper(wq))
2379 		wake_up_interruptible_poll(&wq->wait, POLLERR);
2380 	sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
2381 	rcu_read_unlock();
2382 }
2383 
2384 static void sock_def_readable(struct sock *sk)
2385 {
2386 	struct socket_wq *wq;
2387 
2388 	rcu_read_lock();
2389 	wq = rcu_dereference(sk->sk_wq);
2390 	if (skwq_has_sleeper(wq))
2391 		wake_up_interruptible_sync_poll(&wq->wait, POLLIN | POLLPRI |
2392 						POLLRDNORM | POLLRDBAND);
2393 	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
2394 	rcu_read_unlock();
2395 }
2396 
2397 static void sock_def_write_space(struct sock *sk)
2398 {
2399 	struct socket_wq *wq;
2400 
2401 	rcu_read_lock();
2402 
2403 	/* Do not wake up a writer until he can make "significant"
2404 	 * progress.  --DaveM
2405 	 */
2406 	if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
2407 		wq = rcu_dereference(sk->sk_wq);
2408 		if (skwq_has_sleeper(wq))
2409 			wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
2410 						POLLWRNORM | POLLWRBAND);
2411 
2412 		/* Should agree with poll, otherwise some programs break */
2413 		if (sock_writeable(sk))
2414 			sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
2415 	}
2416 
2417 	rcu_read_unlock();
2418 }
2419 
2420 static void sock_def_destruct(struct sock *sk)
2421 {
2422 }
2423 
2424 void sk_send_sigurg(struct sock *sk)
2425 {
2426 	if (sk->sk_socket && sk->sk_socket->file)
2427 		if (send_sigurg(&sk->sk_socket->file->f_owner))
2428 			sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI);
2429 }
2430 EXPORT_SYMBOL(sk_send_sigurg);
2431 
2432 void sk_reset_timer(struct sock *sk, struct timer_list* timer,
2433 		    unsigned long expires)
2434 {
2435 	if (!mod_timer(timer, expires))
2436 		sock_hold(sk);
2437 }
2438 EXPORT_SYMBOL(sk_reset_timer);
2439 
2440 void sk_stop_timer(struct sock *sk, struct timer_list* timer)
2441 {
2442 	if (del_timer(timer))
2443 		__sock_put(sk);
2444 }
2445 EXPORT_SYMBOL(sk_stop_timer);
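
/* Example (editorial, not in the original file): the reference-count
 * contract of the helpers above.  sk_reset_timer() holds the socket while
 * the timer is pending; the handler must drop that reference when it
 * fires, and sk_stop_timer() drops it if the timer is cancelled first.
 * Assumes the handler was installed with setup_timer() beforehand.
 */
static void example_timer(unsigned long data)
{
	struct sock *sk = (struct sock *)data;

	bh_lock_sock(sk);
	/* ... timeout work, possibly deferred if the socket is owned ... */
	bh_unlock_sock(sk);
	sock_put(sk);	/* pairs with sock_hold() in sk_reset_timer() */
}

static void example_arm_timer(struct sock *sk)
{
	sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ);
}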
2446 
2447 void sock_init_data(struct socket *sock, struct sock *sk)
2448 {
2449 	skb_queue_head_init(&sk->sk_receive_queue);
2450 	skb_queue_head_init(&sk->sk_write_queue);
2451 	skb_queue_head_init(&sk->sk_error_queue);
2452 
2453 	sk->sk_send_head	=	NULL;
2454 
2455 	init_timer(&sk->sk_timer);
2456 
2457 	sk->sk_allocation	=	GFP_KERNEL;
2458 	sk->sk_rcvbuf		=	sysctl_rmem_default;
2459 	sk->sk_sndbuf		=	sysctl_wmem_default;
2460 	sk->sk_state		=	TCP_CLOSE;
2461 	sk_set_socket(sk, sock);
2462 
2463 	sock_set_flag(sk, SOCK_ZAPPED);
2464 
2465 	if (sock) {
2466 		sk->sk_type	=	sock->type;
2467 		sk->sk_wq	=	sock->wq;
2468 		sock->sk	=	sk;
2469 		sk->sk_uid	=	SOCK_INODE(sock)->i_uid;
2470 	} else {
2471 		sk->sk_wq	=	NULL;
2472 		sk->sk_uid	=	make_kuid(sock_net(sk)->user_ns, 0);
2473 	}
2474 
2475 	rwlock_init(&sk->sk_callback_lock);
2476 	lockdep_set_class_and_name(&sk->sk_callback_lock,
2477 			af_callback_keys + sk->sk_family,
2478 			af_family_clock_key_strings[sk->sk_family]);
2479 
2480 	sk->sk_state_change	=	sock_def_wakeup;
2481 	sk->sk_data_ready	=	sock_def_readable;
2482 	sk->sk_write_space	=	sock_def_write_space;
2483 	sk->sk_error_report	=	sock_def_error_report;
2484 	sk->sk_destruct		=	sock_def_destruct;
2485 
2486 	sk->sk_frag.page	=	NULL;
2487 	sk->sk_frag.offset	=	0;
2488 	sk->sk_peek_off		=	-1;
2489 
2490 	sk->sk_peer_pid 	=	NULL;
2491 	sk->sk_peer_cred	=	NULL;
2492 	sk->sk_write_pending	=	0;
2493 	sk->sk_rcvlowat		=	1;
2494 	sk->sk_rcvtimeo		=	MAX_SCHEDULE_TIMEOUT;
2495 	sk->sk_sndtimeo		=	MAX_SCHEDULE_TIMEOUT;
2496 
2497 	sk->sk_stamp = ktime_set(-1L, 0);
2498 
2499 #ifdef CONFIG_NET_RX_BUSY_POLL
2500 	sk->sk_napi_id		=	0;
2501 	sk->sk_ll_usec		=	sysctl_net_busy_read;
2502 #endif
2503 
2504 	sk->sk_max_pacing_rate = ~0U;
2505 	sk->sk_pacing_rate = ~0U;
2506 	sk->sk_incoming_cpu = -1;
2507 	/*
2508 	 * Before updating sk_refcnt, we must commit prior changes to memory
2509 	 * (see Documentation/RCU/rculist_nulls.txt for details)
2510 	 */
2511 	smp_wmb();
2512 	atomic_set(&sk->sk_refcnt, 1);
2513 	atomic_set(&sk->sk_drops, 0);
2514 }
2515 EXPORT_SYMBOL(sock_init_data);
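
/* Example (editorial sketch, not part of the original file): a protocol
 * taking the defaults installed by sock_init_data() and then overriding
 * one of them -- the common pattern for custom wakeup behaviour.
 */
static void example_data_ready(struct sock *sk)
{
	struct socket_wq *wq;

	/* protocol bookkeeping would go here, then the usual wakeup */
	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_poll(&wq->wait, POLLIN | POLLRDNORM);
	rcu_read_unlock();
}

static void example_init_sock(struct socket *sock, struct sock *sk)
{
	sock_init_data(sock, sk);
	sk->sk_data_ready = example_data_ready;
}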
2516 
2517 void lock_sock_nested(struct sock *sk, int subclass)
2518 {
2519 	might_sleep();
2520 	spin_lock_bh(&sk->sk_lock.slock);
2521 	if (sk->sk_lock.owned)
2522 		__lock_sock(sk);
2523 	sk->sk_lock.owned = 1;
2524 	spin_unlock(&sk->sk_lock.slock);
2525 	/*
2526 	 * The sk_lock has mutex_lock() semantics here:
2527 	 */
2528 	mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
2529 	local_bh_enable();
2530 }
2531 EXPORT_SYMBOL(lock_sock_nested);
2532 
2533 void release_sock(struct sock *sk)
2534 {
2535 	spin_lock_bh(&sk->sk_lock.slock);
2536 	if (sk->sk_backlog.tail)
2537 		__release_sock(sk);
2538 
2539 	/* Warning: release_cb() might need to release sk ownership,
2540 	 * i.e. call sock_release_ownership(sk) before us.
2541 	 */
2542 	if (sk->sk_prot->release_cb)
2543 		sk->sk_prot->release_cb(sk);
2544 
2545 	sock_release_ownership(sk);
2546 	if (waitqueue_active(&sk->sk_lock.wq))
2547 		wake_up(&sk->sk_lock.wq);
2548 	spin_unlock_bh(&sk->sk_lock.slock);
2549 }
2550 EXPORT_SYMBOL(release_sock);
2551 
2552 /**
2553  * lock_sock_fast - fast version of lock_sock
2554  * @sk: socket
2555  *
2556  * This version should be used for very small sections, where the process
2557  * won't block.  Returns false if the fast path was taken:
2558  *   sk_lock.slock locked, owned = 0, BH disabled
2559  * Returns true if the slow path was taken:
2560  *   sk_lock.slock unlocked, owned = 1, BH enabled
2561  */
2562 bool lock_sock_fast(struct sock *sk)
2563 {
2564 	might_sleep();
2565 	spin_lock_bh(&sk->sk_lock.slock);
2566 
2567 	if (!sk->sk_lock.owned)
2568 		/*
2569 		 * Note: we return with BH disabled; unlock_sock_fast() re-enables them.
2570 		 */
2571 		return false;
2572 
2573 	__lock_sock(sk);
2574 	sk->sk_lock.owned = 1;
2575 	spin_unlock(&sk->sk_lock.slock);
2576 	/*
2577 	 * The sk_lock has mutex_lock() semantics here:
2578 	 */
2579 	mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
2580 	local_bh_enable();
2581 	return true;
2582 }
2583 EXPORT_SYMBOL(lock_sock_fast);
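
/* Example (editorial, not in the original file): the intended calling
 * pattern -- always pair with unlock_sock_fast(), which does
 * spin_unlock_bh() after a fast acquisition and release_sock() after a
 * slow one.
 */
static int example_first_len(struct sock *sk)
{
	bool slow = lock_sock_fast(sk);
	struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
	int len = skb ? skb->len : 0;

	unlock_sock_fast(sk, slow);
	return len;
}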
2584 
2585 int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
2586 {
2587 	struct timeval tv;
2588 	if (!sock_flag(sk, SOCK_TIMESTAMP))
2589 		sock_enable_timestamp(sk, SOCK_TIMESTAMP);
2590 	tv = ktime_to_timeval(sk->sk_stamp);
2591 	if (tv.tv_sec == -1)
2592 		return -ENOENT;
2593 	if (tv.tv_sec == 0) {
2594 		sk->sk_stamp = ktime_get_real();
2595 		tv = ktime_to_timeval(sk->sk_stamp);
2596 	}
2597 	return copy_to_user(userstamp, &tv, sizeof(tv)) ? -EFAULT : 0;
2598 }
2599 EXPORT_SYMBOL(sock_get_timestamp);
2600 
2601 int sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp)
2602 {
2603 	struct timespec ts;
2604 	if (!sock_flag(sk, SOCK_TIMESTAMP))
2605 		sock_enable_timestamp(sk, SOCK_TIMESTAMP);
2606 	ts = ktime_to_timespec(sk->sk_stamp);
2607 	if (ts.tv_sec == -1)
2608 		return -ENOENT;
2609 	if (ts.tv_sec == 0) {
2610 		sk->sk_stamp = ktime_get_real();
2611 		ts = ktime_to_timespec(sk->sk_stamp);
2612 	}
2613 	return copy_to_user(userstamp, &ts, sizeof(ts)) ? -EFAULT : 0;
2614 }
2615 EXPORT_SYMBOL(sock_get_timestampns);
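
/* Example (editorial, not in the original file): these two helpers back the
 * SIOCGSTAMP/SIOCGSTAMPNS ioctls; from userspace the arrival time of the
 * last received packet is read like this.
 */
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/time.h>
#include <linux/sockios.h>

static void print_last_rx_stamp(int fd)
{
	struct timeval tv;

	if (ioctl(fd, SIOCGSTAMP, &tv) == 0)
		printf("last packet at %ld.%06ld\n",
		       (long)tv.tv_sec, (long)tv.tv_usec);
}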
2616 
2617 void sock_enable_timestamp(struct sock *sk, int flag)
2618 {
2619 	if (!sock_flag(sk, flag)) {
2620 		unsigned long previous_flags = sk->sk_flags;
2621 
2622 		sock_set_flag(sk, flag);
2623 		/*
2624 		 * We just set one of the two flags which require net
2625 		 * time stamping, but time stamping might have been on
2626 		 * already because of the other one.
2627 		 */
2628 		if (sock_needs_netstamp(sk) &&
2629 		    !(previous_flags & SK_FLAGS_TIMESTAMP))
2630 			net_enable_timestamp();
2631 	}
2632 }
2633 
2634 int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
2635 		       int level, int type)
2636 {
2637 	struct sock_exterr_skb *serr;
2638 	struct sk_buff *skb;
2639 	int copied, err;
2640 
2641 	err = -EAGAIN;
2642 	skb = sock_dequeue_err_skb(sk);
2643 	if (skb == NULL)
2644 		goto out;
2645 
2646 	copied = skb->len;
2647 	if (copied > len) {
2648 		msg->msg_flags |= MSG_TRUNC;
2649 		copied = len;
2650 	}
2651 	err = skb_copy_datagram_msg(skb, 0, msg, copied);
2652 	if (err)
2653 		goto out_free_skb;
2654 
2655 	sock_recv_timestamp(msg, sk, skb);
2656 
2657 	serr = SKB_EXT_ERR(skb);
2658 	put_cmsg(msg, level, type, sizeof(serr->ee), &serr->ee);
2659 
2660 	msg->msg_flags |= MSG_ERRQUEUE;
2661 	err = copied;
2662 
2663 out_free_skb:
2664 	kfree_skb(skb);
2665 out:
2666 	return err;
2667 }
2668 EXPORT_SYMBOL(sock_recv_errqueue);
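
/* Example (editorial, not in the original file): the userspace consumer of
 * sock_recv_errqueue() -- draining the error queue with MSG_ERRQUEUE and
 * picking the struct sock_extended_err out of the control messages.
 */
#include <string.h>
#include <sys/socket.h>
#include <linux/errqueue.h>

static int drain_errqueue(int fd)
{
	char data[256], control[512];
	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = control, .msg_controllen = sizeof(control),
	};
	struct cmsghdr *cmsg;

	if (recvmsg(fd, &msg, MSG_ERRQUEUE | MSG_DONTWAIT) < 0)
		return -1;

	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
		struct sock_extended_err ee;

		memcpy(&ee, CMSG_DATA(cmsg), sizeof(ee));
		/* ee.ee_origin/ee.ee_errno identify the queued error; a real
		 * consumer would first match cmsg_level/cmsg_type.
		 */
	}
	return 0;
}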
2669 
2670 /*
2671  *	Get a socket option on a socket.
2672  *
2673  *	FIX: POSIX 1003.1g is very ambiguous here. It states that
2674  *	asynchronous errors should be reported by getsockopt. We assume
2675  *	this means if you specify SO_ERROR (otherwise what's the point of it).
2676  */
2677 int sock_common_getsockopt(struct socket *sock, int level, int optname,
2678 			   char __user *optval, int __user *optlen)
2679 {
2680 	struct sock *sk = sock->sk;
2681 
2682 	return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
2683 }
2684 EXPORT_SYMBOL(sock_common_getsockopt);
2685 
2686 #ifdef CONFIG_COMPAT
2687 int compat_sock_common_getsockopt(struct socket *sock, int level, int optname,
2688 				  char __user *optval, int __user *optlen)
2689 {
2690 	struct sock *sk = sock->sk;
2691 
2692 	if (sk->sk_prot->compat_getsockopt != NULL)
2693 		return sk->sk_prot->compat_getsockopt(sk, level, optname,
2694 						      optval, optlen);
2695 	return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
2696 }
2697 EXPORT_SYMBOL(compat_sock_common_getsockopt);
2698 #endif
2699 
2700 int sock_common_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
2701 			int flags)
2702 {
2703 	struct sock *sk = sock->sk;
2704 	int addr_len = 0;
2705 	int err;
2706 
2707 	err = sk->sk_prot->recvmsg(sk, msg, size, flags & MSG_DONTWAIT,
2708 				   flags & ~MSG_DONTWAIT, &addr_len);
2709 	if (err >= 0)
2710 		msg->msg_namelen = addr_len;
2711 	return err;
2712 }
2713 EXPORT_SYMBOL(sock_common_recvmsg);
2714 
2715 /*
2716  *	Set socket options on an inet socket.
2717  */
2718 int sock_common_setsockopt(struct socket *sock, int level, int optname,
2719 			   char __user *optval, unsigned int optlen)
2720 {
2721 	struct sock *sk = sock->sk;
2722 
2723 	return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
2724 }
2725 EXPORT_SYMBOL(sock_common_setsockopt);
2726 
2727 #ifdef CONFIG_COMPAT
2728 int compat_sock_common_setsockopt(struct socket *sock, int level, int optname,
2729 				  char __user *optval, unsigned int optlen)
2730 {
2731 	struct sock *sk = sock->sk;
2732 
2733 	if (sk->sk_prot->compat_setsockopt != NULL)
2734 		return sk->sk_prot->compat_setsockopt(sk, level, optname,
2735 						      optval, optlen);
2736 	return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
2737 }
2738 EXPORT_SYMBOL(compat_sock_common_setsockopt);
2739 #endif
2740 
2741 void sk_common_release(struct sock *sk)
2742 {
2743 	if (sk->sk_prot->destroy)
2744 		sk->sk_prot->destroy(sk);
2745 
2746 	/*
2747 	 * Observation: when sk_common_release() is called, processes have
2748 	 * no access to the socket, but the network stack still does.
2749 	 * Step one, detach it from networking:
2750 	 *
2751 	 * A. Remove it from the hash tables.
2752 	 */
2753 
2754 	sk->sk_prot->unhash(sk);
2755 
2756 	/*
2757 	 * At this point the socket cannot receive new packets, but it is
2758 	 * possible that some packets are in flight, because some CPU ran the
2759 	 * receiver and did the hash table lookup before we unhashed the
2760 	 * socket. Those packets will reach the receive queue and be purged
2761 	 * by the socket destructor.
2762 	 *
2763 	 * Also, we still have packets pending on the receive queue and probably
2764 	 * our own packets waiting in device queues. sock_destroy will drain the
2765 	 * receive queue, but transmitted packets delay destruction until the last reference is released.
2766 	 */
2767 
2768 	sock_orphan(sk);
2769 
2770 	xfrm_sk_free_policy(sk);
2771 
2772 	sk_refcnt_debug_release(sk);
2773 
2774 	if (sk->sk_frag.page) {
2775 		put_page(sk->sk_frag.page);
2776 		sk->sk_frag.page = NULL;
2777 	}
2778 
2779 	sock_put(sk);
2780 }
2781 EXPORT_SYMBOL(sk_common_release);
2782 
2783 #ifdef CONFIG_PROC_FS
2784 #define PROTO_INUSE_NR	64	/* should be enough for the first time */
2785 struct prot_inuse {
2786 	int val[PROTO_INUSE_NR];
2787 };
2788 
2789 static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR);
2790 
2791 #ifdef CONFIG_NET_NS
2792 void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
2793 {
2794 	__this_cpu_add(net->core.inuse->val[prot->inuse_idx], val);
2795 }
2796 EXPORT_SYMBOL_GPL(sock_prot_inuse_add);
2797 
2798 int sock_prot_inuse_get(struct net *net, struct proto *prot)
2799 {
2800 	int cpu, idx = prot->inuse_idx;
2801 	int res = 0;
2802 
2803 	for_each_possible_cpu(cpu)
2804 		res += per_cpu_ptr(net->core.inuse, cpu)->val[idx];
2805 
2806 	return res >= 0 ? res : 0;
2807 }
2808 EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
2809 
2810 static int __net_init sock_inuse_init_net(struct net *net)
2811 {
2812 	net->core.inuse = alloc_percpu(struct prot_inuse);
2813 	return net->core.inuse ? 0 : -ENOMEM;
2814 }
2815 
2816 static void __net_exit sock_inuse_exit_net(struct net *net)
2817 {
2818 	free_percpu(net->core.inuse);
2819 }
2820 
2821 static struct pernet_operations net_inuse_ops = {
2822 	.init = sock_inuse_init_net,
2823 	.exit = sock_inuse_exit_net,
2824 };
2825 
2826 static __init int net_inuse_init(void)
2827 {
2828 	if (register_pernet_subsys(&net_inuse_ops))
2829 		panic("Cannot initialize net inuse counters");
2830 
2831 	return 0;
2832 }
2833 
2834 core_initcall(net_inuse_init);
2835 #else
2836 static DEFINE_PER_CPU(struct prot_inuse, prot_inuse);
2837 
2838 void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
2839 {
2840 	__this_cpu_add(prot_inuse.val[prot->inuse_idx], val);
2841 }
2842 EXPORT_SYMBOL_GPL(sock_prot_inuse_add);
2843 
2844 int sock_prot_inuse_get(struct net *net, struct proto *prot)
2845 {
2846 	int cpu, idx = prot->inuse_idx;
2847 	int res = 0;
2848 
2849 	for_each_possible_cpu(cpu)
2850 		res += per_cpu(prot_inuse, cpu).val[idx];
2851 
2852 	return res >= 0 ? res : 0;
2853 }
2854 EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
2855 #endif
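
/* Example (editorial, not in the original file): protocols feed these
 * per-cpu counters from their hash/unhash callbacks; this is what makes
 * the "sockets" column of /proc/net/protocols move.
 */
static void example_unhash(struct sock *sk)
{
	/* ... remove sk from the protocol's lookup structures ... */
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
}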
2856 
2857 static void assign_proto_idx(struct proto *prot)
2858 {
2859 	prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR);
2860 
2861 	if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) {
2862 		pr_err("PROTO_INUSE_NR exhausted\n");
2863 		return;
2864 	}
2865 
2866 	set_bit(prot->inuse_idx, proto_inuse_idx);
2867 }
2868 
2869 static void release_proto_idx(struct proto *prot)
2870 {
2871 	if (prot->inuse_idx != PROTO_INUSE_NR - 1)
2872 		clear_bit(prot->inuse_idx, proto_inuse_idx);
2873 }
2874 #else
2875 static inline void assign_proto_idx(struct proto *prot)
2876 {
2877 }
2878 
2879 static inline void release_proto_idx(struct proto *prot)
2880 {
2881 }
2882 #endif
2883 
2884 static void req_prot_cleanup(struct request_sock_ops *rsk_prot)
2885 {
2886 	if (!rsk_prot)
2887 		return;
2888 	kfree(rsk_prot->slab_name);
2889 	rsk_prot->slab_name = NULL;
2890 	kmem_cache_destroy(rsk_prot->slab);
2891 	rsk_prot->slab = NULL;
2892 }
2893 
2894 static int req_prot_init(const struct proto *prot)
2895 {
2896 	struct request_sock_ops *rsk_prot = prot->rsk_prot;
2897 
2898 	if (!rsk_prot)
2899 		return 0;
2900 
2901 	rsk_prot->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s",
2902 					prot->name);
2903 	if (!rsk_prot->slab_name)
2904 		return -ENOMEM;
2905 
2906 	rsk_prot->slab = kmem_cache_create(rsk_prot->slab_name,
2907 					   rsk_prot->obj_size, 0,
2908 					   prot->slab_flags, NULL);
2909 
2910 	if (!rsk_prot->slab) {
2911 		pr_crit("%s: Can't create request sock SLAB cache!\n",
2912 			prot->name);
2913 		return -ENOMEM;
2914 	}
2915 	return 0;
2916 }
2917 
2918 int proto_register(struct proto *prot, int alloc_slab)
2919 {
2920 	if (alloc_slab) {
2921 		prot->slab = kmem_cache_create(prot->name, prot->obj_size, 0,
2922 					SLAB_HWCACHE_ALIGN | prot->slab_flags,
2923 					NULL);
2924 
2925 		if (prot->slab == NULL) {
2926 			pr_crit("%s: Can't create sock SLAB cache!\n",
2927 				prot->name);
2928 			goto out;
2929 		}
2930 
2931 		if (req_prot_init(prot))
2932 			goto out_free_request_sock_slab;
2933 
2934 		if (prot->twsk_prot != NULL) {
2935 			prot->twsk_prot->twsk_slab_name = kasprintf(GFP_KERNEL, "tw_sock_%s", prot->name);
2936 
2937 			if (prot->twsk_prot->twsk_slab_name == NULL)
2938 				goto out_free_request_sock_slab;
2939 
2940 			prot->twsk_prot->twsk_slab =
2941 				kmem_cache_create(prot->twsk_prot->twsk_slab_name,
2942 						  prot->twsk_prot->twsk_obj_size,
2943 						  0,
2944 						  prot->slab_flags,
2945 						  NULL);
2946 			if (prot->twsk_prot->twsk_slab == NULL)
2947 				goto out_free_timewait_sock_slab_name;
2948 		}
2949 	}
2950 
2951 	mutex_lock(&proto_list_mutex);
2952 	list_add(&prot->node, &proto_list);
2953 	assign_proto_idx(prot);
2954 	mutex_unlock(&proto_list_mutex);
2955 	return 0;
2956 
2957 out_free_timewait_sock_slab_name:
2958 	kfree(prot->twsk_prot->twsk_slab_name);
2959 out_free_request_sock_slab:
2960 	req_prot_cleanup(prot->rsk_prot);
2961 
2962 	kmem_cache_destroy(prot->slab);
2963 	prot->slab = NULL;
2964 out:
2965 	return -ENOBUFS;
2966 }
2967 EXPORT_SYMBOL(proto_register);
2968 
2969 void proto_unregister(struct proto *prot)
2970 {
2971 	mutex_lock(&proto_list_mutex);
2972 	release_proto_idx(prot);
2973 	list_del(&prot->node);
2974 	mutex_unlock(&proto_list_mutex);
2975 
2976 	kmem_cache_destroy(prot->slab);
2977 	prot->slab = NULL;
2978 
2979 	req_prot_cleanup(prot->rsk_prot);
2980 
2981 	if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) {
2982 		kmem_cache_destroy(prot->twsk_prot->twsk_slab);
2983 		kfree(prot->twsk_prot->twsk_slab_name);
2984 		prot->twsk_prot->twsk_slab = NULL;
2985 	}
2986 }
2987 EXPORT_SYMBOL(proto_unregister);
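
/* Example (editorial sketch, not part of the original file): the usual
 * module-init pairing with proto_register()/proto_unregister().  The
 * struct contents are hypothetical; a real protocol would set obj_size
 * to its own sock structure.
 */
static struct proto example_proto = {
	.name		= "EXAMPLE",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct sock),
};

static int __init example_init(void)
{
	return proto_register(&example_proto, 1);	/* 1: allocate a slab */
}

static void __exit example_exit(void)
{
	proto_unregister(&example_proto);
}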
2988 
2989 #ifdef CONFIG_PROC_FS
2990 static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
2991 	__acquires(proto_list_mutex)
2992 {
2993 	mutex_lock(&proto_list_mutex);
2994 	return seq_list_start_head(&proto_list, *pos);
2995 }
2996 
2997 static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2998 {
2999 	return seq_list_next(v, &proto_list, pos);
3000 }
3001 
3002 static void proto_seq_stop(struct seq_file *seq, void *v)
3003 	__releases(proto_list_mutex)
3004 {
3005 	mutex_unlock(&proto_list_mutex);
3006 }
3007 
3008 static char proto_method_implemented(const void *method)
3009 {
3010 	return method == NULL ? 'n' : 'y';
3011 }
3012 static long sock_prot_memory_allocated(struct proto *proto)
3013 {
3014 	return proto->memory_allocated != NULL ? proto_memory_allocated(proto) : -1L;
3015 }
3016 
3017 static char *sock_prot_memory_pressure(struct proto *proto)
3018 {
3019 	return proto->memory_pressure != NULL ?
3020 	proto_memory_pressure(proto) ? "yes" : "no" : "NI";
3021 }
3022 
3023 static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
3024 {
3025 
3026 	seq_printf(seq, "%-9s %4u %6d  %6ld   %-3s %6u   %-3s  %-10s "
3027 			"%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
3028 		   proto->name,
3029 		   proto->obj_size,
3030 		   sock_prot_inuse_get(seq_file_net(seq), proto),
3031 		   sock_prot_memory_allocated(proto),
3032 		   sock_prot_memory_pressure(proto),
3033 		   proto->max_header,
3034 		   proto->slab == NULL ? "no" : "yes",
3035 		   module_name(proto->owner),
3036 		   proto_method_implemented(proto->close),
3037 		   proto_method_implemented(proto->connect),
3038 		   proto_method_implemented(proto->disconnect),
3039 		   proto_method_implemented(proto->accept),
3040 		   proto_method_implemented(proto->ioctl),
3041 		   proto_method_implemented(proto->init),
3042 		   proto_method_implemented(proto->destroy),
3043 		   proto_method_implemented(proto->shutdown),
3044 		   proto_method_implemented(proto->setsockopt),
3045 		   proto_method_implemented(proto->getsockopt),
3046 		   proto_method_implemented(proto->sendmsg),
3047 		   proto_method_implemented(proto->recvmsg),
3048 		   proto_method_implemented(proto->sendpage),
3049 		   proto_method_implemented(proto->bind),
3050 		   proto_method_implemented(proto->backlog_rcv),
3051 		   proto_method_implemented(proto->hash),
3052 		   proto_method_implemented(proto->unhash),
3053 		   proto_method_implemented(proto->get_port),
3054 		   proto_method_implemented(proto->enter_memory_pressure));
3055 }
3056 
3057 static int proto_seq_show(struct seq_file *seq, void *v)
3058 {
3059 	if (v == &proto_list)
3060 		seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s",
3061 			   "protocol",
3062 			   "size",
3063 			   "sockets",
3064 			   "memory",
3065 			   "press",
3066 			   "maxhdr",
3067 			   "slab",
3068 			   "module",
3069 			   "cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\n");
3070 	else
3071 		proto_seq_printf(seq, list_entry(v, struct proto, node));
3072 	return 0;
3073 }
3074 
3075 static const struct seq_operations proto_seq_ops = {
3076 	.start  = proto_seq_start,
3077 	.next   = proto_seq_next,
3078 	.stop   = proto_seq_stop,
3079 	.show   = proto_seq_show,
3080 };
3081 
3082 static int proto_seq_open(struct inode *inode, struct file *file)
3083 {
3084 	return seq_open_net(inode, file, &proto_seq_ops,
3085 			    sizeof(struct seq_net_private));
3086 }
3087 
3088 static const struct file_operations proto_seq_fops = {
3089 	.owner		= THIS_MODULE,
3090 	.open		= proto_seq_open,
3091 	.read		= seq_read,
3092 	.llseek		= seq_lseek,
3093 	.release	= seq_release_net,
3094 };
3095 
3096 static __net_init int proto_init_net(struct net *net)
3097 {
3098 	if (!proc_create("protocols", S_IRUGO, net->proc_net, &proto_seq_fops))
3099 		return -ENOMEM;
3100 
3101 	return 0;
3102 }
3103 
3104 static __net_exit void proto_exit_net(struct net *net)
3105 {
3106 	remove_proc_entry("protocols", net->proc_net);
3107 }
3108 
3109 
3110 static __net_initdata struct pernet_operations proto_net_ops = {
3111 	.init = proto_init_net,
3112 	.exit = proto_exit_net,
3113 };
3114 
3115 static int __init proto_init(void)
3116 {
3117 	return register_pernet_subsys(&proto_net_ops);
3118 }
3119 
3120 subsys_initcall(proto_init);
3121 
3122 #endif /* CONFIG_PROC_FS */
3123