xref: /linux/net/core/sock.c (revision 9e8ba5f3ec35cba4fd8a8bebda548c4db2651e40)
1 /*
2  * INET		An implementation of the TCP/IP protocol suite for the LINUX
3  *		operating system.  INET is implemented using the  BSD Socket
4  *		interface as the means of communication with the user level.
5  *
6  *		Generic socket support routines. Memory allocators, socket lock/release
7  *		handler for protocols to use and generic option handler.
8  *
9  *
10  * Authors:	Ross Biro
11  *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12  *		Florian La Roche, <flla@stud.uni-sb.de>
13  *		Alan Cox, <A.Cox@swansea.ac.uk>
14  *
15  * Fixes:
16  *		Alan Cox	: 	Numerous verify_area() problems
17  *		Alan Cox	:	Connecting on a connecting socket
18  *					now returns an error for tcp.
19  *		Alan Cox	:	sock->protocol is set correctly.
20  *					and is not sometimes left as 0.
21  *		Alan Cox	:	connect handles icmp errors on a
22  *					connect properly. Unfortunately there
23  *					is a restart syscall nasty there. I
24  *					can't match BSD without hacking the C
25  *					library. Ideas urgently sought!
26  *		Alan Cox	:	Disallow bind() to addresses that are
27  *					not ours - especially broadcast ones!!
28  *		Alan Cox	:	Socket 1024 _IS_ ok for users. (fencepost)
29  *		Alan Cox	:	sock_wfree/sock_rfree don't destroy sockets,
30  *					instead they leave that for the DESTROY timer.
31  *		Alan Cox	:	Clean up error flag in accept
32  *		Alan Cox	:	TCP ack handling is buggy, the DESTROY timer
33  *					was buggy. Put a remove_sock() in the handler
34  *					for memory when we hit 0. Also altered the timer
35  *					code. The ACK stuff can wait and needs major
36  *					TCP layer surgery.
37  *		Alan Cox	:	Fixed TCP ack bug, removed remove sock
38  *					and fixed timer/inet_bh race.
39  *		Alan Cox	:	Added zapped flag for TCP
40  *		Alan Cox	:	Move kfree_skb into skbuff.c and tidied up surplus code
41  *		Alan Cox	:	for new sk_buff allocations wmalloc/rmalloc now call alloc_skb
42  *		Alan Cox	:	kfree_s calls now are kfree_skbmem so we can track skb resources
43  *		Alan Cox	:	Supports socket option broadcast now as does udp. Packet and raw need fixing.
44  *		Alan Cox	:	Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so...
45  *		Rick Sladkey	:	Relaxed UDP rules for matching packets.
46  *		C.E.Hawkins	:	IFF_PROMISC/SIOCGHWADDR support
47  *	Pauline Middelink	:	identd support
48  *		Alan Cox	:	Fixed connect() taking signals I think.
49  *		Alan Cox	:	SO_LINGER supported
50  *		Alan Cox	:	Error reporting fixes
51  *		Anonymous	:	inet_create tidied up (sk->reuse setting)
52  *		Alan Cox	:	inet sockets don't set sk->type!
53  *		Alan Cox	:	Split socket option code
54  *		Alan Cox	:	Callbacks
55  *		Alan Cox	:	Nagle flag for Charles & Johannes stuff
56  *		Alex		:	Removed restriction on inet fioctl
57  *		Alan Cox	:	Splitting INET from NET core
58  *		Alan Cox	:	Fixed bogus SO_TYPE handling in getsockopt()
59  *		Adam Caldwell	:	Missing return in SO_DONTROUTE/SO_DEBUG code
60  *		Alan Cox	:	Split IP from generic code
61  *		Alan Cox	:	New kfree_skbmem()
62  *		Alan Cox	:	Make SO_DEBUG superuser only.
63  *		Alan Cox	:	Allow anyone to clear SO_DEBUG
64  *					(compatibility fix)
65  *		Alan Cox	:	Added optimistic memory grabbing for AF_UNIX throughput.
66  *		Alan Cox	:	Allocator for a socket is settable.
67  *		Alan Cox	:	SO_ERROR includes soft errors.
68  *		Alan Cox	:	Allow NULL arguments on some SO_ opts
69  *		Alan Cox	: 	Generic socket allocation to make hooks
70  *					easier (suggested by Craig Metz).
71  *		Michael Pall	:	SO_ERROR returns positive errno again
72  *              Steve Whitehouse:       Added default destructor to free
73  *                                      protocol private data.
74  *              Steve Whitehouse:       Added various other default routines
75  *                                      common to several socket families.
76  *              Chris Evans     :       Call suser() check last on F_SETOWN
77  *		Jay Schulist	:	Added SO_ATTACH_FILTER and SO_DETACH_FILTER.
78  *		Andi Kleen	:	Add sock_kmalloc()/sock_kfree_s()
79  *		Andi Kleen	:	Fix write_space callback
80  *		Chris Evans	:	Security fixes - signedness again
81  *		Arnaldo C. Melo :       cleanups, use skb_queue_purge
82  *
83  * To Fix:
84  *
85  *
86  *		This program is free software; you can redistribute it and/or
87  *		modify it under the terms of the GNU General Public License
88  *		as published by the Free Software Foundation; either version
89  *		2 of the License, or (at your option) any later version.
90  */
91 
92 #include <linux/capability.h>
93 #include <linux/errno.h>
94 #include <linux/types.h>
95 #include <linux/socket.h>
96 #include <linux/in.h>
97 #include <linux/kernel.h>
98 #include <linux/module.h>
99 #include <linux/proc_fs.h>
100 #include <linux/seq_file.h>
101 #include <linux/sched.h>
102 #include <linux/timer.h>
103 #include <linux/string.h>
104 #include <linux/sockios.h>
105 #include <linux/net.h>
106 #include <linux/mm.h>
107 #include <linux/slab.h>
108 #include <linux/interrupt.h>
109 #include <linux/poll.h>
110 #include <linux/tcp.h>
111 #include <linux/init.h>
112 #include <linux/highmem.h>
113 #include <linux/user_namespace.h>
114 #include <linux/jump_label.h>
115 
116 #include <asm/uaccess.h>
117 #include <asm/system.h>
118 
119 #include <linux/netdevice.h>
120 #include <net/protocol.h>
121 #include <linux/skbuff.h>
122 #include <net/net_namespace.h>
123 #include <net/request_sock.h>
124 #include <net/sock.h>
125 #include <linux/net_tstamp.h>
126 #include <net/xfrm.h>
127 #include <linux/ipsec.h>
128 #include <net/cls_cgroup.h>
129 #include <net/netprio_cgroup.h>
130 
131 #include <linux/filter.h>
132 
133 #include <trace/events/sock.h>
134 
135 #ifdef CONFIG_INET
136 #include <net/tcp.h>
137 #endif
138 
139 static DEFINE_MUTEX(proto_list_mutex);
140 static LIST_HEAD(proto_list);
141 
142 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
143 int mem_cgroup_sockets_init(struct cgroup *cgrp, struct cgroup_subsys *ss)
144 {
145 	struct proto *proto;
146 	int ret = 0;
147 
148 	mutex_lock(&proto_list_mutex);
149 	list_for_each_entry(proto, &proto_list, node) {
150 		if (proto->init_cgroup) {
151 			ret = proto->init_cgroup(cgrp, ss);
152 			if (ret)
153 				goto out;
154 		}
155 	}
156 
157 	mutex_unlock(&proto_list_mutex);
158 	return ret;
159 out:
160 	list_for_each_entry_continue_reverse(proto, &proto_list, node)
161 		if (proto->destroy_cgroup)
162 			proto->destroy_cgroup(cgrp, ss);
163 	mutex_unlock(&proto_list_mutex);
164 	return ret;
165 }
166 
167 void mem_cgroup_sockets_destroy(struct cgroup *cgrp, struct cgroup_subsys *ss)
168 {
169 	struct proto *proto;
170 
171 	mutex_lock(&proto_list_mutex);
172 	list_for_each_entry_reverse(proto, &proto_list, node)
173 		if (proto->destroy_cgroup)
174 			proto->destroy_cgroup(cgrp, ss);
175 	mutex_unlock(&proto_list_mutex);
176 }
177 #endif
178 
179 /*
180  * Each address family might have different locking rules, so we have
181  * one slock key per address family:
182  */
183 static struct lock_class_key af_family_keys[AF_MAX];
184 static struct lock_class_key af_family_slock_keys[AF_MAX];
185 
186 struct jump_label_key memcg_socket_limit_enabled;
187 EXPORT_SYMBOL(memcg_socket_limit_enabled);
188 
189 /*
190  * Make lock validator output more readable. (we pre-construct these
191  * strings at build time, so that runtime initialization of socket
192  * locks is fast):
193  */
194 static const char *const af_family_key_strings[AF_MAX+1] = {
195   "sk_lock-AF_UNSPEC", "sk_lock-AF_UNIX"     , "sk_lock-AF_INET"     ,
196   "sk_lock-AF_AX25"  , "sk_lock-AF_IPX"      , "sk_lock-AF_APPLETALK",
197   "sk_lock-AF_NETROM", "sk_lock-AF_BRIDGE"   , "sk_lock-AF_ATMPVC"   ,
198   "sk_lock-AF_X25"   , "sk_lock-AF_INET6"    , "sk_lock-AF_ROSE"     ,
199   "sk_lock-AF_DECnet", "sk_lock-AF_NETBEUI"  , "sk_lock-AF_SECURITY" ,
200   "sk_lock-AF_KEY"   , "sk_lock-AF_NETLINK"  , "sk_lock-AF_PACKET"   ,
201   "sk_lock-AF_ASH"   , "sk_lock-AF_ECONET"   , "sk_lock-AF_ATMSVC"   ,
202   "sk_lock-AF_RDS"   , "sk_lock-AF_SNA"      , "sk_lock-AF_IRDA"     ,
203   "sk_lock-AF_PPPOX" , "sk_lock-AF_WANPIPE"  , "sk_lock-AF_LLC"      ,
204   "sk_lock-27"       , "sk_lock-28"          , "sk_lock-AF_CAN"      ,
205   "sk_lock-AF_TIPC"  , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV"        ,
206   "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN"     , "sk_lock-AF_PHONET"   ,
207   "sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" , "sk_lock-AF_ALG"      ,
208   "sk_lock-AF_NFC"   , "sk_lock-AF_MAX"
209 };
210 static const char *const af_family_slock_key_strings[AF_MAX+1] = {
211   "slock-AF_UNSPEC", "slock-AF_UNIX"     , "slock-AF_INET"     ,
212   "slock-AF_AX25"  , "slock-AF_IPX"      , "slock-AF_APPLETALK",
213   "slock-AF_NETROM", "slock-AF_BRIDGE"   , "slock-AF_ATMPVC"   ,
214   "slock-AF_X25"   , "slock-AF_INET6"    , "slock-AF_ROSE"     ,
215   "slock-AF_DECnet", "slock-AF_NETBEUI"  , "slock-AF_SECURITY" ,
216   "slock-AF_KEY"   , "slock-AF_NETLINK"  , "slock-AF_PACKET"   ,
217   "slock-AF_ASH"   , "slock-AF_ECONET"   , "slock-AF_ATMSVC"   ,
218   "slock-AF_RDS"   , "slock-AF_SNA"      , "slock-AF_IRDA"     ,
219   "slock-AF_PPPOX" , "slock-AF_WANPIPE"  , "slock-AF_LLC"      ,
220   "slock-27"       , "slock-28"          , "slock-AF_CAN"      ,
221   "slock-AF_TIPC"  , "slock-AF_BLUETOOTH", "slock-AF_IUCV"     ,
222   "slock-AF_RXRPC" , "slock-AF_ISDN"     , "slock-AF_PHONET"   ,
223   "slock-AF_IEEE802154", "slock-AF_CAIF" , "slock-AF_ALG"      ,
224   "slock-AF_NFC"   , "slock-AF_MAX"
225 };
226 static const char *const af_family_clock_key_strings[AF_MAX+1] = {
227   "clock-AF_UNSPEC", "clock-AF_UNIX"     , "clock-AF_INET"     ,
228   "clock-AF_AX25"  , "clock-AF_IPX"      , "clock-AF_APPLETALK",
229   "clock-AF_NETROM", "clock-AF_BRIDGE"   , "clock-AF_ATMPVC"   ,
230   "clock-AF_X25"   , "clock-AF_INET6"    , "clock-AF_ROSE"     ,
231   "clock-AF_DECnet", "clock-AF_NETBEUI"  , "clock-AF_SECURITY" ,
232   "clock-AF_KEY"   , "clock-AF_NETLINK"  , "clock-AF_PACKET"   ,
233   "clock-AF_ASH"   , "clock-AF_ECONET"   , "clock-AF_ATMSVC"   ,
234   "clock-AF_RDS"   , "clock-AF_SNA"      , "clock-AF_IRDA"     ,
235   "clock-AF_PPPOX" , "clock-AF_WANPIPE"  , "clock-AF_LLC"      ,
236   "clock-27"       , "clock-28"          , "clock-AF_CAN"      ,
237   "clock-AF_TIPC"  , "clock-AF_BLUETOOTH", "clock-AF_IUCV"     ,
238   "clock-AF_RXRPC" , "clock-AF_ISDN"     , "clock-AF_PHONET"   ,
239   "clock-AF_IEEE802154", "clock-AF_CAIF" , "clock-AF_ALG"      ,
240   "clock-AF_NFC"   , "clock-AF_MAX"
241 };
242 
243 /*
244  * sk_callback_lock locking rules are per-address-family,
245  * so split the lock classes by using a per-AF key:
246  */
247 static struct lock_class_key af_callback_keys[AF_MAX];
248 
249 /* Take into consideration the size of the struct sk_buff overhead in the
250  * determination of these values, since that is non-constant across
251  * platforms.  This makes socket queueing behavior and performance
252  * not depend upon such differences.
253  */
254 #define _SK_MEM_PACKETS		256
255 #define _SK_MEM_OVERHEAD	SKB_TRUESIZE(256)
256 #define SK_WMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
257 #define SK_RMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
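/*
 * Illustrative arithmetic (not authoritative; SKB_TRUESIZE() depends on
 * the per-arch sizes of struct sk_buff and struct skb_shared_info):
 * assuming SKB_TRUESIZE(256) comes to roughly 768 bytes on a typical
 * 64-bit build, the defaults above work out to about
 *
 *	SK_WMEM_MAX = 768 * 256 = ~192 KiB
 *
 * of charged buffer space per socket, per direction.
 */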
258 
259 /* Run time adjustable parameters. */
260 __u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX;
261 __u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
262 __u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
263 __u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;
264 
265 /* Maximal space eaten by iovec or ancillary data plus some space */
266 int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
267 EXPORT_SYMBOL(sysctl_optmem_max);
268 
269 #if defined(CONFIG_CGROUPS)
270 #if !defined(CONFIG_NET_CLS_CGROUP)
271 int net_cls_subsys_id = -1;
272 EXPORT_SYMBOL_GPL(net_cls_subsys_id);
273 #endif
274 #if !defined(CONFIG_NETPRIO_CGROUP)
275 int net_prio_subsys_id = -1;
276 EXPORT_SYMBOL_GPL(net_prio_subsys_id);
277 #endif
278 #endif
279 
280 static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
281 {
282 	struct timeval tv;
283 
284 	if (optlen < sizeof(tv))
285 		return -EINVAL;
286 	if (copy_from_user(&tv, optval, sizeof(tv)))
287 		return -EFAULT;
288 	if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC)
289 		return -EDOM;
290 
291 	if (tv.tv_sec < 0) {
292 		static int warned __read_mostly;
293 
294 		*timeo_p = 0;
295 		if (warned < 10 && net_ratelimit()) {
296 			warned++;
297 			printk(KERN_INFO "sock_set_timeout: `%s' (pid %d) "
298 			       "tries to set negative timeout\n",
299 				current->comm, task_pid_nr(current));
300 		}
301 		return 0;
302 	}
303 	*timeo_p = MAX_SCHEDULE_TIMEOUT;
304 	if (tv.tv_sec == 0 && tv.tv_usec == 0)
305 		return 0;
306 	if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT/HZ - 1))
307 		*timeo_p = tv.tv_sec*HZ + (tv.tv_usec+(1000000/HZ-1))/(1000000/HZ);
308 	return 0;
309 }
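/*
 * Illustrative userspace counterpart of the conversion above; a minimal
 * sketch, not part of this file's build, where `fd` is an assumed open
 * socket:
 *
 *	#include <stdio.h>
 *	#include <sys/time.h>
 *	#include <sys/socket.h>
 *
 *	struct timeval tv = { .tv_sec = 2, .tv_usec = 500000 };
 *
 *	if (setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv)) < 0)
 *		perror("setsockopt SO_RCVTIMEO");
 *
 * A tv_usec outside [0, USEC_PER_SEC) is rejected with EDOM, and a
 * negative tv_sec is warned about and treated as a zero timeout.
 */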
310 
311 static void sock_warn_obsolete_bsdism(const char *name)
312 {
313 	static int warned;
314 	static char warncomm[TASK_COMM_LEN];
315 	if (strcmp(warncomm, current->comm) && warned < 5) {
316 		strcpy(warncomm,  current->comm);
317 		printk(KERN_WARNING "process `%s' is using obsolete "
318 		       "%s SO_BSDCOMPAT\n", warncomm, name);
319 		warned++;
320 	}
321 }
322 
323 #define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE))
324 
325 static void sock_disable_timestamp(struct sock *sk, unsigned long flags)
326 {
327 	if (sk->sk_flags & flags) {
328 		sk->sk_flags &= ~flags;
329 		if (!(sk->sk_flags & SK_FLAGS_TIMESTAMP))
330 			net_disable_timestamp();
331 	}
332 }
333 
334 
335 int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
336 {
337 	int err;
338 	int skb_len;
339 	unsigned long flags;
340 	struct sk_buff_head *list = &sk->sk_receive_queue;
341 
342 	/* Cast sk->rcvbuf to unsigned... It's pointless, but reduces
343 	   the number of warnings when compiling with -W --ANK
344 	 */
345 	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
346 	    (unsigned)sk->sk_rcvbuf) {
347 		atomic_inc(&sk->sk_drops);
348 		trace_sock_rcvqueue_full(sk, skb);
349 		return -ENOMEM;
350 	}
351 
352 	err = sk_filter(sk, skb);
353 	if (err)
354 		return err;
355 
356 	if (!sk_rmem_schedule(sk, skb->truesize)) {
357 		atomic_inc(&sk->sk_drops);
358 		return -ENOBUFS;
359 	}
360 
361 	skb->dev = NULL;
362 	skb_set_owner_r(skb, sk);
363 
364 	/* Cache the SKB length before we tack it onto the receive
365 	 * queue.  Once it is added it no longer belongs to us and
366 	 * may be freed by other threads of control pulling packets
367 	 * from the queue.
368 	 */
369 	skb_len = skb->len;
370 
371 	/* We escape from the RCU-protected region; make sure we don't leak
372 	 * a non-refcounted dst
373 	 */
374 	skb_dst_force(skb);
375 
376 	spin_lock_irqsave(&list->lock, flags);
377 	skb->dropcount = atomic_read(&sk->sk_drops);
378 	__skb_queue_tail(list, skb);
379 	spin_unlock_irqrestore(&list->lock, flags);
380 
381 	if (!sock_flag(sk, SOCK_DEAD))
382 		sk->sk_data_ready(sk, skb_len);
383 	return 0;
384 }
385 EXPORT_SYMBOL(sock_queue_rcv_skb);
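/*
 * A minimal sketch of how a datagram protocol's input path typically
 * feeds this helper (my_proto_rcv is hypothetical, shown for
 * illustration only). On failure, whether -ENOMEM (receive queue full),
 * an sk_filter() error, or -ENOBUFS from accounting, the skb still
 * belongs to the caller and must be freed:
 *
 *	static int my_proto_rcv(struct sock *sk, struct sk_buff *skb)
 *	{
 *		int err = sock_queue_rcv_skb(sk, skb);
 *
 *		if (err < 0)
 *			kfree_skb(skb);
 *		return err;
 *	}
 */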
386 
387 int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
388 {
389 	int rc = NET_RX_SUCCESS;
390 
391 	if (sk_filter(sk, skb))
392 		goto discard_and_relse;
393 
394 	skb->dev = NULL;
395 
396 	if (sk_rcvqueues_full(sk, skb)) {
397 		atomic_inc(&sk->sk_drops);
398 		goto discard_and_relse;
399 	}
400 	if (nested)
401 		bh_lock_sock_nested(sk);
402 	else
403 		bh_lock_sock(sk);
404 	if (!sock_owned_by_user(sk)) {
405 		/*
406 		 * trylock + unlock semantics:
407 		 */
408 		mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);
409 
410 		rc = sk_backlog_rcv(sk, skb);
411 
412 		mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
413 	} else if (sk_add_backlog(sk, skb)) {
414 		bh_unlock_sock(sk);
415 		atomic_inc(&sk->sk_drops);
416 		goto discard_and_relse;
417 	}
418 
419 	bh_unlock_sock(sk);
420 out:
421 	sock_put(sk);
422 	return rc;
423 discard_and_relse:
424 	kfree_skb(skb);
425 	goto out;
426 }
427 EXPORT_SYMBOL(sk_receive_skb);
428 
429 void sk_reset_txq(struct sock *sk)
430 {
431 	sk_tx_queue_clear(sk);
432 }
433 EXPORT_SYMBOL(sk_reset_txq);
434 
435 struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
436 {
437 	struct dst_entry *dst = __sk_dst_get(sk);
438 
439 	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
440 		sk_tx_queue_clear(sk);
441 		RCU_INIT_POINTER(sk->sk_dst_cache, NULL);
442 		dst_release(dst);
443 		return NULL;
444 	}
445 
446 	return dst;
447 }
448 EXPORT_SYMBOL(__sk_dst_check);
449 
450 struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
451 {
452 	struct dst_entry *dst = sk_dst_get(sk);
453 
454 	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
455 		sk_dst_reset(sk);
456 		dst_release(dst);
457 		return NULL;
458 	}
459 
460 	return dst;
461 }
462 EXPORT_SYMBOL(sk_dst_check);
463 
464 static int sock_bindtodevice(struct sock *sk, char __user *optval, int optlen)
465 {
466 	int ret = -ENOPROTOOPT;
467 #ifdef CONFIG_NETDEVICES
468 	struct net *net = sock_net(sk);
469 	char devname[IFNAMSIZ];
470 	int index;
471 
472 	/* Sorry... */
473 	ret = -EPERM;
474 	if (!capable(CAP_NET_RAW))
475 		goto out;
476 
477 	ret = -EINVAL;
478 	if (optlen < 0)
479 		goto out;
480 
481 	/* Bind this socket to a particular device like "eth0",
482 	 * as specified in the passed interface name. If the
483 	 * name is "" or the option length is zero the socket
484 	 * is not bound.
485 	 */
486 	if (optlen > IFNAMSIZ - 1)
487 		optlen = IFNAMSIZ - 1;
488 	memset(devname, 0, sizeof(devname));
489 
490 	ret = -EFAULT;
491 	if (copy_from_user(devname, optval, optlen))
492 		goto out;
493 
494 	index = 0;
495 	if (devname[0] != '\0') {
496 		struct net_device *dev;
497 
498 		rcu_read_lock();
499 		dev = dev_get_by_name_rcu(net, devname);
500 		if (dev)
501 			index = dev->ifindex;
502 		rcu_read_unlock();
503 		ret = -ENODEV;
504 		if (!dev)
505 			goto out;
506 	}
507 
508 	lock_sock(sk);
509 	sk->sk_bound_dev_if = index;
510 	sk_dst_reset(sk);
511 	release_sock(sk);
512 
513 	ret = 0;
514 
515 out:
516 #endif
517 
518 	return ret;
519 }
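/*
 * Illustrative userspace counterpart; a minimal sketch assuming
 * CAP_NET_RAW and an already-open socket `fd`:
 *
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <sys/socket.h>
 *
 *	const char ifname[] = "eth0";
 *
 *	if (setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE,
 *		       ifname, strlen(ifname)) < 0)
 *		perror("setsockopt SO_BINDTODEVICE");
 *
 * Passing an empty name (or a zero option length) unbinds the socket
 * again.
 */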
520 
521 static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
522 {
523 	if (valbool)
524 		sock_set_flag(sk, bit);
525 	else
526 		sock_reset_flag(sk, bit);
527 }
528 
529 /*
530  *	This is meant for all protocols to use and covers goings on
531  *	at the socket level. Everything here is generic.
532  */
533 
534 int sock_setsockopt(struct socket *sock, int level, int optname,
535 		    char __user *optval, unsigned int optlen)
536 {
537 	struct sock *sk = sock->sk;
538 	int val;
539 	int valbool;
540 	struct linger ling;
541 	int ret = 0;
542 
543 	/*
544 	 *	Options without arguments
545 	 */
546 
547 	if (optname == SO_BINDTODEVICE)
548 		return sock_bindtodevice(sk, optval, optlen);
549 
550 	if (optlen < sizeof(int))
551 		return -EINVAL;
552 
553 	if (get_user(val, (int __user *)optval))
554 		return -EFAULT;
555 
556 	valbool = val ? 1 : 0;
557 
558 	lock_sock(sk);
559 
560 	switch (optname) {
561 	case SO_DEBUG:
562 		if (val && !capable(CAP_NET_ADMIN))
563 			ret = -EACCES;
564 		else
565 			sock_valbool_flag(sk, SOCK_DBG, valbool);
566 		break;
567 	case SO_REUSEADDR:
568 		sk->sk_reuse = valbool;
569 		break;
570 	case SO_TYPE:
571 	case SO_PROTOCOL:
572 	case SO_DOMAIN:
573 	case SO_ERROR:
574 		ret = -ENOPROTOOPT;
575 		break;
576 	case SO_DONTROUTE:
577 		sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
578 		break;
579 	case SO_BROADCAST:
580 		sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
581 		break;
582 	case SO_SNDBUF:
583 		/* Don't error on this. BSD doesn't, and if you think
584 		   about it, this is right. Otherwise apps have to
585 		   play 'guess the biggest size' games. RCVBUF/SNDBUF
586 		   are treated in BSD as hints. */
587 
588 		if (val > sysctl_wmem_max)
589 			val = sysctl_wmem_max;
590 set_sndbuf:
591 		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
592 		if ((val * 2) < SOCK_MIN_SNDBUF)
593 			sk->sk_sndbuf = SOCK_MIN_SNDBUF;
594 		else
595 			sk->sk_sndbuf = val * 2;
596 
597 		/*
598 		 *	Wake up sending tasks if we
599 		 *	upped the value.
600 		 */
601 		sk->sk_write_space(sk);
602 		break;
603 
604 	case SO_SNDBUFFORCE:
605 		if (!capable(CAP_NET_ADMIN)) {
606 			ret = -EPERM;
607 			break;
608 		}
609 		goto set_sndbuf;
610 
611 	case SO_RCVBUF:
612 		/* Don't error on this. BSD doesn't, and if you think
613 		   about it, this is right. Otherwise apps have to
614 		   play 'guess the biggest size' games. RCVBUF/SNDBUF
615 		   are treated in BSD as hints. */
616 
617 		if (val > sysctl_rmem_max)
618 			val = sysctl_rmem_max;
619 set_rcvbuf:
620 		sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
621 		/*
622 		 * We double it on the way in to account for
623 		 * "struct sk_buff" etc. overhead.   Applications
624 		 * assume that the SO_RCVBUF setting they make will
625 		 * allow that much actual data to be received on that
626 		 * socket.
627 		 *
628 		 * Applications are unaware that "struct sk_buff" and
629 		 * other overheads allocate from the receive buffer
630 		 * during socket buffer allocation.
631 		 *
632 		 * And after considering the possible alternatives,
633 		 * returning the value we actually used in getsockopt
634 		 * is the most desirable behavior.
635 		 */
636 		if ((val * 2) < SOCK_MIN_RCVBUF)
637 			sk->sk_rcvbuf = SOCK_MIN_RCVBUF;
638 		else
639 			sk->sk_rcvbuf = val * 2;
640 		break;
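		/*
		 * The doubling above is visible to applications; a minimal
		 * userspace sketch (`fd` is an assumed open socket):
		 *
		 *	int req = 65536, eff;
		 *	socklen_t len = sizeof(eff);
		 *
		 *	setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &req, sizeof(req));
		 *	getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &eff, &len);
		 *
		 * Here eff reads back as twice the requested value, with the
		 * request first clamped to sysctl_rmem_max.
		 */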
641 
642 	case SO_RCVBUFFORCE:
643 		if (!capable(CAP_NET_ADMIN)) {
644 			ret = -EPERM;
645 			break;
646 		}
647 		goto set_rcvbuf;
648 
649 	case SO_KEEPALIVE:
650 #ifdef CONFIG_INET
651 		if (sk->sk_protocol == IPPROTO_TCP)
652 			tcp_set_keepalive(sk, valbool);
653 #endif
654 		sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
655 		break;
656 
657 	case SO_OOBINLINE:
658 		sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
659 		break;
660 
661 	case SO_NO_CHECK:
662 		sk->sk_no_check = valbool;
663 		break;
664 
665 	case SO_PRIORITY:
666 		if ((val >= 0 && val <= 6) || capable(CAP_NET_ADMIN))
667 			sk->sk_priority = val;
668 		else
669 			ret = -EPERM;
670 		break;
671 
672 	case SO_LINGER:
673 		if (optlen < sizeof(ling)) {
674 			ret = -EINVAL;	/* 1003.1g */
675 			break;
676 		}
677 		if (copy_from_user(&ling, optval, sizeof(ling))) {
678 			ret = -EFAULT;
679 			break;
680 		}
681 		if (!ling.l_onoff)
682 			sock_reset_flag(sk, SOCK_LINGER);
683 		else {
684 #if (BITS_PER_LONG == 32)
685 			if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
686 				sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
687 			else
688 #endif
689 				sk->sk_lingertime = (unsigned int)ling.l_linger * HZ;
690 			sock_set_flag(sk, SOCK_LINGER);
691 		}
692 		break;
693 
694 	case SO_BSDCOMPAT:
695 		sock_warn_obsolete_bsdism("setsockopt");
696 		break;
697 
698 	case SO_PASSCRED:
699 		if (valbool)
700 			set_bit(SOCK_PASSCRED, &sock->flags);
701 		else
702 			clear_bit(SOCK_PASSCRED, &sock->flags);
703 		break;
704 
705 	case SO_TIMESTAMP:
706 	case SO_TIMESTAMPNS:
707 		if (valbool)  {
708 			if (optname == SO_TIMESTAMP)
709 				sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
710 			else
711 				sock_set_flag(sk, SOCK_RCVTSTAMPNS);
712 			sock_set_flag(sk, SOCK_RCVTSTAMP);
713 			sock_enable_timestamp(sk, SOCK_TIMESTAMP);
714 		} else {
715 			sock_reset_flag(sk, SOCK_RCVTSTAMP);
716 			sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
717 		}
718 		break;
719 
720 	case SO_TIMESTAMPING:
721 		if (val & ~SOF_TIMESTAMPING_MASK) {
722 			ret = -EINVAL;
723 			break;
724 		}
725 		sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE,
726 				  val & SOF_TIMESTAMPING_TX_HARDWARE);
727 		sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE,
728 				  val & SOF_TIMESTAMPING_TX_SOFTWARE);
729 		sock_valbool_flag(sk, SOCK_TIMESTAMPING_RX_HARDWARE,
730 				  val & SOF_TIMESTAMPING_RX_HARDWARE);
731 		if (val & SOF_TIMESTAMPING_RX_SOFTWARE)
732 			sock_enable_timestamp(sk,
733 					      SOCK_TIMESTAMPING_RX_SOFTWARE);
734 		else
735 			sock_disable_timestamp(sk,
736 					       (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE));
737 		sock_valbool_flag(sk, SOCK_TIMESTAMPING_SOFTWARE,
738 				  val & SOF_TIMESTAMPING_SOFTWARE);
739 		sock_valbool_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE,
740 				  val & SOF_TIMESTAMPING_SYS_HARDWARE);
741 		sock_valbool_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE,
742 				  val & SOF_TIMESTAMPING_RAW_HARDWARE);
743 		break;
744 
745 	case SO_RCVLOWAT:
746 		if (val < 0)
747 			val = INT_MAX;
748 		sk->sk_rcvlowat = val ? : 1;
749 		break;
750 
751 	case SO_RCVTIMEO:
752 		ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen);
753 		break;
754 
755 	case SO_SNDTIMEO:
756 		ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen);
757 		break;
758 
759 	case SO_ATTACH_FILTER:
760 		ret = -EINVAL;
761 		if (optlen == sizeof(struct sock_fprog)) {
762 			struct sock_fprog fprog;
763 
764 			ret = -EFAULT;
765 			if (copy_from_user(&fprog, optval, sizeof(fprog)))
766 				break;
767 
768 			ret = sk_attach_filter(&fprog, sk);
769 		}
770 		break;
771 
772 	case SO_DETACH_FILTER:
773 		ret = sk_detach_filter(sk);
774 		break;
775 
776 	case SO_PASSSEC:
777 		if (valbool)
778 			set_bit(SOCK_PASSSEC, &sock->flags);
779 		else
780 			clear_bit(SOCK_PASSSEC, &sock->flags);
781 		break;
782 	case SO_MARK:
783 		if (!capable(CAP_NET_ADMIN))
784 			ret = -EPERM;
785 		else
786 			sk->sk_mark = val;
787 		break;
788 
789 		/* We implement SO_SNDLOWAT etc. to not be
790 		   settable (1003.1g 5.3) */
791 	case SO_RXQ_OVFL:
792 		sock_valbool_flag(sk, SOCK_RXQ_OVFL, valbool);
793 		break;
794 
795 	case SO_WIFI_STATUS:
796 		sock_valbool_flag(sk, SOCK_WIFI_STATUS, valbool);
797 		break;
798 
799 	default:
800 		ret = -ENOPROTOOPT;
801 		break;
802 	}
803 	release_sock(sk);
804 	return ret;
805 }
806 EXPORT_SYMBOL(sock_setsockopt);
807 
808 
809 void cred_to_ucred(struct pid *pid, const struct cred *cred,
810 		   struct ucred *ucred)
811 {
812 	ucred->pid = pid_vnr(pid);
813 	ucred->uid = ucred->gid = -1;
814 	if (cred) {
815 		struct user_namespace *current_ns = current_user_ns();
816 
817 		ucred->uid = user_ns_map_uid(current_ns, cred, cred->euid);
818 		ucred->gid = user_ns_map_gid(current_ns, cred, cred->egid);
819 	}
820 }
821 EXPORT_SYMBOL_GPL(cred_to_ucred);
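/*
 * The ucred filled in above is what userspace reads back via
 * SO_PEERCRED; a minimal sketch for a connected AF_UNIX socket `fd`
 * (assumes _GNU_SOURCE so <sys/socket.h> exposes struct ucred):
 *
 *	#include <stdio.h>
 *	#include <sys/socket.h>
 *
 *	struct ucred peer;
 *	socklen_t len = sizeof(peer);
 *
 *	if (getsockopt(fd, SOL_SOCKET, SO_PEERCRED, &peer, &len) == 0)
 *		printf("pid=%d uid=%d gid=%d\n",
 *		       peer.pid, peer.uid, peer.gid);
 */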
822 
823 int sock_getsockopt(struct socket *sock, int level, int optname,
824 		    char __user *optval, int __user *optlen)
825 {
826 	struct sock *sk = sock->sk;
827 
828 	union {
829 		int val;
830 		struct linger ling;
831 		struct timeval tm;
832 	} v;
833 
834 	int lv = sizeof(int);
835 	int len;
836 
837 	if (get_user(len, optlen))
838 		return -EFAULT;
839 	if (len < 0)
840 		return -EINVAL;
841 
842 	memset(&v, 0, sizeof(v));
843 
844 	switch (optname) {
845 	case SO_DEBUG:
846 		v.val = sock_flag(sk, SOCK_DBG);
847 		break;
848 
849 	case SO_DONTROUTE:
850 		v.val = sock_flag(sk, SOCK_LOCALROUTE);
851 		break;
852 
853 	case SO_BROADCAST:
854 		v.val = !!sock_flag(sk, SOCK_BROADCAST);
855 		break;
856 
857 	case SO_SNDBUF:
858 		v.val = sk->sk_sndbuf;
859 		break;
860 
861 	case SO_RCVBUF:
862 		v.val = sk->sk_rcvbuf;
863 		break;
864 
865 	case SO_REUSEADDR:
866 		v.val = sk->sk_reuse;
867 		break;
868 
869 	case SO_KEEPALIVE:
870 		v.val = !!sock_flag(sk, SOCK_KEEPOPEN);
871 		break;
872 
873 	case SO_TYPE:
874 		v.val = sk->sk_type;
875 		break;
876 
877 	case SO_PROTOCOL:
878 		v.val = sk->sk_protocol;
879 		break;
880 
881 	case SO_DOMAIN:
882 		v.val = sk->sk_family;
883 		break;
884 
885 	case SO_ERROR:
886 		v.val = -sock_error(sk);
887 		if (v.val == 0)
888 			v.val = xchg(&sk->sk_err_soft, 0);
889 		break;
890 
891 	case SO_OOBINLINE:
892 		v.val = !!sock_flag(sk, SOCK_URGINLINE);
893 		break;
894 
895 	case SO_NO_CHECK:
896 		v.val = sk->sk_no_check;
897 		break;
898 
899 	case SO_PRIORITY:
900 		v.val = sk->sk_priority;
901 		break;
902 
903 	case SO_LINGER:
904 		lv		= sizeof(v.ling);
905 		v.ling.l_onoff	= !!sock_flag(sk, SOCK_LINGER);
906 		v.ling.l_linger	= sk->sk_lingertime / HZ;
907 		break;
908 
909 	case SO_BSDCOMPAT:
910 		sock_warn_obsolete_bsdism("getsockopt");
911 		break;
912 
913 	case SO_TIMESTAMP:
914 		v.val = sock_flag(sk, SOCK_RCVTSTAMP) &&
915 				!sock_flag(sk, SOCK_RCVTSTAMPNS);
916 		break;
917 
918 	case SO_TIMESTAMPNS:
919 		v.val = sock_flag(sk, SOCK_RCVTSTAMPNS);
920 		break;
921 
922 	case SO_TIMESTAMPING:
923 		v.val = 0;
924 		if (sock_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE))
925 			v.val |= SOF_TIMESTAMPING_TX_HARDWARE;
926 		if (sock_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE))
927 			v.val |= SOF_TIMESTAMPING_TX_SOFTWARE;
928 		if (sock_flag(sk, SOCK_TIMESTAMPING_RX_HARDWARE))
929 			v.val |= SOF_TIMESTAMPING_RX_HARDWARE;
930 		if (sock_flag(sk, SOCK_TIMESTAMPING_RX_SOFTWARE))
931 			v.val |= SOF_TIMESTAMPING_RX_SOFTWARE;
932 		if (sock_flag(sk, SOCK_TIMESTAMPING_SOFTWARE))
933 			v.val |= SOF_TIMESTAMPING_SOFTWARE;
934 		if (sock_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE))
935 			v.val |= SOF_TIMESTAMPING_SYS_HARDWARE;
936 		if (sock_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE))
937 			v.val |= SOF_TIMESTAMPING_RAW_HARDWARE;
938 		break;
939 
940 	case SO_RCVTIMEO:
941 		lv = sizeof(struct timeval);
942 		if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) {
943 			v.tm.tv_sec = 0;
944 			v.tm.tv_usec = 0;
945 		} else {
946 			v.tm.tv_sec = sk->sk_rcvtimeo / HZ;
947 			v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000000) / HZ;
948 		}
949 		break;
950 
951 	case SO_SNDTIMEO:
952 		lv = sizeof(struct timeval);
953 		if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) {
954 			v.tm.tv_sec = 0;
955 			v.tm.tv_usec = 0;
956 		} else {
957 			v.tm.tv_sec = sk->sk_sndtimeo / HZ;
958 			v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * 1000000) / HZ;
959 		}
960 		break;
961 
962 	case SO_RCVLOWAT:
963 		v.val = sk->sk_rcvlowat;
964 		break;
965 
966 	case SO_SNDLOWAT:
967 		v.val = 1;
968 		break;
969 
970 	case SO_PASSCRED:
971 		v.val = test_bit(SOCK_PASSCRED, &sock->flags) ? 1 : 0;
972 		break;
973 
974 	case SO_PEERCRED:
975 	{
976 		struct ucred peercred;
977 		if (len > sizeof(peercred))
978 			len = sizeof(peercred);
979 		cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
980 		if (copy_to_user(optval, &peercred, len))
981 			return -EFAULT;
982 		goto lenout;
983 	}
984 
985 	case SO_PEERNAME:
986 	{
987 		char address[128];
988 
989 		if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
990 			return -ENOTCONN;
991 		if (lv < len)
992 			return -EINVAL;
993 		if (copy_to_user(optval, address, len))
994 			return -EFAULT;
995 		goto lenout;
996 	}
997 
998 	/* Dubious BSD thing... Probably nobody even uses it, but
999 	 * the UNIX standard wants it for whatever reason... -DaveM
1000 	 */
1001 	case SO_ACCEPTCONN:
1002 		v.val = sk->sk_state == TCP_LISTEN;
1003 		break;
1004 
1005 	case SO_PASSSEC:
1006 		v.val = test_bit(SOCK_PASSSEC, &sock->flags) ? 1 : 0;
1007 		break;
1008 
1009 	case SO_PEERSEC:
1010 		return security_socket_getpeersec_stream(sock, optval, optlen, len);
1011 
1012 	case SO_MARK:
1013 		v.val = sk->sk_mark;
1014 		break;
1015 
1016 	case SO_RXQ_OVFL:
1017 		v.val = !!sock_flag(sk, SOCK_RXQ_OVFL);
1018 		break;
1019 
1020 	case SO_WIFI_STATUS:
1021 		v.val = !!sock_flag(sk, SOCK_WIFI_STATUS);
1022 		break;
1023 
1024 	default:
1025 		return -ENOPROTOOPT;
1026 	}
1027 
1028 	if (len > lv)
1029 		len = lv;
1030 	if (copy_to_user(optval, &v, len))
1031 		return -EFAULT;
1032 lenout:
1033 	if (put_user(len, optlen))
1034 		return -EFAULT;
1035 	return 0;
1036 }
1037 
1038 /*
1039  * Initialize an sk_lock.
1040  *
1041  * (We also register the sk_lock with the lock validator.)
1042  */
1043 static inline void sock_lock_init(struct sock *sk)
1044 {
1045 	sock_lock_init_class_and_name(sk,
1046 			af_family_slock_key_strings[sk->sk_family],
1047 			af_family_slock_keys + sk->sk_family,
1048 			af_family_key_strings[sk->sk_family],
1049 			af_family_keys + sk->sk_family);
1050 }
1051 
1052 /*
1053  * Copy all fields from osk to nsk but nsk->sk_refcnt must not change yet,
1054  * even temporarily, because of RCU lookups. sk_node should also be left as-is.
1055  * We must not copy fields between sk_dontcopy_begin and sk_dontcopy_end.
1056  */
1057 static void sock_copy(struct sock *nsk, const struct sock *osk)
1058 {
1059 #ifdef CONFIG_SECURITY_NETWORK
1060 	void *sptr = nsk->sk_security;
1061 #endif
1062 	memcpy(nsk, osk, offsetof(struct sock, sk_dontcopy_begin));
1063 
1064 	memcpy(&nsk->sk_dontcopy_end, &osk->sk_dontcopy_end,
1065 	       osk->sk_prot->obj_size - offsetof(struct sock, sk_dontcopy_end));
1066 
1067 #ifdef CONFIG_SECURITY_NETWORK
1068 	nsk->sk_security = sptr;
1069 	security_sk_clone(osk, nsk);
1070 #endif
1071 }
1072 
1073 /*
1074  * Caches using SLAB_DESTROY_BY_RCU should leave the .next pointer of nulls
1075  * nodes unmodified. Special care is taken when initializing the object to zero.
1076  */
1077 static inline void sk_prot_clear_nulls(struct sock *sk, int size)
1078 {
1079 	if (offsetof(struct sock, sk_node.next) != 0)
1080 		memset(sk, 0, offsetof(struct sock, sk_node.next));
1081 	memset(&sk->sk_node.pprev, 0,
1082 	       size - offsetof(struct sock, sk_node.pprev));
1083 }
1084 
1085 void sk_prot_clear_portaddr_nulls(struct sock *sk, int size)
1086 {
1087 	unsigned long nulls1, nulls2;
1088 
1089 	nulls1 = offsetof(struct sock, __sk_common.skc_node.next);
1090 	nulls2 = offsetof(struct sock, __sk_common.skc_portaddr_node.next);
1091 	if (nulls1 > nulls2)
1092 		swap(nulls1, nulls2);
1093 
1094 	if (nulls1 != 0)
1095 		memset((char *)sk, 0, nulls1);
1096 	memset((char *)sk + nulls1 + sizeof(void *), 0,
1097 	       nulls2 - nulls1 - sizeof(void *));
1098 	memset((char *)sk + nulls2 + sizeof(void *), 0,
1099 	       size - nulls2 - sizeof(void *));
1100 }
1101 EXPORT_SYMBOL(sk_prot_clear_portaddr_nulls);
1102 
1103 static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
1104 		int family)
1105 {
1106 	struct sock *sk;
1107 	struct kmem_cache *slab;
1108 
1109 	slab = prot->slab;
1110 	if (slab != NULL) {
1111 		sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO);
1112 		if (!sk)
1113 			return sk;
1114 		if (priority & __GFP_ZERO) {
1115 			if (prot->clear_sk)
1116 				prot->clear_sk(sk, prot->obj_size);
1117 			else
1118 				sk_prot_clear_nulls(sk, prot->obj_size);
1119 		}
1120 	} else
1121 		sk = kmalloc(prot->obj_size, priority);
1122 
1123 	if (sk != NULL) {
1124 		kmemcheck_annotate_bitfield(sk, flags);
1125 
1126 		if (security_sk_alloc(sk, family, priority))
1127 			goto out_free;
1128 
1129 		if (!try_module_get(prot->owner))
1130 			goto out_free_sec;
1131 		sk_tx_queue_clear(sk);
1132 	}
1133 
1134 	return sk;
1135 
1136 out_free_sec:
1137 	security_sk_free(sk);
1138 out_free:
1139 	if (slab != NULL)
1140 		kmem_cache_free(slab, sk);
1141 	else
1142 		kfree(sk);
1143 	return NULL;
1144 }
1145 
1146 static void sk_prot_free(struct proto *prot, struct sock *sk)
1147 {
1148 	struct kmem_cache *slab;
1149 	struct module *owner;
1150 
1151 	owner = prot->owner;
1152 	slab = prot->slab;
1153 
1154 	security_sk_free(sk);
1155 	if (slab != NULL)
1156 		kmem_cache_free(slab, sk);
1157 	else
1158 		kfree(sk);
1159 	module_put(owner);
1160 }
1161 
1162 #ifdef CONFIG_CGROUPS
1163 void sock_update_classid(struct sock *sk)
1164 {
1165 	u32 classid;
1166 
1167 	rcu_read_lock();  /* doing current task, which cannot vanish. */
1168 	classid = task_cls_classid(current);
1169 	rcu_read_unlock();
1170 	if (classid && classid != sk->sk_classid)
1171 		sk->sk_classid = classid;
1172 }
1173 EXPORT_SYMBOL(sock_update_classid);
1174 
1175 void sock_update_netprioidx(struct sock *sk)
1176 {
1177 	struct cgroup_netprio_state *state;
1178 	if (in_interrupt())
1179 		return;
1180 	rcu_read_lock();
1181 	state = task_netprio_state(current);
1182 	sk->sk_cgrp_prioidx = state ? state->prioidx : 0;
1183 	rcu_read_unlock();
1184 }
1185 EXPORT_SYMBOL_GPL(sock_update_netprioidx);
1186 #endif
1187 
1188 /**
1189  *	sk_alloc - All socket objects are allocated here
1190  *	@net: the applicable net namespace
1191  *	@family: protocol family
1192  *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
1193  *	@prot: struct proto associated with this new sock instance
1194  */
1195 struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
1196 		      struct proto *prot)
1197 {
1198 	struct sock *sk;
1199 
1200 	sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family);
1201 	if (sk) {
1202 		sk->sk_family = family;
1203 		/*
1204 		 * See comment in struct sock definition to understand
1205 		 * why we need sk_prot_creator -acme
1206 		 */
1207 		sk->sk_prot = sk->sk_prot_creator = prot;
1208 		sock_lock_init(sk);
1209 		sock_net_set(sk, get_net(net));
1210 		atomic_set(&sk->sk_wmem_alloc, 1);
1211 
1212 		sock_update_classid(sk);
1213 		sock_update_netprioidx(sk);
1214 	}
1215 
1216 	return sk;
1217 }
1218 EXPORT_SYMBOL(sk_alloc);
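/*
 * A minimal sketch of a protocol family's create() hook using this
 * allocator (my_proto and AF_MYFAMILY are hypothetical, shown for
 * illustration only):
 *
 *	static int my_create(struct net *net, struct socket *sock,
 *			     int protocol, int kern)
 *	{
 *		struct sock *sk;
 *
 *		sk = sk_alloc(net, AF_MYFAMILY, GFP_KERNEL, &my_proto);
 *		if (!sk)
 *			return -ENOBUFS;
 *
 *		sock_init_data(sock, sk);
 *		return 0;
 *	}
 */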
1219 
1220 static void __sk_free(struct sock *sk)
1221 {
1222 	struct sk_filter *filter;
1223 
1224 	if (sk->sk_destruct)
1225 		sk->sk_destruct(sk);
1226 
1227 	filter = rcu_dereference_check(sk->sk_filter,
1228 				       atomic_read(&sk->sk_wmem_alloc) == 0);
1229 	if (filter) {
1230 		sk_filter_uncharge(sk, filter);
1231 		RCU_INIT_POINTER(sk->sk_filter, NULL);
1232 	}
1233 
1234 	sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP);
1235 
1236 	if (atomic_read(&sk->sk_omem_alloc))
1237 		printk(KERN_DEBUG "%s: optmem leakage (%d bytes) detected.\n",
1238 		       __func__, atomic_read(&sk->sk_omem_alloc));
1239 
1240 	if (sk->sk_peer_cred)
1241 		put_cred(sk->sk_peer_cred);
1242 	put_pid(sk->sk_peer_pid);
1243 	put_net(sock_net(sk));
1244 	sk_prot_free(sk->sk_prot_creator, sk);
1245 }
1246 
1247 void sk_free(struct sock *sk)
1248 {
1249 	/*
1250 	 * We subtract one from sk_wmem_alloc so we can tell whether
1251 	 * some packets are still in some tx queue.
1252 	 * If the count is non-zero, sock_wfree() will call __sk_free(sk) later.
1253 	 */
1254 	if (atomic_dec_and_test(&sk->sk_wmem_alloc))
1255 		__sk_free(sk);
1256 }
1257 EXPORT_SYMBOL(sk_free);
1258 
1259 /*
1260  * The last sock_put should drop the reference to sk->sk_net. It has already
1261  * been dropped in sk_change_net. Taking a reference to a stopping namespace
1262  * is not an option.
1263  * Take a reference to the socket to remove it from the hash while it is still
1264  * _alive_, and after that destroy it in the context of init_net.
1265  */
1266 void sk_release_kernel(struct sock *sk)
1267 {
1268 	if (sk == NULL || sk->sk_socket == NULL)
1269 		return;
1270 
1271 	sock_hold(sk);
1272 	sock_release(sk->sk_socket);
1273 	release_net(sock_net(sk));
1274 	sock_net_set(sk, get_net(&init_net));
1275 	sock_put(sk);
1276 }
1277 EXPORT_SYMBOL(sk_release_kernel);
1278 
1279 /**
1280  *	sk_clone_lock - clone a socket, and lock its clone
1281  *	@sk: the socket to clone
1282  *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
1283  *
1284  *	Caller must unlock socket even in error path (bh_unlock_sock(newsk))
1285  */
1286 struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
1287 {
1288 	struct sock *newsk;
1289 
1290 	newsk = sk_prot_alloc(sk->sk_prot, priority, sk->sk_family);
1291 	if (newsk != NULL) {
1292 		struct sk_filter *filter;
1293 
1294 		sock_copy(newsk, sk);
1295 
1296 		/* SANITY */
1297 		get_net(sock_net(newsk));
1298 		sk_node_init(&newsk->sk_node);
1299 		sock_lock_init(newsk);
1300 		bh_lock_sock(newsk);
1301 		newsk->sk_backlog.head	= newsk->sk_backlog.tail = NULL;
1302 		newsk->sk_backlog.len = 0;
1303 
1304 		atomic_set(&newsk->sk_rmem_alloc, 0);
1305 		/*
1306 		 * sk_wmem_alloc set to one (see sk_free() and sock_wfree())
1307 		 */
1308 		atomic_set(&newsk->sk_wmem_alloc, 1);
1309 		atomic_set(&newsk->sk_omem_alloc, 0);
1310 		skb_queue_head_init(&newsk->sk_receive_queue);
1311 		skb_queue_head_init(&newsk->sk_write_queue);
1312 #ifdef CONFIG_NET_DMA
1313 		skb_queue_head_init(&newsk->sk_async_wait_queue);
1314 #endif
1315 
1316 		spin_lock_init(&newsk->sk_dst_lock);
1317 		rwlock_init(&newsk->sk_callback_lock);
1318 		lockdep_set_class_and_name(&newsk->sk_callback_lock,
1319 				af_callback_keys + newsk->sk_family,
1320 				af_family_clock_key_strings[newsk->sk_family]);
1321 
1322 		newsk->sk_dst_cache	= NULL;
1323 		newsk->sk_wmem_queued	= 0;
1324 		newsk->sk_forward_alloc = 0;
1325 		newsk->sk_send_head	= NULL;
1326 		newsk->sk_userlocks	= sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
1327 
1328 		sock_reset_flag(newsk, SOCK_DONE);
1329 		skb_queue_head_init(&newsk->sk_error_queue);
1330 
1331 		filter = rcu_dereference_protected(newsk->sk_filter, 1);
1332 		if (filter != NULL)
1333 			sk_filter_charge(newsk, filter);
1334 
1335 		if (unlikely(xfrm_sk_clone_policy(newsk))) {
1336 			/* It is still a raw copy of the parent, so invalidate
1337 			 * the destructor and do a plain sk_free() */
1338 			newsk->sk_destruct = NULL;
1339 			bh_unlock_sock(newsk);
1340 			sk_free(newsk);
1341 			newsk = NULL;
1342 			goto out;
1343 		}
1344 
1345 		newsk->sk_err	   = 0;
1346 		newsk->sk_priority = 0;
1347 		/*
1348 		 * Before updating sk_refcnt, we must commit prior changes to memory
1349 		 * (Documentation/RCU/rculist_nulls.txt for details)
1350 		 */
1351 		smp_wmb();
1352 		atomic_set(&newsk->sk_refcnt, 2);
1353 
1354 		/*
1355 		 * Increment the counter in the same struct proto as the master
1356 		 * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
1357 		 * is the same as sk->sk_prot->socks, as this field was copied
1358 		 * with memcpy).
1359 		 *
1360 		 * This _changes_ the previous behaviour, where
1361 		 * tcp_create_openreq_child always was incrementing the
1362 		 * equivalent to tcp_prot->socks (inet_sock_nr), so this have
1363 		 * to be taken into account in all callers. -acme
1364 		 */
1365 		sk_refcnt_debug_inc(newsk);
1366 		sk_set_socket(newsk, NULL);
1367 		newsk->sk_wq = NULL;
1368 
1369 		if (newsk->sk_prot->sockets_allocated)
1370 			sk_sockets_allocated_inc(newsk);
1371 
1372 		if (newsk->sk_flags & SK_FLAGS_TIMESTAMP)
1373 			net_enable_timestamp();
1374 	}
1375 out:
1376 	return newsk;
1377 }
1378 EXPORT_SYMBOL_GPL(sk_clone_lock);
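/*
 * A minimal caller sketch honouring the locking contract above; the
 * clone comes back bh-locked and the caller must unlock it, even when
 * its own follow-up initialisation fails (hypothetical, for
 * illustration):
 *
 *	struct sock *newsk = sk_clone_lock(sk, GFP_ATOMIC);
 *
 *	if (newsk) {
 *		... protocol-specific setup of newsk ...
 *		bh_unlock_sock(newsk);
 *	}
 */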
1379 
1380 void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
1381 {
1382 	__sk_dst_set(sk, dst);
1383 	sk->sk_route_caps = dst->dev->features;
1384 	if (sk->sk_route_caps & NETIF_F_GSO)
1385 		sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
1386 	sk->sk_route_caps &= ~sk->sk_route_nocaps;
1387 	if (sk_can_gso(sk)) {
1388 		if (dst->header_len) {
1389 			sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
1390 		} else {
1391 			sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
1392 			sk->sk_gso_max_size = dst->dev->gso_max_size;
1393 		}
1394 	}
1395 }
1396 EXPORT_SYMBOL_GPL(sk_setup_caps);
1397 
1398 void __init sk_init(void)
1399 {
1400 	if (totalram_pages <= 4096) {
1401 		sysctl_wmem_max = 32767;
1402 		sysctl_rmem_max = 32767;
1403 		sysctl_wmem_default = 32767;
1404 		sysctl_rmem_default = 32767;
1405 	} else if (totalram_pages >= 131072) {
1406 		sysctl_wmem_max = 131071;
1407 		sysctl_rmem_max = 131071;
1408 	}
1409 }
1410 
1411 /*
1412  *	Simple resource managers for sockets.
1413  */
1414 
1415 
1416 /*
1417  * Write buffer destructor automatically called from kfree_skb.
1418  */
1419 void sock_wfree(struct sk_buff *skb)
1420 {
1421 	struct sock *sk = skb->sk;
1422 	unsigned int len = skb->truesize;
1423 
1424 	if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) {
1425 		/*
1426 		 * Keep a reference on sk_wmem_alloc; it will be released
1427 		 * after the sk_write_space() call
1428 		 */
1429 		atomic_sub(len - 1, &sk->sk_wmem_alloc);
1430 		sk->sk_write_space(sk);
1431 		len = 1;
1432 	}
1433 	/*
1434 	 * if sk_wmem_alloc reaches 0, we must finish what sk_free()
1435 	 * could not do because of in-flight packets
1436 	 */
1437 	if (atomic_sub_and_test(len, &sk->sk_wmem_alloc))
1438 		__sk_free(sk);
1439 }
1440 EXPORT_SYMBOL(sock_wfree);
1441 
1442 /*
1443  * Read buffer destructor automatically called from kfree_skb.
1444  */
1445 void sock_rfree(struct sk_buff *skb)
1446 {
1447 	struct sock *sk = skb->sk;
1448 	unsigned int len = skb->truesize;
1449 
1450 	atomic_sub(len, &sk->sk_rmem_alloc);
1451 	sk_mem_uncharge(sk, len);
1452 }
1453 EXPORT_SYMBOL(sock_rfree);
1454 
1455 
1456 int sock_i_uid(struct sock *sk)
1457 {
1458 	int uid;
1459 
1460 	read_lock_bh(&sk->sk_callback_lock);
1461 	uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : 0;
1462 	read_unlock_bh(&sk->sk_callback_lock);
1463 	return uid;
1464 }
1465 EXPORT_SYMBOL(sock_i_uid);
1466 
1467 unsigned long sock_i_ino(struct sock *sk)
1468 {
1469 	unsigned long ino;
1470 
1471 	read_lock_bh(&sk->sk_callback_lock);
1472 	ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
1473 	read_unlock_bh(&sk->sk_callback_lock);
1474 	return ino;
1475 }
1476 EXPORT_SYMBOL(sock_i_ino);
1477 
1478 /*
1479  * Allocate a skb from the socket's send buffer.
1480  */
1481 struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
1482 			     gfp_t priority)
1483 {
1484 	if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
1485 		struct sk_buff *skb = alloc_skb(size, priority);
1486 		if (skb) {
1487 			skb_set_owner_w(skb, sk);
1488 			return skb;
1489 		}
1490 	}
1491 	return NULL;
1492 }
1493 EXPORT_SYMBOL(sock_wmalloc);
1494 
1495 /*
1496  * Allocate a skb from the socket's receive buffer.
1497  */
1498 struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force,
1499 			     gfp_t priority)
1500 {
1501 	if (force || atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
1502 		struct sk_buff *skb = alloc_skb(size, priority);
1503 		if (skb) {
1504 			skb_set_owner_r(skb, sk);
1505 			return skb;
1506 		}
1507 	}
1508 	return NULL;
1509 }
1510 
1511 /*
1512  * Allocate a memory block from the socket's option memory buffer.
1513  */
1514 void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
1515 {
1516 	if ((unsigned)size <= sysctl_optmem_max &&
1517 	    atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
1518 		void *mem;
1519 		/* First do the add, to avoid the race if kmalloc
1520 		 * might sleep.
1521 		 */
1522 		atomic_add(size, &sk->sk_omem_alloc);
1523 		mem = kmalloc(size, priority);
1524 		if (mem)
1525 			return mem;
1526 		atomic_sub(size, &sk->sk_omem_alloc);
1527 	}
1528 	return NULL;
1529 }
1530 EXPORT_SYMBOL(sock_kmalloc);
1531 
1532 /*
1533  * Free an option memory block.
1534  */
1535 void sock_kfree_s(struct sock *sk, void *mem, int size)
1536 {
1537 	kfree(mem);
1538 	atomic_sub(size, &sk->sk_omem_alloc);
1539 }
1540 EXPORT_SYMBOL(sock_kfree_s);
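/*
 * sock_kmalloc()/sock_kfree_s() are used as a pair so the option memory
 * charge stays balanced; a minimal sketch (struct my_opt is
 * hypothetical, for illustration):
 *
 *	struct my_opt *opt = sock_kmalloc(sk, sizeof(*opt), GFP_KERNEL);
 *
 *	if (!opt)
 *		return -ENOBUFS;
 *	... use opt; sizeof(*opt) is charged to sk->sk_omem_alloc ...
 *	sock_kfree_s(sk, opt, sizeof(*opt));
 */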
1541 
1542 /* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
1543    I think these locks should be removed for datagram sockets.
1544  */
1545 static long sock_wait_for_wmem(struct sock *sk, long timeo)
1546 {
1547 	DEFINE_WAIT(wait);
1548 
1549 	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
1550 	for (;;) {
1551 		if (!timeo)
1552 			break;
1553 		if (signal_pending(current))
1554 			break;
1555 		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1556 		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1557 		if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
1558 			break;
1559 		if (sk->sk_shutdown & SEND_SHUTDOWN)
1560 			break;
1561 		if (sk->sk_err)
1562 			break;
1563 		timeo = schedule_timeout(timeo);
1564 	}
1565 	finish_wait(sk_sleep(sk), &wait);
1566 	return timeo;
1567 }
1568 
1569 
1570 /*
1571  *	Generic send/receive buffer handlers
1572  */
1573 
1574 struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
1575 				     unsigned long data_len, int noblock,
1576 				     int *errcode)
1577 {
1578 	struct sk_buff *skb;
1579 	gfp_t gfp_mask;
1580 	long timeo;
1581 	int err;
1582 
1583 	gfp_mask = sk->sk_allocation;
1584 	if (gfp_mask & __GFP_WAIT)
1585 		gfp_mask |= __GFP_REPEAT;
1586 
1587 	timeo = sock_sndtimeo(sk, noblock);
1588 	while (1) {
1589 		err = sock_error(sk);
1590 		if (err != 0)
1591 			goto failure;
1592 
1593 		err = -EPIPE;
1594 		if (sk->sk_shutdown & SEND_SHUTDOWN)
1595 			goto failure;
1596 
1597 		if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
1598 			skb = alloc_skb(header_len, gfp_mask);
1599 			if (skb) {
1600 				int npages;
1601 				int i;
1602 
1603 				/* No pages, we're done... */
1604 				if (!data_len)
1605 					break;
1606 
1607 				npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
1608 				skb->truesize += data_len;
1609 				skb_shinfo(skb)->nr_frags = npages;
1610 				for (i = 0; i < npages; i++) {
1611 					struct page *page;
1612 
1613 					page = alloc_pages(sk->sk_allocation, 0);
1614 					if (!page) {
1615 						err = -ENOBUFS;
1616 						skb_shinfo(skb)->nr_frags = i;
1617 						kfree_skb(skb);
1618 						goto failure;
1619 					}
1620 
1621 					__skb_fill_page_desc(skb, i,
1622 							page, 0,
1623 							(data_len >= PAGE_SIZE ?
1624 							 PAGE_SIZE :
1625 							 data_len));
1626 					data_len -= PAGE_SIZE;
1627 				}
1628 
1629 				/* Full success... */
1630 				break;
1631 			}
1632 			err = -ENOBUFS;
1633 			goto failure;
1634 		}
1635 		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
1636 		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1637 		err = -EAGAIN;
1638 		if (!timeo)
1639 			goto failure;
1640 		if (signal_pending(current))
1641 			goto interrupted;
1642 		timeo = sock_wait_for_wmem(sk, timeo);
1643 	}
1644 
1645 	skb_set_owner_w(skb, sk);
1646 	return skb;
1647 
1648 interrupted:
1649 	err = sock_intr_errno(timeo);
1650 failure:
1651 	*errcode = err;
1652 	return NULL;
1653 }
1654 EXPORT_SYMBOL(sock_alloc_send_pskb);
1655 
1656 struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
1657 				    int noblock, int *errcode)
1658 {
1659 	return sock_alloc_send_pskb(sk, size, 0, noblock, errcode);
1660 }
1661 EXPORT_SYMBOL(sock_alloc_send_skb);
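/*
 * A minimal sketch of a datagram sendmsg() path built on the helper
 * above (hypothetical, error handling trimmed; hlen is the header room
 * the protocol needs):
 *
 *	skb = sock_alloc_send_skb(sk, hlen + len,
 *				  msg->msg_flags & MSG_DONTWAIT, &err);
 *	if (!skb)
 *		return err;
 *	skb_reserve(skb, hlen);
 *	err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
 *
 * On failure, err carries -EAGAIN, -EPIPE, -ENOBUFS or a signal-derived
 * code from sock_intr_errno().
 */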
1662 
1663 static void __lock_sock(struct sock *sk)
1664 	__releases(&sk->sk_lock.slock)
1665 	__acquires(&sk->sk_lock.slock)
1666 {
1667 	DEFINE_WAIT(wait);
1668 
1669 	for (;;) {
1670 		prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
1671 					TASK_UNINTERRUPTIBLE);
1672 		spin_unlock_bh(&sk->sk_lock.slock);
1673 		schedule();
1674 		spin_lock_bh(&sk->sk_lock.slock);
1675 		if (!sock_owned_by_user(sk))
1676 			break;
1677 	}
1678 	finish_wait(&sk->sk_lock.wq, &wait);
1679 }
1680 
1681 static void __release_sock(struct sock *sk)
1682 	__releases(&sk->sk_lock.slock)
1683 	__acquires(&sk->sk_lock.slock)
1684 {
1685 	struct sk_buff *skb = sk->sk_backlog.head;
1686 
1687 	do {
1688 		sk->sk_backlog.head = sk->sk_backlog.tail = NULL;
1689 		bh_unlock_sock(sk);
1690 
1691 		do {
1692 			struct sk_buff *next = skb->next;
1693 
1694 			WARN_ON_ONCE(skb_dst_is_noref(skb));
1695 			skb->next = NULL;
1696 			sk_backlog_rcv(sk, skb);
1697 
1698 			/*
1699 			 * We are in process context here with softirqs
1700 			 * disabled; use cond_resched_softirq() to preempt.
1701 			 * This is safe to do because we've taken the backlog
1702 			 * queue private:
1703 			 */
1704 			cond_resched_softirq();
1705 
1706 			skb = next;
1707 		} while (skb != NULL);
1708 
1709 		bh_lock_sock(sk);
1710 	} while ((skb = sk->sk_backlog.head) != NULL);
1711 
1712 	/*
1713 	 * Doing the zeroing here guarantees we cannot loop forever
1714 	 * while a wild producer attempts to flood us.
1715 	 */
1716 	sk->sk_backlog.len = 0;
1717 }
1718 
1719 /**
1720  * sk_wait_data - wait for data to arrive at sk_receive_queue
1721  * @sk:    sock to wait on
1722  * @timeo: for how long
1723  *
1724  * Now socket state including sk->sk_err is changed only under lock,
1725  * hence we may omit checks after joining wait queue.
1726  * We check receive queue before schedule() only as optimization;
1727  * it is very likely that release_sock() added new data.
1728  */
1729 int sk_wait_data(struct sock *sk, long *timeo)
1730 {
1731 	int rc;
1732 	DEFINE_WAIT(wait);
1733 
1734 	prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1735 	set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1736 	rc = sk_wait_event(sk, timeo, !skb_queue_empty(&sk->sk_receive_queue));
1737 	clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1738 	finish_wait(sk_sleep(sk), &wait);
1739 	return rc;
1740 }
1741 EXPORT_SYMBOL(sk_wait_data);
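/*
 * Typical use is a recvmsg() loop run under lock_sock(); a minimal
 * sketch (hypothetical, for illustration). Note that sk_wait_event(),
 * which this helper uses, drops and re-takes the socket lock around the
 * actual sleep:
 *
 *	lock_sock(sk);
 *	while (skb_queue_empty(&sk->sk_receive_queue)) {
 *		if (!timeo) {
 *			err = -EAGAIN;
 *			break;
 *		}
 *		if (signal_pending(current)) {
 *			err = sock_intr_errno(timeo);
 *			break;
 *		}
 *		sk_wait_data(sk, &timeo);
 *	}
 *	release_sock(sk);
 */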
1742 
1743 /**
1744  *	__sk_mem_schedule - increase sk_forward_alloc and memory_allocated
1745  *	@sk: socket
1746  *	@size: memory size to allocate
1747  *	@kind: allocation type
1748  *
1749  *	If kind is SK_MEM_SEND, it means wmem allocation. Otherwise it means
1750  *	rmem allocation. This function assumes that protocols which have
1751  *	memory_pressure use sk_wmem_queued as write buffer accounting.
1752  */
1753 int __sk_mem_schedule(struct sock *sk, int size, int kind)
1754 {
1755 	struct proto *prot = sk->sk_prot;
1756 	int amt = sk_mem_pages(size);
1757 	long allocated;
1758 	int parent_status = UNDER_LIMIT;
1759 
1760 	sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
1761 
1762 	allocated = sk_memory_allocated_add(sk, amt, &parent_status);
1763 
1764 	/* Under limit. */
1765 	if (parent_status == UNDER_LIMIT &&
1766 			allocated <= sk_prot_mem_limits(sk, 0)) {
1767 		sk_leave_memory_pressure(sk);
1768 		return 1;
1769 	}
1770 
1771 	/* Under pressure. (we or our parents) */
1772 	if ((parent_status > SOFT_LIMIT) ||
1773 			allocated > sk_prot_mem_limits(sk, 1))
1774 		sk_enter_memory_pressure(sk);
1775 
1776 	/* Over hard limit (we or our parents) */
1777 	if ((parent_status == OVER_LIMIT) ||
1778 			(allocated > sk_prot_mem_limits(sk, 2)))
1779 		goto suppress_allocation;
1780 
1781 	/* guarantee minimum buffer size under pressure */
1782 	if (kind == SK_MEM_RECV) {
1783 		if (atomic_read(&sk->sk_rmem_alloc) < prot->sysctl_rmem[0])
1784 			return 1;
1785 
1786 	} else { /* SK_MEM_SEND */
1787 		if (sk->sk_type == SOCK_STREAM) {
1788 			if (sk->sk_wmem_queued < prot->sysctl_wmem[0])
1789 				return 1;
1790 		} else if (atomic_read(&sk->sk_wmem_alloc) <
1791 			   prot->sysctl_wmem[0])
1792 				return 1;
1793 	}
1794 
1795 	if (sk_has_memory_pressure(sk)) {
1796 		int alloc;
1797 
1798 		if (!sk_under_memory_pressure(sk))
1799 			return 1;
1800 		alloc = sk_sockets_allocated_read_positive(sk);
1801 		if (sk_prot_mem_limits(sk, 2) > alloc *
1802 		    sk_mem_pages(sk->sk_wmem_queued +
1803 				 atomic_read(&sk->sk_rmem_alloc) +
1804 				 sk->sk_forward_alloc))
1805 			return 1;
1806 	}
1807 
1808 suppress_allocation:
1809 
1810 	if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) {
1811 		sk_stream_moderate_sndbuf(sk);
1812 
1813 		/* Fail only if socket is _under_ its sndbuf.
1814 		 * In this case we cannot block, so that we have to fail.
1815 		 */
1816 		if (sk->sk_wmem_queued + size >= sk->sk_sndbuf)
1817 			return 1;
1818 	}
1819 
1820 	trace_sock_exceed_buf_limit(sk, prot, allocated);
1821 
1822 	/* Alas. Undo changes. */
1823 	sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM;
1824 
1825 	sk_memory_allocated_sub(sk, amt, parent_status);
1826 
1827 	return 0;
1828 }
1829 EXPORT_SYMBOL(__sk_mem_schedule);
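/*
 * Protocols normally reach this through the sk_rmem_schedule() and
 * sk_wmem_schedule() wrappers; a minimal receive-side sketch, matching
 * the pattern used in sock_queue_rcv_skb() above:
 *
 *	if (!sk_rmem_schedule(sk, skb->truesize)) {
 *		atomic_inc(&sk->sk_drops);
 *		return -ENOBUFS;
 *	}
 *	skb_set_owner_r(skb, sk);
 */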
1830 
1831 /**
1832  *	__sk_mem_reclaim - reclaim memory_allocated
1833  *	@sk: socket
1834  */
1835 void __sk_mem_reclaim(struct sock *sk)
1836 {
1837 	sk_memory_allocated_sub(sk,
1838 				sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT, 0);
1839 	sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1;
1840 
1841 	if (sk_under_memory_pressure(sk) &&
1842 	    (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)))
1843 		sk_leave_memory_pressure(sk);
1844 }
1845 EXPORT_SYMBOL(__sk_mem_reclaim);
1846 
1847 
1848 /*
1849  * Set of default routines for initialising struct proto_ops when
1850  * the protocol does not support a particular function. In certain
1851  * cases where it makes no sense for a protocol to have a "do nothing"
1852  * function, some default processing is provided.
1853  */

int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_bind);

int sock_no_connect(struct socket *sock, struct sockaddr *saddr,
		    int len, int flags)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_connect);

int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_socketpair);

int sock_no_accept(struct socket *sock, struct socket *newsock, int flags)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_accept);

int sock_no_getname(struct socket *sock, struct sockaddr *saddr,
		    int *len, int peer)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_getname);

unsigned int sock_no_poll(struct file *file, struct socket *sock, poll_table *pt)
{
	return 0;
}
EXPORT_SYMBOL(sock_no_poll);

int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_ioctl);

int sock_no_listen(struct socket *sock, int backlog)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_listen);

int sock_no_shutdown(struct socket *sock, int how)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_shutdown);

int sock_no_setsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, unsigned int optlen)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_setsockopt);

int sock_no_getsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, int __user *optlen)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_getsockopt);

int sock_no_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
		    size_t len)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_sendmsg);

int sock_no_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
		    size_t len, int flags)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_recvmsg);

int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
{
	/* Mirror missing mmap method error code */
	return -ENODEV;
}
EXPORT_SYMBOL(sock_no_mmap);

ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
{
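	/* No real sendpage method: fall back to a one-segment
	 * kernel_sendmsg() over a kmap()ed view of the page, which works
	 * for any protocol that provides a sendmsg method.
	 */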
	ssize_t res;
	struct msghdr msg = {.msg_flags = flags};
	struct kvec iov;
	char *kaddr = kmap(page);
	iov.iov_base = kaddr + offset;
	iov.iov_len = size;
	res = kernel_sendmsg(sock, &msg, &iov, 1, size);
	kunmap(page);
	return res;
}
EXPORT_SYMBOL(sock_no_sendpage);

/*
 *	Default Socket Callbacks
 */

static void sock_def_wakeup(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))
		wake_up_interruptible_all(&wq->wait);
	rcu_read_unlock();
}

static void sock_def_error_report(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))
		wake_up_interruptible_poll(&wq->wait, POLLERR);
	sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
	rcu_read_unlock();
}

static void sock_def_readable(struct sock *sk, int len)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLIN | POLLPRI |
						POLLRDNORM | POLLRDBAND);
	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
	rcu_read_unlock();
}

static void sock_def_write_space(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();

	/* Do not wake up a writer until he can make "significant"
	 * progress.  --DaveM
	 */
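	/* "Significant" means at least half of sk_sndbuf is free; e.g.
	 * with sk_sndbuf == 65536 a writer is only woken once
	 * sk_wmem_alloc has dropped to 32768 or below.
	 */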
	if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
		wq = rcu_dereference(sk->sk_wq);
		if (wq_has_sleeper(wq))
			wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
						POLLWRNORM | POLLWRBAND);

		/* Should agree with poll, otherwise some programs break */
		if (sock_writeable(sk))
			sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	}

	rcu_read_unlock();
}

static void sock_def_destruct(struct sock *sk)
{
	kfree(sk->sk_protinfo);
}

void sk_send_sigurg(struct sock *sk)
{
	if (sk->sk_socket && sk->sk_socket->file)
		if (send_sigurg(&sk->sk_socket->file->f_owner))
			sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI);
}
EXPORT_SYMBOL(sk_send_sigurg);

void sk_reset_timer(struct sock *sk, struct timer_list* timer,
		    unsigned long expires)
{
	if (!mod_timer(timer, expires))
		sock_hold(sk);
}
EXPORT_SYMBOL(sk_reset_timer);

void sk_stop_timer(struct sock *sk, struct timer_list* timer)
{
	if (timer_pending(timer) && del_timer(timer))
		__sock_put(sk);
}
EXPORT_SYMBOL(sk_stop_timer);
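
/*
 * Example (illustrative sketch, hypothetical names): the pair above
 * keeps a socket reference pinned while a timer is armed.
 */
#if 0	/* illustration only */
static void example_arm_retransmit(struct sock *sk)
{
	/* sock_hold() is taken only if the timer was not already
	 * pending, so rearming does not leak references.
	 */
	sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ);
}

static void example_disarm_retransmit(struct sock *sk)
{
	/* Drops the timer's reference iff a pending timer was deleted. */
	sk_stop_timer(sk, &sk->sk_timer);
}
#endif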

void sock_init_data(struct socket *sock, struct sock *sk)
{
	skb_queue_head_init(&sk->sk_receive_queue);
	skb_queue_head_init(&sk->sk_write_queue);
	skb_queue_head_init(&sk->sk_error_queue);
#ifdef CONFIG_NET_DMA
	skb_queue_head_init(&sk->sk_async_wait_queue);
#endif

	sk->sk_send_head	=	NULL;

	init_timer(&sk->sk_timer);

	sk->sk_allocation	=	GFP_KERNEL;
	sk->sk_rcvbuf		=	sysctl_rmem_default;
	sk->sk_sndbuf		=	sysctl_wmem_default;
	sk->sk_state		=	TCP_CLOSE;
	sk_set_socket(sk, sock);

	sock_set_flag(sk, SOCK_ZAPPED);

	if (sock) {
		sk->sk_type	=	sock->type;
		sk->sk_wq	=	sock->wq;
		sock->sk	=	sk;
	} else
		sk->sk_wq	=	NULL;

	spin_lock_init(&sk->sk_dst_lock);
	rwlock_init(&sk->sk_callback_lock);
	lockdep_set_class_and_name(&sk->sk_callback_lock,
			af_callback_keys + sk->sk_family,
			af_family_clock_key_strings[sk->sk_family]);

	sk->sk_state_change	=	sock_def_wakeup;
	sk->sk_data_ready	=	sock_def_readable;
	sk->sk_write_space	=	sock_def_write_space;
	sk->sk_error_report	=	sock_def_error_report;
	sk->sk_destruct		=	sock_def_destruct;

	sk->sk_sndmsg_page	=	NULL;
	sk->sk_sndmsg_off	=	0;

	sk->sk_peer_pid 	=	NULL;
	sk->sk_peer_cred	=	NULL;
	sk->sk_write_pending	=	0;
	sk->sk_rcvlowat		=	1;
	sk->sk_rcvtimeo		=	MAX_SCHEDULE_TIMEOUT;
	sk->sk_sndtimeo		=	MAX_SCHEDULE_TIMEOUT;

	sk->sk_stamp = ktime_set(-1L, 0);

	/*
	 * Before updating sk_refcnt, we must commit prior changes to memory
	 * (Documentation/RCU/rculist_nulls.txt for details)
	 */
	smp_wmb();
	atomic_set(&sk->sk_refcnt, 1);
	atomic_set(&sk->sk_drops, 0);
}
EXPORT_SYMBOL(sock_init_data);
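
/*
 * Example (illustrative sketch, hypothetical names): protocols normally
 * call sock_init_data() right after allocating the sock, then install
 * their own ops and callbacks on top of the defaults.
 */
#if 0	/* illustration only */
static int example_create(struct net *net, struct socket *sock)
{
	struct sock *sk;

	sk = sk_alloc(net, PF_EXAMPLE, GFP_KERNEL, &example_proto);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);	/* queues, defaults, callbacks */
	sock->ops = &example_dgram_ops;
	return 0;
}
#endif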

void lock_sock_nested(struct sock *sk, int subclass)
{
	might_sleep();
	spin_lock_bh(&sk->sk_lock.slock);
	if (sk->sk_lock.owned)
		__lock_sock(sk);
	sk->sk_lock.owned = 1;
	spin_unlock(&sk->sk_lock.slock);
	/*
	 * The sk_lock has mutex_lock() semantics here:
	 */
	mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
	local_bh_enable();
}
EXPORT_SYMBOL(lock_sock_nested);

void release_sock(struct sock *sk)
{
	/*
	 * The sk_lock has mutex_unlock() semantics:
	 */
	mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);

	spin_lock_bh(&sk->sk_lock.slock);
	if (sk->sk_backlog.tail)
		__release_sock(sk);
	sk->sk_lock.owned = 0;
	if (waitqueue_active(&sk->sk_lock.wq))
		wake_up(&sk->sk_lock.wq);
	spin_unlock_bh(&sk->sk_lock.slock);
}
EXPORT_SYMBOL(release_sock);
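
/*
 * Example (illustrative sketch, hypothetical names): the canonical
 * process-context pattern built on the two functions above.
 */
#if 0	/* illustration only */
static int example_setsockopt_handler(struct sock *sk, int val)
{
	int err;

	lock_sock(sk);			/* may sleep; takes ownership */
	err = example_apply_option(sk, val);
	release_sock(sk);		/* also processes the backlog */
	return err;
}
#endif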

/**
 * lock_sock_fast - fast version of lock_sock
 * @sk: socket
 *
 * This version should be used for very small sections, where the process
 * won't block. It returns false if the fast path was taken:
 *   sk_lock.slock locked, owned = 0, BH disabled
 * and true if the slow path was taken:
 *   sk_lock.slock unlocked, owned = 1, BH enabled
 */
bool lock_sock_fast(struct sock *sk)
{
	might_sleep();
	spin_lock_bh(&sk->sk_lock.slock);

	if (!sk->sk_lock.owned)
		/*
		 * Note: we return with BH disabled here; the caller
		 * must pair this with unlock_sock_fast().
		 */
		return false;

	__lock_sock(sk);
	sk->sk_lock.owned = 1;
	spin_unlock(&sk->sk_lock.slock);
	/*
	 * The sk_lock has mutex_lock() semantics here:
	 */
	mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
	local_bh_enable();
	return true;
}
EXPORT_SYMBOL(lock_sock_fast);
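
/*
 * Example (illustrative sketch): lock_sock_fast() pairs with
 * unlock_sock_fast() from include/net/sock.h, keyed by the returned
 * slow-path flag.
 */
#if 0	/* illustration only */
static void example_short_section(struct sock *sk)
{
	bool slow = lock_sock_fast(sk);

	/* short, non-blocking critical section */

	unlock_sock_fast(sk, slow);
}
#endif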

int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
{
	struct timeval tv;
	if (!sock_flag(sk, SOCK_TIMESTAMP))
		sock_enable_timestamp(sk, SOCK_TIMESTAMP);
	tv = ktime_to_timeval(sk->sk_stamp);
	if (tv.tv_sec == -1)
		return -ENOENT;
	if (tv.tv_sec == 0) {
		sk->sk_stamp = ktime_get_real();
		tv = ktime_to_timeval(sk->sk_stamp);
	}
	return copy_to_user(userstamp, &tv, sizeof(tv)) ? -EFAULT : 0;
}
EXPORT_SYMBOL(sock_get_timestamp);

int sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp)
{
	struct timespec ts;
	if (!sock_flag(sk, SOCK_TIMESTAMP))
		sock_enable_timestamp(sk, SOCK_TIMESTAMP);
	ts = ktime_to_timespec(sk->sk_stamp);
	if (ts.tv_sec == -1)
		return -ENOENT;
	if (ts.tv_sec == 0) {
		sk->sk_stamp = ktime_get_real();
		ts = ktime_to_timespec(sk->sk_stamp);
	}
	return copy_to_user(userstamp, &ts, sizeof(ts)) ? -EFAULT : 0;
}
EXPORT_SYMBOL(sock_get_timestampns);

void sock_enable_timestamp(struct sock *sk, int flag)
{
	if (!sock_flag(sk, flag)) {
		unsigned long previous_flags = sk->sk_flags;

		sock_set_flag(sk, flag);
		/*
		 * We just set one of the two flags that require net
		 * time stamping, but time stamping might already have
		 * been enabled because of the other one.
		 */
		if (!(previous_flags & SK_FLAGS_TIMESTAMP))
			net_enable_timestamp();
	}
}

/*
 *	Get a socket option on a socket.
 *
 *	FIX: POSIX 1003.1g is very ambiguous here. It states that
 *	asynchronous errors should be reported by getsockopt. We assume
 *	this means if you specify SO_ERROR (otherwise what's the point of it).
 */
int sock_common_getsockopt(struct socket *sock, int level, int optname,
			   char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(sock_common_getsockopt);

#ifdef CONFIG_COMPAT
int compat_sock_common_getsockopt(struct socket *sock, int level, int optname,
				  char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	if (sk->sk_prot->compat_getsockopt != NULL)
		return sk->sk_prot->compat_getsockopt(sk, level, optname,
						      optval, optlen);
	return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(compat_sock_common_getsockopt);
#endif

int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
			struct msghdr *msg, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	int addr_len = 0;
	int err;

	err = sk->sk_prot->recvmsg(iocb, sk, msg, size, flags & MSG_DONTWAIT,
				   flags & ~MSG_DONTWAIT, &addr_len);
	if (err >= 0)
		msg->msg_namelen = addr_len;
	return err;
}
EXPORT_SYMBOL(sock_common_recvmsg);

/*
 *	Set socket options on an inet socket.
 */
int sock_common_setsockopt(struct socket *sock, int level, int optname,
			   char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;

	return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(sock_common_setsockopt);

#ifdef CONFIG_COMPAT
int compat_sock_common_setsockopt(struct socket *sock, int level, int optname,
				  char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;

	if (sk->sk_prot->compat_setsockopt != NULL)
		return sk->sk_prot->compat_setsockopt(sk, level, optname,
						      optval, optlen);
	return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(compat_sock_common_setsockopt);
#endif

void sk_common_release(struct sock *sk)
{
	if (sk->sk_prot->destroy)
		sk->sk_prot->destroy(sk);

	/*
	 * Observation: when sk_common_release() is called, processes have
	 * no access to the socket, but the network stack still does.
	 * Step one, detach it from networking:
	 *
	 * A. Remove from hash tables.
	 */

	sk->sk_prot->unhash(sk);

	/*
	 * At this point the socket cannot receive new packets, but it is
	 * possible that some packets are in flight because another CPU runs
	 * the receiver and did the hash table lookup before we unhashed the
	 * socket. They will reach the receive queue and be purged by the
	 * socket destructor.
	 *
	 * Also, we may still have packets pending on the receive queue and
	 * probably our own packets waiting in device queues. sock_destroy()
	 * will drain the receive queue, but transmitted packets will delay
	 * socket destruction until the last reference is released.
	 */

	sock_orphan(sk);

	xfrm_sk_free_policy(sk);

	sk_refcnt_debug_release(sk);
	sock_put(sk);
}
EXPORT_SYMBOL(sk_common_release);
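
/*
 * Example (illustrative sketch, hypothetical name): a protocol's close
 * handler commonly funnels into sk_common_release() once its own
 * per-protocol teardown is done.
 */
#if 0	/* illustration only */
static void example_close(struct sock *sk, long timeout)
{
	/* per-protocol cleanup (purge queues etc.) would go here */
	sk_common_release(sk);
}
#endif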

#ifdef CONFIG_PROC_FS
#define PROTO_INUSE_NR	64	/* should be enough for the first time */
struct prot_inuse {
	int val[PROTO_INUSE_NR];
};

static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR);

#ifdef CONFIG_NET_NS
void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
{
	__this_cpu_add(net->core.inuse->val[prot->inuse_idx], val);
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_add);

int sock_prot_inuse_get(struct net *net, struct proto *prot)
{
	int cpu, idx = prot->inuse_idx;
	int res = 0;

	for_each_possible_cpu(cpu)
		res += per_cpu_ptr(net->core.inuse, cpu)->val[idx];

	return res >= 0 ? res : 0;
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
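
/*
 * Note: the sum above may transiently be negative because the per-cpu
 * counters can be incremented on one CPU and decremented on another,
 * hence the clamp to zero on read.  Typical callers bracket hash and
 * unhash, for example:
 */
#if 0	/* illustration only */
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);	/* on hash */
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);	/* on unhash */
#endif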

static int __net_init sock_inuse_init_net(struct net *net)
{
	net->core.inuse = alloc_percpu(struct prot_inuse);
	return net->core.inuse ? 0 : -ENOMEM;
}

static void __net_exit sock_inuse_exit_net(struct net *net)
{
	free_percpu(net->core.inuse);
}

static struct pernet_operations net_inuse_ops = {
	.init = sock_inuse_init_net,
	.exit = sock_inuse_exit_net,
};

static __init int net_inuse_init(void)
{
	if (register_pernet_subsys(&net_inuse_ops))
		panic("Cannot initialize net inuse counters");

	return 0;
}

core_initcall(net_inuse_init);
#else
static DEFINE_PER_CPU(struct prot_inuse, prot_inuse);

void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
{
	__this_cpu_add(prot_inuse.val[prot->inuse_idx], val);
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_add);

int sock_prot_inuse_get(struct net *net, struct proto *prot)
{
	int cpu, idx = prot->inuse_idx;
	int res = 0;

	for_each_possible_cpu(cpu)
		res += per_cpu(prot_inuse, cpu).val[idx];

	return res >= 0 ? res : 0;
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
#endif

static void assign_proto_idx(struct proto *prot)
{
	prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR);

	if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) {
		printk(KERN_ERR "PROTO_INUSE_NR exhausted\n");
		return;
	}

	set_bit(prot->inuse_idx, proto_inuse_idx);
}

static void release_proto_idx(struct proto *prot)
{
	if (prot->inuse_idx != PROTO_INUSE_NR - 1)
		clear_bit(prot->inuse_idx, proto_inuse_idx);
}
#else
static inline void assign_proto_idx(struct proto *prot)
{
}

static inline void release_proto_idx(struct proto *prot)
{
}
#endif

int proto_register(struct proto *prot, int alloc_slab)
{
	if (alloc_slab) {
		prot->slab = kmem_cache_create(prot->name, prot->obj_size, 0,
					SLAB_HWCACHE_ALIGN | prot->slab_flags,
					NULL);

		if (prot->slab == NULL) {
			printk(KERN_CRIT "%s: Can't create sock SLAB cache!\n",
			       prot->name);
			goto out;
		}

		if (prot->rsk_prot != NULL) {
			prot->rsk_prot->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s", prot->name);
			if (prot->rsk_prot->slab_name == NULL)
				goto out_free_sock_slab;

			prot->rsk_prot->slab = kmem_cache_create(prot->rsk_prot->slab_name,
								 prot->rsk_prot->obj_size, 0,
								 SLAB_HWCACHE_ALIGN, NULL);

			if (prot->rsk_prot->slab == NULL) {
				printk(KERN_CRIT "%s: Can't create request sock SLAB cache!\n",
				       prot->name);
				goto out_free_request_sock_slab_name;
			}
		}

		if (prot->twsk_prot != NULL) {
			prot->twsk_prot->twsk_slab_name = kasprintf(GFP_KERNEL, "tw_sock_%s", prot->name);

			if (prot->twsk_prot->twsk_slab_name == NULL)
				goto out_free_request_sock_slab;

			prot->twsk_prot->twsk_slab =
				kmem_cache_create(prot->twsk_prot->twsk_slab_name,
						  prot->twsk_prot->twsk_obj_size,
						  0,
						  SLAB_HWCACHE_ALIGN |
							prot->slab_flags,
						  NULL);
			if (prot->twsk_prot->twsk_slab == NULL)
				goto out_free_timewait_sock_slab_name;
		}
	}

	mutex_lock(&proto_list_mutex);
	list_add(&prot->node, &proto_list);
	assign_proto_idx(prot);
	mutex_unlock(&proto_list_mutex);
	return 0;

out_free_timewait_sock_slab_name:
	kfree(prot->twsk_prot->twsk_slab_name);
out_free_request_sock_slab:
	if (prot->rsk_prot && prot->rsk_prot->slab) {
		kmem_cache_destroy(prot->rsk_prot->slab);
		prot->rsk_prot->slab = NULL;
	}
out_free_request_sock_slab_name:
	if (prot->rsk_prot)
		kfree(prot->rsk_prot->slab_name);
out_free_sock_slab:
	kmem_cache_destroy(prot->slab);
	prot->slab = NULL;
out:
	return -ENOBUFS;
}
EXPORT_SYMBOL(proto_register);
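
/*
 * Example (illustrative sketch, hypothetical names): registering a
 * protocol with its own slab cache.
 */
#if 0	/* illustration only */
static struct proto example_proto = {
	.name		= "EXAMPLE",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct example_sock),
};

static int __init example_init(void)
{
	/* alloc_slab == 1: carve a dedicated kmem cache for the socks */
	return proto_register(&example_proto, 1);
}

static void __exit example_exit(void)
{
	proto_unregister(&example_proto);
}
#endif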

void proto_unregister(struct proto *prot)
{
	mutex_lock(&proto_list_mutex);
	release_proto_idx(prot);
	list_del(&prot->node);
	mutex_unlock(&proto_list_mutex);

	if (prot->slab != NULL) {
		kmem_cache_destroy(prot->slab);
		prot->slab = NULL;
	}

	if (prot->rsk_prot != NULL && prot->rsk_prot->slab != NULL) {
		kmem_cache_destroy(prot->rsk_prot->slab);
		kfree(prot->rsk_prot->slab_name);
		prot->rsk_prot->slab = NULL;
	}

	if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) {
		kmem_cache_destroy(prot->twsk_prot->twsk_slab);
		kfree(prot->twsk_prot->twsk_slab_name);
		prot->twsk_prot->twsk_slab = NULL;
	}
}
EXPORT_SYMBOL(proto_unregister);

#ifdef CONFIG_PROC_FS
static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(proto_list_mutex)
{
	mutex_lock(&proto_list_mutex);
	return seq_list_start_head(&proto_list, *pos);
}

static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	return seq_list_next(v, &proto_list, pos);
}

static void proto_seq_stop(struct seq_file *seq, void *v)
	__releases(proto_list_mutex)
{
	mutex_unlock(&proto_list_mutex);
}

static char proto_method_implemented(const void *method)
{
	return method == NULL ? 'n' : 'y';
}

static long sock_prot_memory_allocated(struct proto *proto)
{
	return proto->memory_allocated != NULL ? proto_memory_allocated(proto) : -1L;
}

static char *sock_prot_memory_pressure(struct proto *proto)
{
	return proto->memory_pressure != NULL ?
		(proto_memory_pressure(proto) ? "yes" : "no") : "NI";
}

static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
{
	seq_printf(seq, "%-9s %4u %6d  %6ld   %-3s %6u   %-3s  %-10s "
			"%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
		   proto->name,
		   proto->obj_size,
		   sock_prot_inuse_get(seq_file_net(seq), proto),
		   sock_prot_memory_allocated(proto),
		   sock_prot_memory_pressure(proto),
		   proto->max_header,
		   proto->slab == NULL ? "no" : "yes",
		   module_name(proto->owner),
		   proto_method_implemented(proto->close),
		   proto_method_implemented(proto->connect),
		   proto_method_implemented(proto->disconnect),
		   proto_method_implemented(proto->accept),
		   proto_method_implemented(proto->ioctl),
		   proto_method_implemented(proto->init),
		   proto_method_implemented(proto->destroy),
		   proto_method_implemented(proto->shutdown),
		   proto_method_implemented(proto->setsockopt),
		   proto_method_implemented(proto->getsockopt),
		   proto_method_implemented(proto->sendmsg),
		   proto_method_implemented(proto->recvmsg),
		   proto_method_implemented(proto->sendpage),
		   proto_method_implemented(proto->bind),
		   proto_method_implemented(proto->backlog_rcv),
		   proto_method_implemented(proto->hash),
		   proto_method_implemented(proto->unhash),
		   proto_method_implemented(proto->get_port),
		   proto_method_implemented(proto->enter_memory_pressure));
}

static int proto_seq_show(struct seq_file *seq, void *v)
{
	if (v == &proto_list)
		seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s",
			   "protocol",
			   "size",
			   "sockets",
			   "memory",
			   "press",
			   "maxhdr",
			   "slab",
			   "module",
			   "cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\n");
	else
		proto_seq_printf(seq, list_entry(v, struct proto, node));
	return 0;
}

static const struct seq_operations proto_seq_ops = {
	.start  = proto_seq_start,
	.next   = proto_seq_next,
	.stop   = proto_seq_stop,
	.show   = proto_seq_show,
};

static int proto_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &proto_seq_ops,
			    sizeof(struct seq_net_private));
}

static const struct file_operations proto_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= proto_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_net,
};

static __net_init int proto_init_net(struct net *net)
{
	if (!proc_net_fops_create(net, "protocols", S_IRUGO, &proto_seq_fops))
		return -ENOMEM;

	return 0;
}

static __net_exit void proto_exit_net(struct net *net)
{
	proc_net_remove(net, "protocols");
}

static __net_initdata struct pernet_operations proto_net_ops = {
	.init = proto_init_net,
	.exit = proto_exit_net,
};

static int __init proto_init(void)
{
	return register_pernet_subsys(&proto_net_ops);
}

subsys_initcall(proto_init);

#endif /* PROC_FS */