xref: /linux/include/net/sock.h (revision 9e8ba5f3ec35cba4fd8a8bebda548c4db2651e40)
1 /*
2  * INET		An implementation of the TCP/IP protocol suite for the LINUX
3  *		operating system.  INET is implemented using the  BSD Socket
4  *		interface as the means of communication with the user level.
5  *
6  *		Definitions for the AF_INET socket handler.
7  *
8  * Version:	@(#)sock.h	1.0.4	05/13/93
9  *
10  * Authors:	Ross Biro
11  *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12  *		Corey Minyard <wf-rch!minyard@relay.EU.net>
13  *		Florian La Roche <flla@stud.uni-sb.de>
14  *
15  * Fixes:
16  *		Alan Cox	:	Volatiles in skbuff pointers. See
17  *					skbuff comments. May be overdone,
18  *					better to prove they can be removed
19  *					than the reverse.
20  *		Alan Cox	:	Added a zapped field for tcp to note
21  *					a socket is reset and must stay shut up
22  *		Alan Cox	:	New fields for options
23  *	Pauline Middelink	:	identd support
24  *		Alan Cox	:	Eliminate low level recv/recvfrom
25  *		David S. Miller	:	New socket lookup architecture.
26  *              Steve Whitehouse:       Default routines for sock_ops
27  *              Arnaldo C. Melo :	removed net_pinfo, tp_pinfo and made
28  *              			protinfo be just a void pointer, as the
29  *              			protocol specific parts were moved to
30  *              			respective headers and ipv4/v6, etc now
31  *              			use private slabcaches for its socks
32  *              Pedro Hortas	:	New flags field for socket options
33  *
34  *
35  *		This program is free software; you can redistribute it and/or
36  *		modify it under the terms of the GNU General Public License
37  *		as published by the Free Software Foundation; either version
38  *		2 of the License, or (at your option) any later version.
39  */
40 #ifndef _SOCK_H
41 #define _SOCK_H
42 
43 #include <linux/hardirq.h>
44 #include <linux/kernel.h>
45 #include <linux/list.h>
46 #include <linux/list_nulls.h>
47 #include <linux/timer.h>
48 #include <linux/cache.h>
49 #include <linux/lockdep.h>
50 #include <linux/netdevice.h>
51 #include <linux/skbuff.h>	/* struct sk_buff */
52 #include <linux/mm.h>
53 #include <linux/security.h>
54 #include <linux/slab.h>
55 #include <linux/uaccess.h>
56 #include <linux/memcontrol.h>
57 #include <linux/res_counter.h>
58 
59 #include <linux/filter.h>
60 #include <linux/rculist_nulls.h>
61 #include <linux/poll.h>
62 
63 #include <linux/atomic.h>
64 #include <net/dst.h>
65 #include <net/checksum.h>
66 
67 struct cgroup;
68 struct cgroup_subsys;
69 #ifdef CONFIG_NET
70 int mem_cgroup_sockets_init(struct cgroup *cgrp, struct cgroup_subsys *ss);
71 void mem_cgroup_sockets_destroy(struct cgroup *cgrp, struct cgroup_subsys *ss);
72 #else
73 static inline
74 int mem_cgroup_sockets_init(struct cgroup *cgrp, struct cgroup_subsys *ss)
75 {
76 	return 0;
77 }
78 static inline
79 void mem_cgroup_sockets_destroy(struct cgroup *cgrp, struct cgroup_subsys *ss)
80 {
81 }
82 #endif
83 /*
84  * This structure really needs to be cleaned up.
85  * Most of it is for TCP, and not used by any of
86  * the other protocols.
87  */
88 
89 /* Define this to get the SOCK_DBG debugging facility. */
90 #define SOCK_DEBUGGING
91 #ifdef SOCK_DEBUGGING
92 #define SOCK_DEBUG(sk, msg...) do { if ((sk) && sock_flag((sk), SOCK_DBG)) \
93 					printk(KERN_DEBUG msg); } while (0)
94 #else
95 /* Validate arguments and do nothing */
96 static inline __printf(2, 3)
97 void SOCK_DEBUG(struct sock *sk, const char *msg, ...)
98 {
99 }
100 #endif
101 
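/*
 * Illustrative sketch, not part of the original header: SOCK_DEBUG() takes a
 * printf-style format and only produces output when the socket has SOCK_DBG
 * set (i.e. SO_DEBUG was enabled on it).  The function name below is
 * hypothetical.
 *
 *	static void example_state_change(struct sock *sk)
 *	{
 *		SOCK_DEBUG(sk, "example: state now %d\n", sk->sk_state);
 *	}
 */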
102 /* This is the per-socket lock.  The spinlock provides a synchronization
103  * between user contexts and software interrupt processing, whereas the
104  * mini-semaphore synchronizes multiple users amongst themselves.
105  */
106 typedef struct {
107 	spinlock_t		slock;
108 	int			owned;
109 	wait_queue_head_t	wq;
110 	/*
111 	 * We express the mutex-alike socket_lock semantics
112 	 * to the lock validator by explicitly managing
113 	 * the slock as a lock variant (in addition to
114 	 * the slock itself):
115 	 */
116 #ifdef CONFIG_DEBUG_LOCK_ALLOC
117 	struct lockdep_map dep_map;
118 #endif
119 } socket_lock_t;
120 
121 struct sock;
122 struct proto;
123 struct net;
124 
125 /**
126  *	struct sock_common - minimal network layer representation of sockets
127  *	@skc_daddr: Foreign IPv4 addr
128  *	@skc_rcv_saddr: Bound local IPv4 addr
129  *	@skc_hash: hash value used with various protocol lookup tables
130  *	@skc_u16hashes: two u16 hash values used by UDP lookup tables
131  *	@skc_family: network address family
132  *	@skc_state: Connection state
133  *	@skc_reuse: %SO_REUSEADDR setting
134  *	@skc_bound_dev_if: bound device index if != 0
135  *	@skc_bind_node: bind hash linkage for various protocol lookup tables
136  *	@skc_portaddr_node: second hash linkage for UDP/UDP-Lite protocol
137  *	@skc_prot: protocol handlers inside a network family
138  *	@skc_net: reference to the network namespace of this socket
139  *	@skc_node: main hash linkage for various protocol lookup tables
140  *	@skc_nulls_node: main hash linkage for TCP/UDP/UDP-Lite protocol
141  *	@skc_tx_queue_mapping: tx queue number for this connection
142  *	@skc_refcnt: reference count
143  *
144  *	This is the minimal network layer representation of sockets, the header
145  *	for struct sock and struct inet_timewait_sock.
146  */
147 struct sock_common {
148 	/* skc_daddr and skc_rcv_saddr must be grouped :
149 	 * cf INET_MATCH() and INET_TW_MATCH()
150 	 */
151 	__be32			skc_daddr;
152 	__be32			skc_rcv_saddr;
153 
154 	union  {
155 		unsigned int	skc_hash;
156 		__u16		skc_u16hashes[2];
157 	};
158 	unsigned short		skc_family;
159 	volatile unsigned char	skc_state;
160 	unsigned char		skc_reuse;
161 	int			skc_bound_dev_if;
162 	union {
163 		struct hlist_node	skc_bind_node;
164 		struct hlist_nulls_node skc_portaddr_node;
165 	};
166 	struct proto		*skc_prot;
167 #ifdef CONFIG_NET_NS
168 	struct net	 	*skc_net;
169 #endif
170 	/*
171 	 * fields between dontcopy_begin/dontcopy_end
172 	 * are not copied in sock_copy()
173 	 */
174 	/* private: */
175 	int			skc_dontcopy_begin[0];
176 	/* public: */
177 	union {
178 		struct hlist_node	skc_node;
179 		struct hlist_nulls_node skc_nulls_node;
180 	};
181 	int			skc_tx_queue_mapping;
182 	atomic_t		skc_refcnt;
183 	/* private: */
184 	int                     skc_dontcopy_end[0];
185 	/* public: */
186 };
187 
188 struct cg_proto;
189 /**
190   *	struct sock - network layer representation of sockets
191   *	@__sk_common: shared layout with inet_timewait_sock
192   *	@sk_shutdown: mask of %SEND_SHUTDOWN and/or %RCV_SHUTDOWN
193   *	@sk_userlocks: %SO_SNDBUF and %SO_RCVBUF settings
194   *	@sk_lock:	synchronizer
195   *	@sk_rcvbuf: size of receive buffer in bytes
196   *	@sk_wq: sock wait queue and async head
197   *	@sk_dst_cache: destination cache
198   *	@sk_dst_lock: destination cache lock
199   *	@sk_policy: flow policy
200   *	@sk_receive_queue: incoming packets
201   *	@sk_wmem_alloc: transmit queue bytes committed
202   *	@sk_write_queue: Packet sending queue
203   *	@sk_async_wait_queue: DMA copied packets
204   *	@sk_omem_alloc: "o" is "option" or "other"
205   *	@sk_wmem_queued: persistent queue size
206   *	@sk_forward_alloc: space allocated forward
207   *	@sk_allocation: allocation mode
208   *	@sk_sndbuf: size of send buffer in bytes
209   *	@sk_flags: %SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE,
210   *		   %SO_OOBINLINE settings, %SO_TIMESTAMPING settings
211   *	@sk_no_check: %SO_NO_CHECK setting, whether or not to checksum packets
212   *	@sk_route_caps: route capabilities (e.g. %NETIF_F_TSO)
213   *	@sk_route_nocaps: forbidden route capabilities (e.g. %NETIF_F_GSO_MASK)
214   *	@sk_gso_type: GSO type (e.g. %SKB_GSO_TCPV4)
215   *	@sk_gso_max_size: Maximum GSO segment size to build
216   *	@sk_lingertime: %SO_LINGER l_linger setting
217   *	@sk_backlog: always used with the per-socket spinlock held
218   *	@sk_callback_lock: used with the callbacks in the end of this struct
219   *	@sk_error_queue: rarely used
220   *	@sk_prot_creator: sk_prot of original sock creator (see ipv6_setsockopt,
221   *			  IPV6_ADDRFORM for instance)
222   *	@sk_err: last error
223   *	@sk_err_soft: errors that don't cause failure but are the cause of a
224   *		      persistent failure not just 'timed out'
225   *	@sk_drops: raw/udp drops counter
226   *	@sk_ack_backlog: current listen backlog
227   *	@sk_max_ack_backlog: listen backlog set in listen()
228   *	@sk_priority: %SO_PRIORITY setting
229   *	@sk_type: socket type (%SOCK_STREAM, etc)
230   *	@sk_protocol: which protocol this socket belongs in this network family
231   *	@sk_peer_pid: &struct pid for this socket's peer
232   *	@sk_peer_cred: %SO_PEERCRED setting
233   *	@sk_rcvlowat: %SO_RCVLOWAT setting
234   *	@sk_rcvtimeo: %SO_RCVTIMEO setting
235   *	@sk_sndtimeo: %SO_SNDTIMEO setting
236   *	@sk_rxhash: flow hash received from netif layer
237   *	@sk_filter: socket filtering instructions
238   *	@sk_protinfo: private area, net family specific, when not using slab
239   *	@sk_timer: sock cleanup timer
240   *	@sk_stamp: time stamp of last packet received
241   *	@sk_socket: Identd and reporting IO signals
242   *	@sk_user_data: RPC layer private data
243   *	@sk_sndmsg_page: cached page for sendmsg
244   *	@sk_sndmsg_off: cached offset for sendmsg
245   *	@sk_send_head: front of stuff to transmit
246   *	@sk_security: used by security modules
247   *	@sk_mark: generic packet mark
248   *	@sk_classid: this socket's cgroup classid
249   *	@sk_cgrp: this socket's cgroup-specific proto data
250   *	@sk_write_pending: a write to stream socket waits to start
251   *	@sk_state_change: callback to indicate change in the state of the sock
252   *	@sk_data_ready: callback to indicate there is data to be processed
253   *	@sk_write_space: callback to indicate there is send buffer space available
254   *	@sk_error_report: callback to indicate errors (e.g. %MSG_ERRQUEUE)
255   *	@sk_backlog_rcv: callback to process the backlog
256   *	@sk_destruct: called at sock freeing time, i.e. when all refcnt == 0
257  */
258 struct sock {
259 	/*
260 	 * Now struct inet_timewait_sock also uses sock_common, so please just
261 	 * don't add anything before this first member (__sk_common) --acme
262 	 */
263 	struct sock_common	__sk_common;
264 #define sk_node			__sk_common.skc_node
265 #define sk_nulls_node		__sk_common.skc_nulls_node
266 #define sk_refcnt		__sk_common.skc_refcnt
267 #define sk_tx_queue_mapping	__sk_common.skc_tx_queue_mapping
268 
269 #define sk_dontcopy_begin	__sk_common.skc_dontcopy_begin
270 #define sk_dontcopy_end		__sk_common.skc_dontcopy_end
271 #define sk_hash			__sk_common.skc_hash
272 #define sk_family		__sk_common.skc_family
273 #define sk_state		__sk_common.skc_state
274 #define sk_reuse		__sk_common.skc_reuse
275 #define sk_bound_dev_if		__sk_common.skc_bound_dev_if
276 #define sk_bind_node		__sk_common.skc_bind_node
277 #define sk_prot			__sk_common.skc_prot
278 #define sk_net			__sk_common.skc_net
279 	socket_lock_t		sk_lock;
280 	struct sk_buff_head	sk_receive_queue;
281 	/*
282 	 * The backlog queue is special, it is always used with
283 	 * the per-socket spinlock held and requires low latency
284 	 * access. Therefore we special case its implementation.
285 	 * Note : rmem_alloc is in this structure to fill a hole
286 	 * on 64bit arches, not because it is logically part of
287 	 * backlog.
288 	 */
289 	struct {
290 		atomic_t	rmem_alloc;
291 		int		len;
292 		struct sk_buff	*head;
293 		struct sk_buff	*tail;
294 	} sk_backlog;
295 #define sk_rmem_alloc sk_backlog.rmem_alloc
296 	int			sk_forward_alloc;
297 #ifdef CONFIG_RPS
298 	__u32			sk_rxhash;
299 #endif
300 	atomic_t		sk_drops;
301 	int			sk_rcvbuf;
302 
303 	struct sk_filter __rcu	*sk_filter;
304 	struct socket_wq __rcu	*sk_wq;
305 
306 #ifdef CONFIG_NET_DMA
307 	struct sk_buff_head	sk_async_wait_queue;
308 #endif
309 
310 #ifdef CONFIG_XFRM
311 	struct xfrm_policy	*sk_policy[2];
312 #endif
313 	unsigned long 		sk_flags;
314 	struct dst_entry	*sk_dst_cache;
315 	spinlock_t		sk_dst_lock;
316 	atomic_t		sk_wmem_alloc;
317 	atomic_t		sk_omem_alloc;
318 	int			sk_sndbuf;
319 	struct sk_buff_head	sk_write_queue;
320 	kmemcheck_bitfield_begin(flags);
321 	unsigned int		sk_shutdown  : 2,
322 				sk_no_check  : 2,
323 				sk_userlocks : 4,
324 				sk_protocol  : 8,
325 				sk_type      : 16;
326 	kmemcheck_bitfield_end(flags);
327 	int			sk_wmem_queued;
328 	gfp_t			sk_allocation;
329 	netdev_features_t	sk_route_caps;
330 	netdev_features_t	sk_route_nocaps;
331 	int			sk_gso_type;
332 	unsigned int		sk_gso_max_size;
333 	int			sk_rcvlowat;
334 	unsigned long	        sk_lingertime;
335 	struct sk_buff_head	sk_error_queue;
336 	struct proto		*sk_prot_creator;
337 	rwlock_t		sk_callback_lock;
338 	int			sk_err,
339 				sk_err_soft;
340 	unsigned short		sk_ack_backlog;
341 	unsigned short		sk_max_ack_backlog;
342 	__u32			sk_priority;
343 #ifdef CONFIG_CGROUPS
344 	__u32			sk_cgrp_prioidx;
345 #endif
346 	struct pid		*sk_peer_pid;
347 	const struct cred	*sk_peer_cred;
348 	long			sk_rcvtimeo;
349 	long			sk_sndtimeo;
350 	void			*sk_protinfo;
351 	struct timer_list	sk_timer;
352 	ktime_t			sk_stamp;
353 	struct socket		*sk_socket;
354 	void			*sk_user_data;
355 	struct page		*sk_sndmsg_page;
356 	struct sk_buff		*sk_send_head;
357 	__u32			sk_sndmsg_off;
358 	int			sk_write_pending;
359 #ifdef CONFIG_SECURITY
360 	void			*sk_security;
361 #endif
362 	__u32			sk_mark;
363 	u32			sk_classid;
364 	struct cg_proto		*sk_cgrp;
365 	void			(*sk_state_change)(struct sock *sk);
366 	void			(*sk_data_ready)(struct sock *sk, int bytes);
367 	void			(*sk_write_space)(struct sock *sk);
368 	void			(*sk_error_report)(struct sock *sk);
369   	int			(*sk_backlog_rcv)(struct sock *sk,
370 						  struct sk_buff *skb);
371 	void                    (*sk_destruct)(struct sock *sk);
372 };
373 
374 /*
375  * Hashed lists helper routines
376  */
377 static inline struct sock *sk_entry(const struct hlist_node *node)
378 {
379 	return hlist_entry(node, struct sock, sk_node);
380 }
381 
382 static inline struct sock *__sk_head(const struct hlist_head *head)
383 {
384 	return hlist_entry(head->first, struct sock, sk_node);
385 }
386 
387 static inline struct sock *sk_head(const struct hlist_head *head)
388 {
389 	return hlist_empty(head) ? NULL : __sk_head(head);
390 }
391 
392 static inline struct sock *__sk_nulls_head(const struct hlist_nulls_head *head)
393 {
394 	return hlist_nulls_entry(head->first, struct sock, sk_nulls_node);
395 }
396 
397 static inline struct sock *sk_nulls_head(const struct hlist_nulls_head *head)
398 {
399 	return hlist_nulls_empty(head) ? NULL : __sk_nulls_head(head);
400 }
401 
402 static inline struct sock *sk_next(const struct sock *sk)
403 {
404 	return sk->sk_node.next ?
405 		hlist_entry(sk->sk_node.next, struct sock, sk_node) : NULL;
406 }
407 
408 static inline struct sock *sk_nulls_next(const struct sock *sk)
409 {
410 	return (!is_a_nulls(sk->sk_nulls_node.next)) ?
411 		hlist_nulls_entry(sk->sk_nulls_node.next,
412 				  struct sock, sk_nulls_node) :
413 		NULL;
414 }
415 
416 static inline int sk_unhashed(const struct sock *sk)
417 {
418 	return hlist_unhashed(&sk->sk_node);
419 }
420 
421 static inline int sk_hashed(const struct sock *sk)
422 {
423 	return !sk_unhashed(sk);
424 }
425 
426 static __inline__ void sk_node_init(struct hlist_node *node)
427 {
428 	node->pprev = NULL;
429 }
430 
431 static __inline__ void sk_nulls_node_init(struct hlist_nulls_node *node)
432 {
433 	node->pprev = NULL;
434 }
435 
436 static __inline__ void __sk_del_node(struct sock *sk)
437 {
438 	__hlist_del(&sk->sk_node);
439 }
440 
441 /* NB: equivalent to hlist_del_init_rcu */
442 static __inline__ int __sk_del_node_init(struct sock *sk)
443 {
444 	if (sk_hashed(sk)) {
445 		__sk_del_node(sk);
446 		sk_node_init(&sk->sk_node);
447 		return 1;
448 	}
449 	return 0;
450 }
451 
452 /* Grab socket reference count. This operation is valid only
453    when sk is ALREADY grabbed, e.g. it has been found in a hash table
454    or a list and the lookup is made under a lock preventing hash table
455    modifications.
456  */
457 
458 static inline void sock_hold(struct sock *sk)
459 {
460 	atomic_inc(&sk->sk_refcnt);
461 }
462 
463 /* Ungrab socket in a context which assumes that the socket refcnt
464    cannot hit zero, e.g. this is true in the context of any socketcall.
465  */
466 static inline void __sock_put(struct sock *sk)
467 {
468 	atomic_dec(&sk->sk_refcnt);
469 }
470 
471 static __inline__ int sk_del_node_init(struct sock *sk)
472 {
473 	int rc = __sk_del_node_init(sk);
474 
475 	if (rc) {
476 		/* paranoid for a while -acme */
477 		WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
478 		__sock_put(sk);
479 	}
480 	return rc;
481 }
482 #define sk_del_node_init_rcu(sk)	sk_del_node_init(sk)
483 
484 static __inline__ int __sk_nulls_del_node_init_rcu(struct sock *sk)
485 {
486 	if (sk_hashed(sk)) {
487 		hlist_nulls_del_init_rcu(&sk->sk_nulls_node);
488 		return 1;
489 	}
490 	return 0;
491 }
492 
493 static __inline__ int sk_nulls_del_node_init_rcu(struct sock *sk)
494 {
495 	int rc = __sk_nulls_del_node_init_rcu(sk);
496 
497 	if (rc) {
498 		/* paranoid for a while -acme */
499 		WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
500 		__sock_put(sk);
501 	}
502 	return rc;
503 }
504 
505 static __inline__ void __sk_add_node(struct sock *sk, struct hlist_head *list)
506 {
507 	hlist_add_head(&sk->sk_node, list);
508 }
509 
510 static __inline__ void sk_add_node(struct sock *sk, struct hlist_head *list)
511 {
512 	sock_hold(sk);
513 	__sk_add_node(sk, list);
514 }
515 
516 static __inline__ void sk_add_node_rcu(struct sock *sk, struct hlist_head *list)
517 {
518 	sock_hold(sk);
519 	hlist_add_head_rcu(&sk->sk_node, list);
520 }
521 
522 static __inline__ void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
523 {
524 	hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
525 }
526 
527 static __inline__ void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
528 {
529 	sock_hold(sk);
530 	__sk_nulls_add_node_rcu(sk, list);
531 }
532 
533 static __inline__ void __sk_del_bind_node(struct sock *sk)
534 {
535 	__hlist_del(&sk->sk_bind_node);
536 }
537 
538 static __inline__ void sk_add_bind_node(struct sock *sk,
539 					struct hlist_head *list)
540 {
541 	hlist_add_head(&sk->sk_bind_node, list);
542 }
543 
544 #define sk_for_each(__sk, node, list) \
545 	hlist_for_each_entry(__sk, node, list, sk_node)
546 #define sk_for_each_rcu(__sk, node, list) \
547 	hlist_for_each_entry_rcu(__sk, node, list, sk_node)
548 #define sk_nulls_for_each(__sk, node, list) \
549 	hlist_nulls_for_each_entry(__sk, node, list, sk_nulls_node)
550 #define sk_nulls_for_each_rcu(__sk, node, list) \
551 	hlist_nulls_for_each_entry_rcu(__sk, node, list, sk_nulls_node)
552 #define sk_for_each_from(__sk, node) \
553 	if (__sk && ({ node = &(__sk)->sk_node; 1; })) \
554 		hlist_for_each_entry_from(__sk, node, sk_node)
555 #define sk_nulls_for_each_from(__sk, node) \
556 	if (__sk && ({ node = &(__sk)->sk_nulls_node; 1; })) \
557 		hlist_nulls_for_each_entry_from(__sk, node, sk_nulls_node)
558 #define sk_for_each_safe(__sk, node, tmp, list) \
559 	hlist_for_each_entry_safe(__sk, node, tmp, list, sk_node)
560 #define sk_for_each_bound(__sk, node, list) \
561 	hlist_for_each_entry(__sk, node, list, sk_bind_node)
562 
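/*
 * Illustrative sketch, not part of the original header: a typical hash-chain
 * lookup walks a chain with sk_for_each() while holding the lock that
 * protects that chain.  The chain head name below is hypothetical.
 *
 *	struct sock *sk;
 *	struct hlist_node *node;
 *
 *	sk_for_each(sk, node, &example_hash_head)
 *		if (sk->sk_family == AF_INET && !sk->sk_bound_dev_if)
 *			return sk;
 */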
563 /* Sock flags */
564 enum sock_flags {
565 	SOCK_DEAD,
566 	SOCK_DONE,
567 	SOCK_URGINLINE,
568 	SOCK_KEEPOPEN,
569 	SOCK_LINGER,
570 	SOCK_DESTROY,
571 	SOCK_BROADCAST,
572 	SOCK_TIMESTAMP,
573 	SOCK_ZAPPED,
574 	SOCK_USE_WRITE_QUEUE, /* whether to call sk->sk_write_space in sock_wfree */
575 	SOCK_DBG, /* %SO_DEBUG setting */
576 	SOCK_RCVTSTAMP, /* %SO_TIMESTAMP setting */
577 	SOCK_RCVTSTAMPNS, /* %SO_TIMESTAMPNS setting */
578 	SOCK_LOCALROUTE, /* route locally only, %SO_DONTROUTE setting */
579 	SOCK_QUEUE_SHRUNK, /* write queue has been shrunk recently */
580 	SOCK_TIMESTAMPING_TX_HARDWARE,  /* %SOF_TIMESTAMPING_TX_HARDWARE */
581 	SOCK_TIMESTAMPING_TX_SOFTWARE,  /* %SOF_TIMESTAMPING_TX_SOFTWARE */
582 	SOCK_TIMESTAMPING_RX_HARDWARE,  /* %SOF_TIMESTAMPING_RX_HARDWARE */
583 	SOCK_TIMESTAMPING_RX_SOFTWARE,  /* %SOF_TIMESTAMPING_RX_SOFTWARE */
584 	SOCK_TIMESTAMPING_SOFTWARE,     /* %SOF_TIMESTAMPING_SOFTWARE */
585 	SOCK_TIMESTAMPING_RAW_HARDWARE, /* %SOF_TIMESTAMPING_RAW_HARDWARE */
586 	SOCK_TIMESTAMPING_SYS_HARDWARE, /* %SOF_TIMESTAMPING_SYS_HARDWARE */
587 	SOCK_FASYNC, /* fasync() active */
588 	SOCK_RXQ_OVFL,
589 	SOCK_ZEROCOPY, /* buffers from userspace */
590 	SOCK_WIFI_STATUS, /* push wifi status to userspace */
591 };
592 
593 static inline void sock_copy_flags(struct sock *nsk, struct sock *osk)
594 {
595 	nsk->sk_flags = osk->sk_flags;
596 }
597 
598 static inline void sock_set_flag(struct sock *sk, enum sock_flags flag)
599 {
600 	__set_bit(flag, &sk->sk_flags);
601 }
602 
603 static inline void sock_reset_flag(struct sock *sk, enum sock_flags flag)
604 {
605 	__clear_bit(flag, &sk->sk_flags);
606 }
607 
608 static inline int sock_flag(struct sock *sk, enum sock_flags flag)
609 {
610 	return test_bit(flag, &sk->sk_flags);
611 }
612 
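/*
 * Illustrative sketch, not part of the original header: socket options that
 * map to a single bit are implemented with these helpers, e.g. SO_KEEPALIVE
 * handling is roughly:
 *
 *	if (val)
 *		sock_set_flag(sk, SOCK_KEEPOPEN);
 *	else
 *		sock_reset_flag(sk, SOCK_KEEPOPEN);
 */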
613 static inline void sk_acceptq_removed(struct sock *sk)
614 {
615 	sk->sk_ack_backlog--;
616 }
617 
618 static inline void sk_acceptq_added(struct sock *sk)
619 {
620 	sk->sk_ack_backlog++;
621 }
622 
623 static inline int sk_acceptq_is_full(struct sock *sk)
624 {
625 	return sk->sk_ack_backlog > sk->sk_max_ack_backlog;
626 }
627 
628 /*
629  * Compute minimal free write space needed to queue new packets.
630  */
631 static inline int sk_stream_min_wspace(struct sock *sk)
632 {
633 	return sk->sk_wmem_queued >> 1;
634 }
635 
636 static inline int sk_stream_wspace(struct sock *sk)
637 {
638 	return sk->sk_sndbuf - sk->sk_wmem_queued;
639 }
640 
641 extern void sk_stream_write_space(struct sock *sk);
642 
643 static inline int sk_stream_memory_free(struct sock *sk)
644 {
645 	return sk->sk_wmem_queued < sk->sk_sndbuf;
646 }
647 
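/*
 * Worked example, not part of the original header: with sk_sndbuf == 16384
 * and sk_wmem_queued == 12000, sk_stream_wspace() returns 4384 and
 * sk_stream_min_wspace() returns 6000, so sk_stream_write_space() would not
 * yet report the socket writable; sk_stream_memory_free() is still true
 * because 12000 < 16384.
 */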
648 /* OOB backlog add */
649 static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
650 {
651 	/* don't leave the skb dst un-refcounted, we are going to leave the rcu lock */
652 	skb_dst_force(skb);
653 
654 	if (!sk->sk_backlog.tail)
655 		sk->sk_backlog.head = skb;
656 	else
657 		sk->sk_backlog.tail->next = skb;
658 
659 	sk->sk_backlog.tail = skb;
660 	skb->next = NULL;
661 }
662 
663 /*
664  * Take into account size of receive queue and backlog queue
665  */
666 static inline bool sk_rcvqueues_full(const struct sock *sk, const struct sk_buff *skb)
667 {
668 	unsigned int qsize = sk->sk_backlog.len + atomic_read(&sk->sk_rmem_alloc);
669 
670 	return qsize + skb->truesize > sk->sk_rcvbuf;
671 }
672 
673 /* The per-socket spinlock must be held here. */
674 static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *skb)
675 {
676 	if (sk_rcvqueues_full(sk, skb))
677 		return -ENOBUFS;
678 
679 	__sk_add_backlog(sk, skb);
680 	sk->sk_backlog.len += skb->truesize;
681 	return 0;
682 }
683 
684 static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
685 {
686 	return sk->sk_backlog_rcv(sk, skb);
687 }
688 
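/*
 * Illustrative sketch, not part of the original header: softirq receive
 * handlers roughly follow this pattern, processing directly when no user
 * context owns the socket, queueing to the backlog otherwise, and dropping
 * the packet when both the receive queue and the backlog are full:
 *
 *	bh_lock_sock(sk);
 *	if (!sock_owned_by_user(sk))
 *		sk_backlog_rcv(sk, skb);
 *	else if (sk_add_backlog(sk, skb))
 *		kfree_skb(skb);
 *	bh_unlock_sock(sk);
 */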
689 static inline void sock_rps_record_flow(const struct sock *sk)
690 {
691 #ifdef CONFIG_RPS
692 	struct rps_sock_flow_table *sock_flow_table;
693 
694 	rcu_read_lock();
695 	sock_flow_table = rcu_dereference(rps_sock_flow_table);
696 	rps_record_sock_flow(sock_flow_table, sk->sk_rxhash);
697 	rcu_read_unlock();
698 #endif
699 }
700 
701 static inline void sock_rps_reset_flow(const struct sock *sk)
702 {
703 #ifdef CONFIG_RPS
704 	struct rps_sock_flow_table *sock_flow_table;
705 
706 	rcu_read_lock();
707 	sock_flow_table = rcu_dereference(rps_sock_flow_table);
708 	rps_reset_sock_flow(sock_flow_table, sk->sk_rxhash);
709 	rcu_read_unlock();
710 #endif
711 }
712 
713 static inline void sock_rps_save_rxhash(struct sock *sk,
714 					const struct sk_buff *skb)
715 {
716 #ifdef CONFIG_RPS
717 	if (unlikely(sk->sk_rxhash != skb->rxhash)) {
718 		sock_rps_reset_flow(sk);
719 		sk->sk_rxhash = skb->rxhash;
720 	}
721 #endif
722 }
723 
724 static inline void sock_rps_reset_rxhash(struct sock *sk)
725 {
726 #ifdef CONFIG_RPS
727 	sock_rps_reset_flow(sk);
728 	sk->sk_rxhash = 0;
729 #endif
730 }
731 
732 #define sk_wait_event(__sk, __timeo, __condition)			\
733 	({	int __rc;						\
734 		release_sock(__sk);					\
735 		__rc = __condition;					\
736 		if (!__rc) {						\
737 			*(__timeo) = schedule_timeout(*(__timeo));	\
738 		}							\
739 		lock_sock(__sk);					\
740 		__rc = __condition;					\
741 		__rc;							\
742 	})
743 
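/*
 * Illustrative sketch, not part of the original header: sk_wait_event() is
 * meant to be called with the socket lock held, inside a prepare_to_wait()/
 * finish_wait() pair, roughly as sk_wait_data() waits for input (timeo is
 * the remaining timeout in jiffies):
 *
 *	DEFINE_WAIT(wait);
 *
 *	prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
 *	rc = sk_wait_event(sk, &timeo,
 *			   !skb_queue_empty(&sk->sk_receive_queue));
 *	finish_wait(sk_sleep(sk), &wait);
 */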
744 extern int sk_stream_wait_connect(struct sock *sk, long *timeo_p);
745 extern int sk_stream_wait_memory(struct sock *sk, long *timeo_p);
746 extern void sk_stream_wait_close(struct sock *sk, long timeo_p);
747 extern int sk_stream_error(struct sock *sk, int flags, int err);
748 extern void sk_stream_kill_queues(struct sock *sk);
749 
750 extern int sk_wait_data(struct sock *sk, long *timeo);
751 
752 struct request_sock_ops;
753 struct timewait_sock_ops;
754 struct inet_hashinfo;
755 struct raw_hashinfo;
756 struct module;
757 
758 /* Networking protocol blocks we attach to sockets.
759  * socket layer -> transport layer interface
760  * transport -> network interface is defined by struct inet_proto
761  */
762 struct proto {
763 	void			(*close)(struct sock *sk,
764 					long timeout);
765 	int			(*connect)(struct sock *sk,
766 				        struct sockaddr *uaddr,
767 					int addr_len);
768 	int			(*disconnect)(struct sock *sk, int flags);
769 
770 	struct sock *		(*accept) (struct sock *sk, int flags, int *err);
771 
772 	int			(*ioctl)(struct sock *sk, int cmd,
773 					 unsigned long arg);
774 	int			(*init)(struct sock *sk);
775 	void			(*destroy)(struct sock *sk);
776 	void			(*shutdown)(struct sock *sk, int how);
777 	int			(*setsockopt)(struct sock *sk, int level,
778 					int optname, char __user *optval,
779 					unsigned int optlen);
780 	int			(*getsockopt)(struct sock *sk, int level,
781 					int optname, char __user *optval,
782 					int __user *option);
783 #ifdef CONFIG_COMPAT
784 	int			(*compat_setsockopt)(struct sock *sk,
785 					int level,
786 					int optname, char __user *optval,
787 					unsigned int optlen);
788 	int			(*compat_getsockopt)(struct sock *sk,
789 					int level,
790 					int optname, char __user *optval,
791 					int __user *option);
792 	int			(*compat_ioctl)(struct sock *sk,
793 					unsigned int cmd, unsigned long arg);
794 #endif
795 	int			(*sendmsg)(struct kiocb *iocb, struct sock *sk,
796 					   struct msghdr *msg, size_t len);
797 	int			(*recvmsg)(struct kiocb *iocb, struct sock *sk,
798 					   struct msghdr *msg,
799 					size_t len, int noblock, int flags,
800 					int *addr_len);
801 	int			(*sendpage)(struct sock *sk, struct page *page,
802 					int offset, size_t size, int flags);
803 	int			(*bind)(struct sock *sk,
804 					struct sockaddr *uaddr, int addr_len);
805 
806 	int			(*backlog_rcv) (struct sock *sk,
807 						struct sk_buff *skb);
808 
809 	/* Keeping track of sk's, looking them up, and port selection methods. */
810 	void			(*hash)(struct sock *sk);
811 	void			(*unhash)(struct sock *sk);
812 	void			(*rehash)(struct sock *sk);
813 	int			(*get_port)(struct sock *sk, unsigned short snum);
814 	void			(*clear_sk)(struct sock *sk, int size);
815 
816 	/* Keeping track of sockets in use */
817 #ifdef CONFIG_PROC_FS
818 	unsigned int		inuse_idx;
819 #endif
820 
821 	/* Memory pressure */
822 	void			(*enter_memory_pressure)(struct sock *sk);
823 	atomic_long_t		*memory_allocated;	/* Current allocated memory. */
824 	struct percpu_counter	*sockets_allocated;	/* Current number of sockets. */
825 	/*
826 	 * Pressure flag: try to collapse.
827 	 * Technical note: it is used by multiple contexts non-atomically.
828 	 * All of __sk_mem_schedule() is of this nature: accounting
829 	 * is strict, actions are advisory and have some latency.
830 	 */
831 	int			*memory_pressure;
832 	long			*sysctl_mem;
833 	int			*sysctl_wmem;
834 	int			*sysctl_rmem;
835 	int			max_header;
836 	bool			no_autobind;
837 
838 	struct kmem_cache	*slab;
839 	unsigned int		obj_size;
840 	int			slab_flags;
841 
842 	struct percpu_counter	*orphan_count;
843 
844 	struct request_sock_ops	*rsk_prot;
845 	struct timewait_sock_ops *twsk_prot;
846 
847 	union {
848 		struct inet_hashinfo	*hashinfo;
849 		struct udp_table	*udp_table;
850 		struct raw_hashinfo	*raw_hash;
851 	} h;
852 
853 	struct module		*owner;
854 
855 	char			name[32];
856 
857 	struct list_head	node;
858 #ifdef SOCK_REFCNT_DEBUG
859 	atomic_t		socks;
860 #endif
861 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
862 	/*
863 	 * cgroup specific init/deinit functions. Called once for all
864 	 * protocols that implement it, from cgroups populate function.
865 	 * This function has to set up any files the protocol wants to
866 	 * appear in the kmem cgroup filesystem.
867 	 */
868 	int			(*init_cgroup)(struct cgroup *cgrp,
869 					       struct cgroup_subsys *ss);
870 	void			(*destroy_cgroup)(struct cgroup *cgrp,
871 						  struct cgroup_subsys *ss);
872 	struct cg_proto		*(*proto_cgroup)(struct mem_cgroup *memcg);
873 #endif
874 };
875 
876 struct cg_proto {
877 	void			(*enter_memory_pressure)(struct sock *sk);
878 	struct res_counter	*memory_allocated;	/* Current allocated memory. */
879 	struct percpu_counter	*sockets_allocated;	/* Current number of sockets. */
880 	int			*memory_pressure;
881 	long			*sysctl_mem;
882 	/*
883 	 * The memcg field is used to find which memcg we directly belong to.
884 	 * Each memcg struct can hold more than one cg_proto, so container_of
885 	 * won't really cut it.
886 	 *
887 	 * The elegant solution would be having an inverse function to
888 	 * proto_cgroup in struct proto, but that means polluting the structure
889 	 * for everybody, instead of just for memcg users.
890 	 */
891 	struct mem_cgroup	*memcg;
892 };
893 
894 extern int proto_register(struct proto *prot, int alloc_slab);
895 extern void proto_unregister(struct proto *prot);
896 
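/*
 * Illustrative sketch, not part of the original header: a protocol module
 * fills in a struct proto and registers it at init time; the names and the
 * obj_size below are hypothetical (obj_size is normally the size of the
 * protocol's own sock structure).
 *
 *	static struct proto example_proto = {
 *		.name		= "EXAMPLE",
 *		.owner		= THIS_MODULE,
 *		.obj_size	= sizeof(struct example_sock),
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return proto_register(&example_proto, 1);
 *	}
 */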
897 #ifdef SOCK_REFCNT_DEBUG
898 static inline void sk_refcnt_debug_inc(struct sock *sk)
899 {
900 	atomic_inc(&sk->sk_prot->socks);
901 }
902 
903 static inline void sk_refcnt_debug_dec(struct sock *sk)
904 {
905 	atomic_dec(&sk->sk_prot->socks);
906 	printk(KERN_DEBUG "%s socket %p released, %d are still alive\n",
907 	       sk->sk_prot->name, sk, atomic_read(&sk->sk_prot->socks));
908 }
909 
910 static inline void sk_refcnt_debug_release(const struct sock *sk)
911 {
912 	if (atomic_read(&sk->sk_refcnt) != 1)
913 		printk(KERN_DEBUG "Destruction of the %s socket %p delayed, refcnt=%d\n",
914 		       sk->sk_prot->name, sk, atomic_read(&sk->sk_refcnt));
915 }
916 #else /* SOCK_REFCNT_DEBUG */
917 #define sk_refcnt_debug_inc(sk) do { } while (0)
918 #define sk_refcnt_debug_dec(sk) do { } while (0)
919 #define sk_refcnt_debug_release(sk) do { } while (0)
920 #endif /* SOCK_REFCNT_DEBUG */
921 
922 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
923 extern struct jump_label_key memcg_socket_limit_enabled;
924 static inline struct cg_proto *parent_cg_proto(struct proto *proto,
925 					       struct cg_proto *cg_proto)
926 {
927 	return proto->proto_cgroup(parent_mem_cgroup(cg_proto->memcg));
928 }
929 #define mem_cgroup_sockets_enabled static_branch(&memcg_socket_limit_enabled)
930 #else
931 #define mem_cgroup_sockets_enabled 0
932 static inline struct cg_proto *parent_cg_proto(struct proto *proto,
933 					       struct cg_proto *cg_proto)
934 {
935 	return NULL;
936 }
937 #endif
938 
939 
940 static inline bool sk_has_memory_pressure(const struct sock *sk)
941 {
942 	return sk->sk_prot->memory_pressure != NULL;
943 }
944 
945 static inline bool sk_under_memory_pressure(const struct sock *sk)
946 {
947 	if (!sk->sk_prot->memory_pressure)
948 		return false;
949 
950 	if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
951 		return !!*sk->sk_cgrp->memory_pressure;
952 
953 	return !!*sk->sk_prot->memory_pressure;
954 }
955 
956 static inline void sk_leave_memory_pressure(struct sock *sk)
957 {
958 	int *memory_pressure = sk->sk_prot->memory_pressure;
959 
960 	if (!memory_pressure)
961 		return;
962 
963 	if (*memory_pressure)
964 		*memory_pressure = 0;
965 
966 	if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
967 		struct cg_proto *cg_proto = sk->sk_cgrp;
968 		struct proto *prot = sk->sk_prot;
969 
970 		for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto))
971 			if (*cg_proto->memory_pressure)
972 				*cg_proto->memory_pressure = 0;
973 	}
974 
975 }
976 
977 static inline void sk_enter_memory_pressure(struct sock *sk)
978 {
979 	if (!sk->sk_prot->enter_memory_pressure)
980 		return;
981 
982 	if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
983 		struct cg_proto *cg_proto = sk->sk_cgrp;
984 		struct proto *prot = sk->sk_prot;
985 
986 		for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto))
987 			cg_proto->enter_memory_pressure(sk);
988 	}
989 
990 	sk->sk_prot->enter_memory_pressure(sk);
991 }
992 
993 static inline long sk_prot_mem_limits(const struct sock *sk, int index)
994 {
995 	long *prot = sk->sk_prot->sysctl_mem;
996 	if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
997 		prot = sk->sk_cgrp->sysctl_mem;
998 	return prot[index];
999 }
1000 
1001 static inline void memcg_memory_allocated_add(struct cg_proto *prot,
1002 					      unsigned long amt,
1003 					      int *parent_status)
1004 {
1005 	struct res_counter *fail;
1006 	int ret;
1007 
1008 	ret = res_counter_charge(prot->memory_allocated,
1009 				 amt << PAGE_SHIFT, &fail);
1010 
1011 	if (ret < 0)
1012 		*parent_status = OVER_LIMIT;
1013 }
1014 
1015 static inline void memcg_memory_allocated_sub(struct cg_proto *prot,
1016 					      unsigned long amt)
1017 {
1018 	res_counter_uncharge(prot->memory_allocated, amt << PAGE_SHIFT);
1019 }
1020 
1021 static inline u64 memcg_memory_allocated_read(struct cg_proto *prot)
1022 {
1023 	u64 ret;
1024 	ret = res_counter_read_u64(prot->memory_allocated, RES_USAGE);
1025 	return ret >> PAGE_SHIFT;
1026 }
1027 
1028 static inline long
1029 sk_memory_allocated(const struct sock *sk)
1030 {
1031 	struct proto *prot = sk->sk_prot;
1032 	if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
1033 		return memcg_memory_allocated_read(sk->sk_cgrp);
1034 
1035 	return atomic_long_read(prot->memory_allocated);
1036 }
1037 
1038 static inline long
1039 sk_memory_allocated_add(struct sock *sk, int amt, int *parent_status)
1040 {
1041 	struct proto *prot = sk->sk_prot;
1042 
1043 	if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
1044 		memcg_memory_allocated_add(sk->sk_cgrp, amt, parent_status);
1045 		/* update the root cgroup regardless */
1046 		atomic_long_add_return(amt, prot->memory_allocated);
1047 		return memcg_memory_allocated_read(sk->sk_cgrp);
1048 	}
1049 
1050 	return atomic_long_add_return(amt, prot->memory_allocated);
1051 }
1052 
1053 static inline void
1054 sk_memory_allocated_sub(struct sock *sk, int amt, int parent_status)
1055 {
1056 	struct proto *prot = sk->sk_prot;
1057 
1058 	if (mem_cgroup_sockets_enabled && sk->sk_cgrp &&
1059 	    parent_status != OVER_LIMIT) /* Otherwise was uncharged already */
1060 		memcg_memory_allocated_sub(sk->sk_cgrp, amt);
1061 
1062 	atomic_long_sub(amt, prot->memory_allocated);
1063 }
1064 
1065 static inline void sk_sockets_allocated_dec(struct sock *sk)
1066 {
1067 	struct proto *prot = sk->sk_prot;
1068 
1069 	if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
1070 		struct cg_proto *cg_proto = sk->sk_cgrp;
1071 
1072 		for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto))
1073 			percpu_counter_dec(cg_proto->sockets_allocated);
1074 	}
1075 
1076 	percpu_counter_dec(prot->sockets_allocated);
1077 }
1078 
1079 static inline void sk_sockets_allocated_inc(struct sock *sk)
1080 {
1081 	struct proto *prot = sk->sk_prot;
1082 
1083 	if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
1084 		struct cg_proto *cg_proto = sk->sk_cgrp;
1085 
1086 		for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto))
1087 			percpu_counter_inc(cg_proto->sockets_allocated);
1088 	}
1089 
1090 	percpu_counter_inc(prot->sockets_allocated);
1091 }
1092 
1093 static inline int
1094 sk_sockets_allocated_read_positive(struct sock *sk)
1095 {
1096 	struct proto *prot = sk->sk_prot;
1097 
1098 	if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
1099 		return percpu_counter_sum_positive(sk->sk_cgrp->sockets_allocated);
1100 
1101 	return percpu_counter_sum_positive(prot->sockets_allocated);
1102 }
1103 
1104 static inline int
1105 proto_sockets_allocated_sum_positive(struct proto *prot)
1106 {
1107 	return percpu_counter_sum_positive(prot->sockets_allocated);
1108 }
1109 
1110 static inline long
1111 proto_memory_allocated(struct proto *prot)
1112 {
1113 	return atomic_long_read(prot->memory_allocated);
1114 }
1115 
1116 static inline bool
1117 proto_memory_pressure(struct proto *prot)
1118 {
1119 	if (!prot->memory_pressure)
1120 		return false;
1121 	return !!*prot->memory_pressure;
1122 }
1123 
1124 
1125 #ifdef CONFIG_PROC_FS
1126 /* Called with local bh disabled */
1127 extern void sock_prot_inuse_add(struct net *net, struct proto *prot, int inc);
1128 extern int sock_prot_inuse_get(struct net *net, struct proto *proto);
1129 #else
1130 static inline void sock_prot_inuse_add(struct net *net, struct proto *prot,
1131 		int inc)
1132 {
1133 }
1134 #endif
1135 
1136 
1137 /* With per-bucket locks this operation is not atomic, so that
1138  * this version is no worse.
1139  */
1140 static inline void __sk_prot_rehash(struct sock *sk)
1141 {
1142 	sk->sk_prot->unhash(sk);
1143 	sk->sk_prot->hash(sk);
1144 }
1145 
1146 void sk_prot_clear_portaddr_nulls(struct sock *sk, int size);
1147 
1148 /* About 10 seconds */
1149 #define SOCK_DESTROY_TIME (10*HZ)
1150 
1151 /* Ports 0-1023 can't be bound to unless you are superuser */
1152 #define PROT_SOCK	1024
1153 
1154 #define SHUTDOWN_MASK	3
1155 #define RCV_SHUTDOWN	1
1156 #define SEND_SHUTDOWN	2
1157 
1158 #define SOCK_SNDBUF_LOCK	1
1159 #define SOCK_RCVBUF_LOCK	2
1160 #define SOCK_BINDADDR_LOCK	4
1161 #define SOCK_BINDPORT_LOCK	8
1162 
1163 /* sock_iocb: used to kick off async processing of socket ios */
1164 struct sock_iocb {
1165 	struct list_head	list;
1166 
1167 	int			flags;
1168 	int			size;
1169 	struct socket		*sock;
1170 	struct sock		*sk;
1171 	struct scm_cookie	*scm;
1172 	struct msghdr		*msg, async_msg;
1173 	struct kiocb		*kiocb;
1174 };
1175 
1176 static inline struct sock_iocb *kiocb_to_siocb(struct kiocb *iocb)
1177 {
1178 	return (struct sock_iocb *)iocb->private;
1179 }
1180 
1181 static inline struct kiocb *siocb_to_kiocb(struct sock_iocb *si)
1182 {
1183 	return si->kiocb;
1184 }
1185 
1186 struct socket_alloc {
1187 	struct socket socket;
1188 	struct inode vfs_inode;
1189 };
1190 
1191 static inline struct socket *SOCKET_I(struct inode *inode)
1192 {
1193 	return &container_of(inode, struct socket_alloc, vfs_inode)->socket;
1194 }
1195 
1196 static inline struct inode *SOCK_INODE(struct socket *socket)
1197 {
1198 	return &container_of(socket, struct socket_alloc, socket)->vfs_inode;
1199 }
1200 
1201 /*
1202  * Functions for memory accounting
1203  */
1204 extern int __sk_mem_schedule(struct sock *sk, int size, int kind);
1205 extern void __sk_mem_reclaim(struct sock *sk);
1206 
1207 #define SK_MEM_QUANTUM ((int)PAGE_SIZE)
1208 #define SK_MEM_QUANTUM_SHIFT ilog2(SK_MEM_QUANTUM)
1209 #define SK_MEM_SEND	0
1210 #define SK_MEM_RECV	1
1211 
1212 static inline int sk_mem_pages(int amt)
1213 {
1214 	return (amt + SK_MEM_QUANTUM - 1) >> SK_MEM_QUANTUM_SHIFT;
1215 }
1216 
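/*
 * Worked example, not part of the original header: with 4096-byte pages,
 * SK_MEM_QUANTUM is 4096 and sk_mem_pages(6000) evaluates to
 * (6000 + 4095) >> 12 == 2, i.e. scheduling 6000 bytes reserves two quanta.
 */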
1217 static inline int sk_has_account(struct sock *sk)
1218 {
1219 	/* return true if protocol supports memory accounting */
1220 	return !!sk->sk_prot->memory_allocated;
1221 }
1222 
1223 static inline int sk_wmem_schedule(struct sock *sk, int size)
1224 {
1225 	if (!sk_has_account(sk))
1226 		return 1;
1227 	return size <= sk->sk_forward_alloc ||
1228 		__sk_mem_schedule(sk, size, SK_MEM_SEND);
1229 }
1230 
1231 static inline int sk_rmem_schedule(struct sock *sk, int size)
1232 {
1233 	if (!sk_has_account(sk))
1234 		return 1;
1235 	return size <= sk->sk_forward_alloc ||
1236 		__sk_mem_schedule(sk, size, SK_MEM_RECV);
1237 }
1238 
1239 static inline void sk_mem_reclaim(struct sock *sk)
1240 {
1241 	if (!sk_has_account(sk))
1242 		return;
1243 	if (sk->sk_forward_alloc >= SK_MEM_QUANTUM)
1244 		__sk_mem_reclaim(sk);
1245 }
1246 
1247 static inline void sk_mem_reclaim_partial(struct sock *sk)
1248 {
1249 	if (!sk_has_account(sk))
1250 		return;
1251 	if (sk->sk_forward_alloc > SK_MEM_QUANTUM)
1252 		__sk_mem_reclaim(sk);
1253 }
1254 
1255 static inline void sk_mem_charge(struct sock *sk, int size)
1256 {
1257 	if (!sk_has_account(sk))
1258 		return;
1259 	sk->sk_forward_alloc -= size;
1260 }
1261 
1262 static inline void sk_mem_uncharge(struct sock *sk, int size)
1263 {
1264 	if (!sk_has_account(sk))
1265 		return;
1266 	sk->sk_forward_alloc += size;
1267 }
1268 
1269 static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
1270 {
1271 	sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
1272 	sk->sk_wmem_queued -= skb->truesize;
1273 	sk_mem_uncharge(sk, skb->truesize);
1274 	__kfree_skb(skb);
1275 }
1276 
1277 /* Used by processes to "lock" a socket state, so that
1278  * interrupts and bottom half handlers won't change it
1279  * from under us. It essentially blocks any incoming
1280  * packets, so that we won't get any new data or any
1281  * packets that change the state of the socket.
1282  *
1283  * While locked, BH processing will add new packets to
1284  * the backlog queue.  This queue is processed by the
1285  * owner of the socket lock right before it is released.
1286  *
1287  * Since ~2.3.5 it is also exclusive sleep lock serializing
1288  * accesses from user process context.
1289  */
1290 #define sock_owned_by_user(sk)	((sk)->sk_lock.owned)
1291 
1292 /*
1293  * Macro so as to not evaluate some arguments when
1294  * lockdep is not enabled.
1295  *
1296  * Mark both the sk_lock and the sk_lock.slock as a
1297  * per-address-family lock class.
1298  */
1299 #define sock_lock_init_class_and_name(sk, sname, skey, name, key) 	\
1300 do {									\
1301 	sk->sk_lock.owned = 0;						\
1302 	init_waitqueue_head(&sk->sk_lock.wq);				\
1303 	spin_lock_init(&(sk)->sk_lock.slock);				\
1304 	debug_check_no_locks_freed((void *)&(sk)->sk_lock,		\
1305 			sizeof((sk)->sk_lock));				\
1306 	lockdep_set_class_and_name(&(sk)->sk_lock.slock,		\
1307 		       	(skey), (sname));				\
1308 	lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0);	\
1309 } while (0)
1310 
1311 extern void lock_sock_nested(struct sock *sk, int subclass);
1312 
1313 static inline void lock_sock(struct sock *sk)
1314 {
1315 	lock_sock_nested(sk, 0);
1316 }
1317 
1318 extern void release_sock(struct sock *sk);
1319 
1320 /* BH context may only use the following locking interface. */
1321 #define bh_lock_sock(__sk)	spin_lock(&((__sk)->sk_lock.slock))
1322 #define bh_lock_sock_nested(__sk) \
1323 				spin_lock_nested(&((__sk)->sk_lock.slock), \
1324 				SINGLE_DEPTH_NESTING)
1325 #define bh_unlock_sock(__sk)	spin_unlock(&((__sk)->sk_lock.slock))
1326 
1327 extern bool lock_sock_fast(struct sock *sk);
1328 /**
1329  * unlock_sock_fast - complement of lock_sock_fast
1330  * @sk: socket
1331  * @slow: slow mode
1332  *
1333  * fast unlock socket for user context.
1334  * If slow mode is on, we call regular release_sock()
1335  */
1336 static inline void unlock_sock_fast(struct sock *sk, bool slow)
1337 {
1338 	if (slow)
1339 		release_sock(sk);
1340 	else
1341 		spin_unlock_bh(&sk->sk_lock.slock);
1342 }
1343 
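/*
 * Illustrative sketch, not part of the original header: process context
 * normally brackets socket state changes with lock_sock()/release_sock();
 * lock_sock_fast() can avoid the full slow path when the lock is not owned:
 *
 *	bool slow = lock_sock_fast(sk);
 *
 *	...update socket state...
 *
 *	unlock_sock_fast(sk, slow);
 */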
1344 
1345 extern struct sock		*sk_alloc(struct net *net, int family,
1346 					  gfp_t priority,
1347 					  struct proto *prot);
1348 extern void			sk_free(struct sock *sk);
1349 extern void			sk_release_kernel(struct sock *sk);
1350 extern struct sock		*sk_clone_lock(const struct sock *sk,
1351 					       const gfp_t priority);
1352 
1353 extern struct sk_buff		*sock_wmalloc(struct sock *sk,
1354 					      unsigned long size, int force,
1355 					      gfp_t priority);
1356 extern struct sk_buff		*sock_rmalloc(struct sock *sk,
1357 					      unsigned long size, int force,
1358 					      gfp_t priority);
1359 extern void			sock_wfree(struct sk_buff *skb);
1360 extern void			sock_rfree(struct sk_buff *skb);
1361 
1362 extern int			sock_setsockopt(struct socket *sock, int level,
1363 						int op, char __user *optval,
1364 						unsigned int optlen);
1365 
1366 extern int			sock_getsockopt(struct socket *sock, int level,
1367 						int op, char __user *optval,
1368 						int __user *optlen);
1369 extern struct sk_buff 		*sock_alloc_send_skb(struct sock *sk,
1370 						     unsigned long size,
1371 						     int noblock,
1372 						     int *errcode);
1373 extern struct sk_buff 		*sock_alloc_send_pskb(struct sock *sk,
1374 						      unsigned long header_len,
1375 						      unsigned long data_len,
1376 						      int noblock,
1377 						      int *errcode);
1378 extern void *sock_kmalloc(struct sock *sk, int size,
1379 			  gfp_t priority);
1380 extern void sock_kfree_s(struct sock *sk, void *mem, int size);
1381 extern void sk_send_sigurg(struct sock *sk);
1382 
1383 #ifdef CONFIG_CGROUPS
1384 extern void sock_update_classid(struct sock *sk);
1385 #else
1386 static inline void sock_update_classid(struct sock *sk)
1387 {
1388 }
1389 #endif
1390 
1391 /*
1392  * Functions to fill in entries in struct proto_ops when a protocol
1393  * does not implement a particular function.
1394  */
1395 extern int                      sock_no_bind(struct socket *,
1396 					     struct sockaddr *, int);
1397 extern int                      sock_no_connect(struct socket *,
1398 						struct sockaddr *, int, int);
1399 extern int                      sock_no_socketpair(struct socket *,
1400 						   struct socket *);
1401 extern int                      sock_no_accept(struct socket *,
1402 					       struct socket *, int);
1403 extern int                      sock_no_getname(struct socket *,
1404 						struct sockaddr *, int *, int);
1405 extern unsigned int             sock_no_poll(struct file *, struct socket *,
1406 					     struct poll_table_struct *);
1407 extern int                      sock_no_ioctl(struct socket *, unsigned int,
1408 					      unsigned long);
1409 extern int			sock_no_listen(struct socket *, int);
1410 extern int                      sock_no_shutdown(struct socket *, int);
1411 extern int			sock_no_getsockopt(struct socket *, int , int,
1412 						   char __user *, int __user *);
1413 extern int			sock_no_setsockopt(struct socket *, int, int,
1414 						   char __user *, unsigned int);
1415 extern int                      sock_no_sendmsg(struct kiocb *, struct socket *,
1416 						struct msghdr *, size_t);
1417 extern int                      sock_no_recvmsg(struct kiocb *, struct socket *,
1418 						struct msghdr *, size_t, int);
1419 extern int			sock_no_mmap(struct file *file,
1420 					     struct socket *sock,
1421 					     struct vm_area_struct *vma);
1422 extern ssize_t			sock_no_sendpage(struct socket *sock,
1423 						struct page *page,
1424 						int offset, size_t size,
1425 						int flags);
1426 
1427 /*
1428  * Functions to fill in entries in struct proto_ops when a protocol
1429  * uses the inet style.
1430  */
1431 extern int sock_common_getsockopt(struct socket *sock, int level, int optname,
1432 				  char __user *optval, int __user *optlen);
1433 extern int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
1434 			       struct msghdr *msg, size_t size, int flags);
1435 extern int sock_common_setsockopt(struct socket *sock, int level, int optname,
1436 				  char __user *optval, unsigned int optlen);
1437 extern int compat_sock_common_getsockopt(struct socket *sock, int level,
1438 		int optname, char __user *optval, int __user *optlen);
1439 extern int compat_sock_common_setsockopt(struct socket *sock, int level,
1440 		int optname, char __user *optval, unsigned int optlen);
1441 
1442 extern void sk_common_release(struct sock *sk);
1443 
1444 /*
1445  *	Default socket callbacks and setup code
1446  */
1447 
1448 /* Initialise core socket variables */
1449 extern void sock_init_data(struct socket *sock, struct sock *sk);
1450 
1451 extern void sk_filter_release_rcu(struct rcu_head *rcu);
1452 
1453 /**
1454  *	sk_filter_release - release a socket filter
1455  *	@fp: filter to remove
1456  *
1457  *	Remove a filter from a socket and release its resources.
1458  */
1459 
1460 static inline void sk_filter_release(struct sk_filter *fp)
1461 {
1462 	if (atomic_dec_and_test(&fp->refcnt))
1463 		call_rcu(&fp->rcu, sk_filter_release_rcu);
1464 }
1465 
1466 static inline void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
1467 {
1468 	unsigned int size = sk_filter_len(fp);
1469 
1470 	atomic_sub(size, &sk->sk_omem_alloc);
1471 	sk_filter_release(fp);
1472 }
1473 
1474 static inline void sk_filter_charge(struct sock *sk, struct sk_filter *fp)
1475 {
1476 	atomic_inc(&fp->refcnt);
1477 	atomic_add(sk_filter_len(fp), &sk->sk_omem_alloc);
1478 }
1479 
1480 /*
1481  * Socket reference counting postulates.
1482  *
1483  * * Each user of socket SHOULD hold a reference count.
1484  * * Each access point to a socket (a hash table bucket, a reference from a list,
1485  *   a running timer, an skb in flight) MUST hold a reference count.
1486  * * When reference count hits 0, it means it will never increase back.
1487  * * When reference count hits 0, it means that no references from
1488  *   outside exist to this socket and current process on current CPU
1489  *   is last user and may/should destroy this socket.
1490  * * sk_free is called from any context: process, BH, IRQ. When
1491  *   it is called, socket has no references from outside -> sk_free
1492  *   may release descendant resources allocated by the socket, but
1493  *   to the time when it is called, socket is NOT referenced by any
1494  *   hash tables, lists etc.
1495  * * Packets, delivered from outside (from network or from another process)
1496  *   and enqueued on receive/error queues SHOULD NOT grab a reference count
1497  *   while they sit in a queue. Otherwise, packets would leak into a hole when
1498  *   a socket is looked up by one CPU and unhashing is done by another CPU.
1499  *   It is true for udp/raw, netlink (leak to receive and error queues), tcp
1500  *   (leak to backlog). Packet socket does all the processing inside
1501  *   BR_NETPROTO_LOCK, so it does not have this race condition. UNIX sockets
1502  *   use a separate SMP lock, so they avoid it as well.
1503  */
1504 
1505 /* Ungrab socket and destroy it, if it was the last reference. */
1506 static inline void sock_put(struct sock *sk)
1507 {
1508 	if (atomic_dec_and_test(&sk->sk_refcnt))
1509 		sk_free(sk);
1510 }
1511 
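/*
 * Illustrative sketch, not part of the original header: following the
 * postulates above, a lookup takes its own reference (sock_hold()) while the
 * hash lock is held, then drops it with sock_put() when it is done with the
 * socket; the lookup helper name below is hypothetical.
 *
 *	sk = example_lookup(net, saddr, sport);
 *	if (sk) {
 *		...use sk...
 *		sock_put(sk);
 *	}
 */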
1512 extern int sk_receive_skb(struct sock *sk, struct sk_buff *skb,
1513 			  const int nested);
1514 
1515 static inline void sk_tx_queue_set(struct sock *sk, int tx_queue)
1516 {
1517 	sk->sk_tx_queue_mapping = tx_queue;
1518 }
1519 
1520 static inline void sk_tx_queue_clear(struct sock *sk)
1521 {
1522 	sk->sk_tx_queue_mapping = -1;
1523 }
1524 
1525 static inline int sk_tx_queue_get(const struct sock *sk)
1526 {
1527 	return sk ? sk->sk_tx_queue_mapping : -1;
1528 }
1529 
1530 static inline void sk_set_socket(struct sock *sk, struct socket *sock)
1531 {
1532 	sk_tx_queue_clear(sk);
1533 	sk->sk_socket = sock;
1534 }
1535 
1536 static inline wait_queue_head_t *sk_sleep(struct sock *sk)
1537 {
1538 	BUILD_BUG_ON(offsetof(struct socket_wq, wait) != 0);
1539 	return &rcu_dereference_raw(sk->sk_wq)->wait;
1540 }
1541 /* Detach socket from process context.
1542  * Announce socket dead, detach it from wait queue and inode.
1543  * Note that the parent inode holds a reference count on this struct sock;
1544  * we do not release it in this function, because the protocol
1545  * probably wants some additional cleanups or even to continue
1546  * working with this socket (TCP).
1547  */
1548 static inline void sock_orphan(struct sock *sk)
1549 {
1550 	write_lock_bh(&sk->sk_callback_lock);
1551 	sock_set_flag(sk, SOCK_DEAD);
1552 	sk_set_socket(sk, NULL);
1553 	sk->sk_wq  = NULL;
1554 	write_unlock_bh(&sk->sk_callback_lock);
1555 }
1556 
1557 static inline void sock_graft(struct sock *sk, struct socket *parent)
1558 {
1559 	write_lock_bh(&sk->sk_callback_lock);
1560 	sk->sk_wq = parent->wq;
1561 	parent->sk = sk;
1562 	sk_set_socket(sk, parent);
1563 	security_sock_graft(sk, parent);
1564 	write_unlock_bh(&sk->sk_callback_lock);
1565 }
1566 
1567 extern int sock_i_uid(struct sock *sk);
1568 extern unsigned long sock_i_ino(struct sock *sk);
1569 
1570 static inline struct dst_entry *
1571 __sk_dst_get(struct sock *sk)
1572 {
1573 	return rcu_dereference_check(sk->sk_dst_cache, sock_owned_by_user(sk) ||
1574 						       lockdep_is_held(&sk->sk_lock.slock));
1575 }
1576 
1577 static inline struct dst_entry *
1578 sk_dst_get(struct sock *sk)
1579 {
1580 	struct dst_entry *dst;
1581 
1582 	rcu_read_lock();
1583 	dst = rcu_dereference(sk->sk_dst_cache);
1584 	if (dst)
1585 		dst_hold(dst);
1586 	rcu_read_unlock();
1587 	return dst;
1588 }
1589 
1590 extern void sk_reset_txq(struct sock *sk);
1591 
1592 static inline void dst_negative_advice(struct sock *sk)
1593 {
1594 	struct dst_entry *ndst, *dst = __sk_dst_get(sk);
1595 
1596 	if (dst && dst->ops->negative_advice) {
1597 		ndst = dst->ops->negative_advice(dst);
1598 
1599 		if (ndst != dst) {
1600 			rcu_assign_pointer(sk->sk_dst_cache, ndst);
1601 			sk_reset_txq(sk);
1602 		}
1603 	}
1604 }
1605 
1606 static inline void
1607 __sk_dst_set(struct sock *sk, struct dst_entry *dst)
1608 {
1609 	struct dst_entry *old_dst;
1610 
1611 	sk_tx_queue_clear(sk);
1612 	/*
1613 	 * This can be called while sk is owned by the caller only,
1614 	 * with no state that can be checked in a rcu_dereference_check() cond
1615 	 */
1616 	old_dst = rcu_dereference_raw(sk->sk_dst_cache);
1617 	rcu_assign_pointer(sk->sk_dst_cache, dst);
1618 	dst_release(old_dst);
1619 }
1620 
1621 static inline void
1622 sk_dst_set(struct sock *sk, struct dst_entry *dst)
1623 {
1624 	spin_lock(&sk->sk_dst_lock);
1625 	__sk_dst_set(sk, dst);
1626 	spin_unlock(&sk->sk_dst_lock);
1627 }
1628 
1629 static inline void
1630 __sk_dst_reset(struct sock *sk)
1631 {
1632 	__sk_dst_set(sk, NULL);
1633 }
1634 
1635 static inline void
1636 sk_dst_reset(struct sock *sk)
1637 {
1638 	spin_lock(&sk->sk_dst_lock);
1639 	__sk_dst_reset(sk);
1640 	spin_unlock(&sk->sk_dst_lock);
1641 }
1642 
1643 extern struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);
1644 
1645 extern struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie);
1646 
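/*
 * Illustrative sketch, not part of the original header: output paths usually
 * revalidate the cached route before use and install a fresh one if the old
 * entry has become obsolete; the route lookup helper below is hypothetical
 * and the socket is assumed to be owned by the caller.
 *
 *	struct dst_entry *dst = __sk_dst_check(sk, 0);
 *
 *	if (!dst) {
 *		dst = example_route_lookup(sk);
 *		if (dst)
 *			__sk_dst_set(sk, dst);
 *	}
 */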
1647 static inline int sk_can_gso(const struct sock *sk)
1648 {
1649 	return net_gso_ok(sk->sk_route_caps, sk->sk_gso_type);
1650 }
1651 
1652 extern void sk_setup_caps(struct sock *sk, struct dst_entry *dst);
1653 
1654 static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
1655 {
1656 	sk->sk_route_nocaps |= flags;
1657 	sk->sk_route_caps &= ~flags;
1658 }
1659 
1660 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
1661 					   char __user *from, char *to,
1662 					   int copy, int offset)
1663 {
1664 	if (skb->ip_summed == CHECKSUM_NONE) {
1665 		int err = 0;
1666 		__wsum csum = csum_and_copy_from_user(from, to, copy, 0, &err);
1667 		if (err)
1668 			return err;
1669 		skb->csum = csum_block_add(skb->csum, csum, offset);
1670 	} else if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY) {
1671 		if (!access_ok(VERIFY_READ, from, copy) ||
1672 		    __copy_from_user_nocache(to, from, copy))
1673 			return -EFAULT;
1674 	} else if (copy_from_user(to, from, copy))
1675 		return -EFAULT;
1676 
1677 	return 0;
1678 }
1679 
1680 static inline int skb_add_data_nocache(struct sock *sk, struct sk_buff *skb,
1681 				       char __user *from, int copy)
1682 {
1683 	int err, offset = skb->len;
1684 
1685 	err = skb_do_copy_data_nocache(sk, skb, from, skb_put(skb, copy),
1686 				       copy, offset);
1687 	if (err)
1688 		__skb_trim(skb, offset);
1689 
1690 	return err;
1691 }
1692 
1693 static inline int skb_copy_to_page_nocache(struct sock *sk, char __user *from,
1694 					   struct sk_buff *skb,
1695 					   struct page *page,
1696 					   int off, int copy)
1697 {
1698 	int err;
1699 
1700 	err = skb_do_copy_data_nocache(sk, skb, from, page_address(page) + off,
1701 				       copy, skb->len);
1702 	if (err)
1703 		return err;
1704 
1705 	skb->len	     += copy;
1706 	skb->data_len	     += copy;
1707 	skb->truesize	     += copy;
1708 	sk->sk_wmem_queued   += copy;
1709 	sk_mem_charge(sk, copy);
1710 	return 0;
1711 }
1712 
1713 static inline int skb_copy_to_page(struct sock *sk, char __user *from,
1714 				   struct sk_buff *skb, struct page *page,
1715 				   int off, int copy)
1716 {
1717 	if (skb->ip_summed == CHECKSUM_NONE) {
1718 		int err = 0;
1719 		__wsum csum = csum_and_copy_from_user(from,
1720 						      page_address(page) + off,
1721 						      copy, 0, &err);
1722 		if (err)
1723 			return err;
1724 		skb->csum = csum_block_add(skb->csum, csum, skb->len);
1725 	} else if (copy_from_user(page_address(page) + off, from, copy))
1726 		return -EFAULT;
1727 
1728 	skb->len	     += copy;
1729 	skb->data_len	     += copy;
1730 	skb->truesize	     += copy;
1731 	sk->sk_wmem_queued   += copy;
1732 	sk_mem_charge(sk, copy);
1733 	return 0;
1734 }
1735 
1736 /**
1737  * sk_wmem_alloc_get - returns write allocations
1738  * @sk: socket
1739  *
1740  * Returns sk_wmem_alloc minus initial offset of one
1741  */
1742 static inline int sk_wmem_alloc_get(const struct sock *sk)
1743 {
1744 	return atomic_read(&sk->sk_wmem_alloc) - 1;
1745 }
1746 
1747 /**
1748  * sk_rmem_alloc_get - returns read allocations
1749  * @sk: socket
1750  *
1751  * Returns sk_rmem_alloc
1752  */
1753 static inline int sk_rmem_alloc_get(const struct sock *sk)
1754 {
1755 	return atomic_read(&sk->sk_rmem_alloc);
1756 }
1757 
1758 /**
1759  * sk_has_allocations - check if allocations are outstanding
1760  * @sk: socket
1761  *
1762  * Returns true if socket has write or read allocations
1763  */
1764 static inline int sk_has_allocations(const struct sock *sk)
1765 {
1766 	return sk_wmem_alloc_get(sk) || sk_rmem_alloc_get(sk);
1767 }
1768 
1769 /**
1770  * wq_has_sleeper - check if there are any waiting processes
1771  * @wq: struct socket_wq
1772  *
1773  * Returns true if socket_wq has waiting processes
1774  *
1775  * The purpose of the wq_has_sleeper and sock_poll_wait is to wrap the memory
1776  * barrier call. They were added due to the race found within the tcp code.
1777  *
1778  * Consider following tcp code paths:
1779  * Consider the following tcp code paths:
1780  * CPU1                  CPU2
1781  *
1782  * sys_select            receive packet
1783  *   ...                 ...
1784  *   __add_wait_queue    update tp->rcv_nxt
1785  *   ...                 ...
1786  *   tp->rcv_nxt check   sock_def_readable
1787  *   ...                 {
1788  *   schedule               rcu_read_lock();
1789  *                          wq = rcu_dereference(sk->sk_wq);
1790  *                          if (wq && waitqueue_active(&wq->wait))
1791  *                              wake_up_interruptible(&wq->wait)
1792  *                          ...
1793  *                       }
1794  *
1795  * The race for tcp fires when the __add_wait_queue changes done by CPU1 stay
1796  * in its cache, and so does the tp->rcv_nxt update on CPU2 side.  CPU1 could
1797  * then end up calling schedule() and sleeping forever if there is no more
1798  * data on the socket.  (See the illustrative example after sock_poll_wait().)
1799  *
1800  */
1801 static inline bool wq_has_sleeper(struct socket_wq *wq)
1802 {
1803 
1804 	/*
1805 	 * We need to be sure we are in sync with the
1806 	 * add_wait_queue modifications to the wait queue.
1807 	 *
1808 	 * This memory barrier pairs with the one in sock_poll_wait().
1809 	 */
1810 	smp_mb();
1811 	return wq && waitqueue_active(&wq->wait);
1812 }
1813 
1814 /**
1815  * sock_poll_wait - place memory barrier behind the poll_wait call.
1816  * @filp:           file
1817  * @wait_address:   socket wait queue
1818  * @p:              poll_table
1819  *
1820  * See the comments in the wq_has_sleeper function.
1821  */
1822 static inline void sock_poll_wait(struct file *filp,
1823 		wait_queue_head_t *wait_address, poll_table *p)
1824 {
1825 	if (p && wait_address) {
1826 		poll_wait(filp, wait_address, p);
1827 		/*
1828 		 * We need to be sure we are in sync with the
1829 		 * socket flags modification.
1830 		 *
1831 		 * This memory barrier pairs with the one in wq_has_sleeper().
1832 		 */
1833 		smp_mb();
1834 	}
1835 }
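
/*
 * Illustrative example (a sketch only, not a required pattern): the barrier
 * in wq_has_sleeper() only helps when the wakeup side uses it before
 * testing the wait queue, e.g. a hypothetical sk_data_ready handler:
 *
 *	static void example_data_ready(struct sock *sk, int len)
 *	{
 *		struct socket_wq *wq;
 *
 *		rcu_read_lock();
 *		wq = rcu_dereference(sk->sk_wq);
 *		if (wq_has_sleeper(wq))
 *			wake_up_interruptible_sync_poll(&wq->wait,
 *							POLLIN | POLLRDNORM);
 *		rcu_read_unlock();
 *	}
 *
 * The sleeping side goes through sock_poll_wait() in its ->poll()
 * implementation, so the two smp_mb() calls pair up.
 */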
1836 
1837 /*
1838  * 	Take ownership of a buffer on behalf of a socket and charge its
1839  *	truesize to that socket's write or receive allocation.  Datagram
1840  *	queueing (sock_queue_rcv_skb) builds on this accounting.
1841  *
1842  * 	Inlined as it's very short and called for pretty much every
1843  *	packet ever received.
1844  */
1845 
1846 static inline void skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
1847 {
1848 	skb_orphan(skb);
1849 	skb->sk = sk;
1850 	skb->destructor = sock_wfree;
1851 	/*
1852 	 * We used to take a refcount on sk, but the following operation
1853 	 * is enough to guarantee sk_free() won't free this sock until
1854 	 * all in-flight packets are completed.
1855 	 */
1856 	atomic_add(skb->truesize, &sk->sk_wmem_alloc);
1857 }
1858 
1859 static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
1860 {
1861 	skb_orphan(skb);
1862 	skb->sk = sk;
1863 	skb->destructor = sock_rfree;
1864 	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
1865 	sk_mem_charge(sk, skb->truesize);
1866 }
1867 
1868 extern void sk_reset_timer(struct sock *sk, struct timer_list *timer,
1869 			   unsigned long expires);
1870 
1871 extern void sk_stop_timer(struct sock *sk, struct timer_list *timer);
1872 
1873 extern int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
1874 
1875 extern int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb);
1876 
1877 /*
1878  *	Recover an error report and clear atomically
1879  */
1880 
1881 static inline int sock_error(struct sock *sk)
1882 {
1883 	int err;
1884 	if (likely(!sk->sk_err))
1885 		return 0;
1886 	err = xchg(&sk->sk_err, 0);
1887 	return -err;
1888 }
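
/*
 * Illustrative example (a sketch only): a blocking send or receive path
 * typically converts a pending error like this:
 *
 *	err = sock_error(sk);
 *	if (err)
 *		goto out_err;
 *
 * where err is already negative (e.g. -EPIPE).  The xchg() above both
 * reads and clears sk_err, so each report is delivered to exactly one
 * caller.
 */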
1889 
1890 static inline unsigned long sock_wspace(struct sock *sk)
1891 {
1892 	int amt = 0;
1893 
1894 	if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
1895 		amt = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc);
1896 		if (amt < 0)
1897 			amt = 0;
1898 	}
1899 	return amt;
1900 }
1901 
1902 static inline void sk_wake_async(struct sock *sk, int how, int band)
1903 {
1904 	if (sock_flag(sk, SOCK_FASYNC))
1905 		sock_wake_async(sk->sk_socket, how, band);
1906 }
1907 
1908 #define SOCK_MIN_SNDBUF 2048
1909 /*
1910  * Since sk_rmem_alloc sums skb->truesize, even a small frame might need
1911  * sizeof(struct sk_buff) + MTU + padding, unless the driver performs copybreak.
1912  */
1913 #define SOCK_MIN_RCVBUF (2048 + sizeof(struct sk_buff))
1914 
1915 static inline void sk_stream_moderate_sndbuf(struct sock *sk)
1916 {
1917 	if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK)) {
1918 		sk->sk_sndbuf = min(sk->sk_sndbuf, sk->sk_wmem_queued >> 1);
1919 		sk->sk_sndbuf = max(sk->sk_sndbuf, SOCK_MIN_SNDBUF);
1920 	}
1921 }
1922 
1923 struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
1924 
1925 static inline struct page *sk_stream_alloc_page(struct sock *sk)
1926 {
1927 	struct page *page = NULL;
1928 
1929 	page = alloc_pages(sk->sk_allocation, 0);
1930 	if (!page) {
1931 		sk_enter_memory_pressure(sk);
1932 		sk_stream_moderate_sndbuf(sk);
1933 	}
1934 	return page;
1935 }
1936 
1937 /*
1938  *	Default write policy as shown to user space via poll/select/SIGIO
1939  */
1940 static inline int sock_writeable(const struct sock *sk)
1941 {
1942 	return atomic_read(&sk->sk_wmem_alloc) < (sk->sk_sndbuf >> 1);
1943 }
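
/*
 * Illustrative example (a sketch only): datagram ->poll() implementations
 * typically map sock_writeable() onto the write-ready poll bits:
 *
 *	if (sock_writeable(sk))
 *		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
 */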
1944 
1945 static inline gfp_t gfp_any(void)
1946 {
1947 	return in_softirq() ? GFP_ATOMIC : GFP_KERNEL;
1948 }
1949 
1950 static inline long sock_rcvtimeo(const struct sock *sk, int noblock)
1951 {
1952 	return noblock ? 0 : sk->sk_rcvtimeo;
1953 }
1954 
1955 static inline long sock_sndtimeo(const struct sock *sk, int noblock)
1956 {
1957 	return noblock ? 0 : sk->sk_sndtimeo;
1958 }
1959 
1960 static inline int sock_rcvlowat(const struct sock *sk, int waitall, int len)
1961 {
1962 	return (waitall ? len : min_t(int, sk->sk_rcvlowat, len)) ? : 1;
1963 }
1964 
1965 /* Alas, with timeout socket operations are not restartable.
1966  * Compare this to poll().
1967  */
1968 static inline int sock_intr_errno(long timeo)
1969 {
1970 	return timeo == MAX_SCHEDULE_TIMEOUT ? -ERESTARTSYS : -EINTR;
1971 }
1972 
1973 extern void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
1974 	struct sk_buff *skb);
1975 extern void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk,
1976 	struct sk_buff *skb);
1977 
1978 static inline void
1979 sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
1980 {
1981 	ktime_t kt = skb->tstamp;
1982 	struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb);
1983 
1984 	/*
1985 	 * generate control messages if
1986 	 * - receive time stamping in software requested (SOCK_RCVTSTAMP
1987 	 *   or SOCK_TIMESTAMPING_RX_SOFTWARE)
1988 	 * - software time stamp available and wanted
1989 	 *   (SOCK_TIMESTAMPING_SOFTWARE)
1990 	 * - hardware time stamps available and wanted
1991 	 *   (SOCK_TIMESTAMPING_SYS_HARDWARE or
1992 	 *   SOCK_TIMESTAMPING_RAW_HARDWARE)
1993 	 */
1994 	if (sock_flag(sk, SOCK_RCVTSTAMP) ||
1995 	    sock_flag(sk, SOCK_TIMESTAMPING_RX_SOFTWARE) ||
1996 	    (kt.tv64 && sock_flag(sk, SOCK_TIMESTAMPING_SOFTWARE)) ||
1997 	    (hwtstamps->hwtstamp.tv64 &&
1998 	     sock_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE)) ||
1999 	    (hwtstamps->syststamp.tv64 &&
2000 	     sock_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE)))
2001 		__sock_recv_timestamp(msg, sk, skb);
2002 	else
2003 		sk->sk_stamp = kt;
2004 
2005 	if (sock_flag(sk, SOCK_WIFI_STATUS) && skb->wifi_acked_valid)
2006 		__sock_recv_wifi_status(msg, sk, skb);
2007 }
2008 
2009 extern void __sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
2010 				     struct sk_buff *skb);
2011 
2012 static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
2013 					  struct sk_buff *skb)
2014 {
2015 #define FLAGS_TS_OR_DROPS ((1UL << SOCK_RXQ_OVFL)			| \
2016 			   (1UL << SOCK_RCVTSTAMP)			| \
2017 			   (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE)	| \
2018 			   (1UL << SOCK_TIMESTAMPING_SOFTWARE)		| \
2019 			   (1UL << SOCK_TIMESTAMPING_RAW_HARDWARE) 	| \
2020 			   (1UL << SOCK_TIMESTAMPING_SYS_HARDWARE))
2021 
2022 	if (sk->sk_flags & FLAGS_TS_OR_DROPS)
2023 		__sock_recv_ts_and_drops(msg, sk, skb);
2024 	else
2025 		sk->sk_stamp = skb->tstamp;
2026 }
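
/*
 * Illustrative example (a sketch only): a datagram recvmsg implementation
 * typically calls the helper above once per skb handed to the user:
 *
 *	skb = skb_recv_datagram(sk, flags, noblock, &err);
 *	if (!skb)
 *		goto out;
 *	... copy payload to msg ...
 *	sock_recv_ts_and_drops(msg, sk, skb);
 *
 * so that timestamp and drop-count control messages are only generated
 * when one of the flags tested above is set.
 */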
2027 
2028 /**
2029  * sock_tx_timestamp - checks whether the outgoing packet is to be time stamped
2030  * @sk:		socket sending this packet
2031  * @tx_flags:	filled with instructions for time stamping
2032  *
2033  * Currently only depends on SOCK_TIMESTAMPING* flags. Returns error code if
2034  * parameters are invalid.
2035  */
2036 extern int sock_tx_timestamp(struct sock *sk, __u8 *tx_flags);
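
/*
 * Illustrative example (a sketch only): transmit paths usually fetch the
 * flags right before building the skb and copy them into the shared info:
 *
 *	__u8 tx_flags = 0;
 *
 *	err = sock_tx_timestamp(sk, &tx_flags);
 *	if (err)
 *		goto out;
 *	skb_shinfo(skb)->tx_flags = tx_flags;
 */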
2037 
2038 /**
2039  * sk_eat_skb - Release a skb if it is no longer needed
2040  * @sk: socket to eat this skb from
2041  * @skb: socket buffer to eat
2042  * @copied_early: flag indicating whether DMA operations copied this data early
2043  *
2044  * This routine must be called with interrupts disabled or with the socket
2045  * locked so that the sk_buff queue operation is safe.
2046  */
2047 #ifdef CONFIG_NET_DMA
2048 static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, int copied_early)
2049 {
2050 	__skb_unlink(skb, &sk->sk_receive_queue);
2051 	if (!copied_early)
2052 		__kfree_skb(skb);
2053 	else
2054 		__skb_queue_tail(&sk->sk_async_wait_queue, skb);
2055 }
2056 #else
2057 static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, int copied_early)
2058 {
2059 	__skb_unlink(skb, &sk->sk_receive_queue);
2060 	__kfree_skb(skb);
2061 }
2062 #endif
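
/*
 * Illustrative example (a sketch only): a stream receive loop holds the
 * socket lock while consuming queued skbs, which satisfies the locking
 * requirement documented above:
 *
 *	lock_sock(sk);
 *	while ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) {
 *		... copy data to the user ...
 *		sk_eat_skb(sk, skb, 0);
 *	}
 *	release_sock(sk);
 */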
2063 
2064 static inline
2065 struct net *sock_net(const struct sock *sk)
2066 {
2067 	return read_pnet(&sk->sk_net);
2068 }
2069 
2070 static inline
2071 void sock_net_set(struct sock *sk, struct net *net)
2072 {
2073 	write_pnet(&sk->sk_net, net);
2074 }
2075 
2076 /*
2077  * Kernel sockets, e.g. rtnl or icmp_socket, are a part of a namespace.
2078  * They should not hold a reference to the namespace, so that the
2079  * namespace can be stopped.
2080  * Sockets set up via sk_change_net() must be freed via sk_release_kernel().
2081  */
2082 static inline void sk_change_net(struct sock *sk, struct net *net)
2083 {
2084 	put_net(sock_net(sk));
2085 	sock_net_set(sk, hold_net(net));
2086 }
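
/*
 * Illustrative example (a sketch only): a kernel socket moved into a
 * namespace with sk_change_net() must be torn down with
 * sk_release_kernel() rather than a plain sock_release():
 *
 *	err = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock);
 *	if (err < 0)
 *		return err;
 *	sk_change_net(sock->sk, net);
 *	...
 *	sk_release_kernel(sock->sk);
 */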
2087 
2088 static inline struct sock *skb_steal_sock(struct sk_buff *skb)
2089 {
2090 	if (unlikely(skb->sk)) {
2091 		struct sock *sk = skb->sk;
2092 
2093 		skb->destructor = NULL;
2094 		skb->sk = NULL;
2095 		return sk;
2096 	}
2097 	return NULL;
2098 }
2099 
2100 extern void sock_enable_timestamp(struct sock *sk, int flag);
2101 extern int sock_get_timestamp(struct sock *, struct timeval __user *);
2102 extern int sock_get_timestampns(struct sock *, struct timespec __user *);
2103 
2104 /*
2105  *	Enable debug/info messages
2106  */
2107 extern int net_msg_warn;
2108 #define NETDEBUG(fmt, args...) \
2109 	do { if (net_msg_warn) printk(fmt, ##args); } while (0)
2110 
2111 #define LIMIT_NETDEBUG(fmt, args...) \
2112 	do { if (net_msg_warn && net_ratelimit()) printk(fmt, ##args); } while (0)
2113 
2114 extern __u32 sysctl_wmem_max;
2115 extern __u32 sysctl_rmem_max;
2116 
2117 extern void sk_init(void);
2118 
2119 extern int sysctl_optmem_max;
2120 
2121 extern __u32 sysctl_wmem_default;
2122 extern __u32 sysctl_rmem_default;
2123 
2124 #endif	/* _SOCK_H */
2125