/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the AF_INET socket handler.
 *
 * Version:	@(#)sock.h	1.0.4	05/13/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche <flla@stud.uni-sb.de>
 *
 * Fixes:
 *		Alan Cox	:	Volatiles in skbuff pointers. See
 *					skbuff comments. May be overdone,
 *					better to prove they can be removed
 *					than the reverse.
 *		Alan Cox	:	Added a zapped field for tcp to note
 *					a socket is reset and must stay shut up
 *		Alan Cox	:	New fields for options
 *	Pauline Middelink	:	identd support
 *		Alan Cox	:	Eliminate low level recv/recvfrom
 *		David S. Miller	:	New socket lookup architecture.
 *		Steve Whitehouse:	Default routines for sock_ops
 *		Arnaldo C. Melo	:	removed net_pinfo, tp_pinfo and made
 *					protinfo be just a void pointer, as the
 *					protocol specific parts were moved to
 *					respective headers and ipv4/v6, etc now
 *					use private slabcaches for its socks
 *		Pedro Hortas	:	New flags field for socket options
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#ifndef _SOCK_H
#define _SOCK_H

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/cache.h>
#include <linux/module.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>	/* struct sk_buff */
#include <linux/mm.h>
#include <linux/security.h>

#include <linux/filter.h>

#include <asm/atomic.h>
#include <net/dst.h>
#include <net/checksum.h>
#include <net/net_namespace.h>

/*
 * This structure really needs to be cleaned up.
 * Most of it is for TCP, and not used by any of
 * the other protocols.
 */

/* Define this to get the SOCK_DBG debugging facility. */
#define SOCK_DEBUGGING
#ifdef SOCK_DEBUGGING
#define SOCK_DEBUG(sk, msg...) do { if ((sk) && sock_flag((sk), SOCK_DBG)) \
					printk(KERN_DEBUG msg); } while (0)
#else
#define SOCK_DEBUG(sk, msg...) do { } while (0)
#endif
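
/*
 * Example (illustrative sketch, not a fixed convention): with
 * SOCK_DEBUGGING defined, a protocol can emit per-socket traces that
 * only fire when the application enabled SO_DEBUG on the socket. Here
 * "seq" stands for any protocol-private state worth tracing:
 *
 *	SOCK_DEBUG(sk, "%s: rcv_nxt advanced to %u\n", __func__, seq);
 */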

/* This is the per-socket lock.  The spinlock provides a synchronization
 * between user contexts and software interrupt processing, whereas the
 * mini-semaphore synchronizes multiple users amongst themselves.
 */
typedef struct {
	spinlock_t		slock;
	int			owned;
	wait_queue_head_t	wq;
	/*
	 * We express the mutex-alike socket_lock semantics
	 * to the lock validator by explicitly managing
	 * the slock as a lock variant (in addition to
	 * the slock itself):
	 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
} socket_lock_t;

struct sock;
struct proto;

/**
 *	struct sock_common - minimal network layer representation of sockets
 *	@skc_family: network address family
 *	@skc_state: Connection state
 *	@skc_reuse: %SO_REUSEADDR setting
 *	@skc_bound_dev_if: bound device index if != 0
 *	@skc_node: main hash linkage for various protocol lookup tables
 *	@skc_bind_node: bind hash linkage for various protocol lookup tables
 *	@skc_refcnt: reference count
 *	@skc_hash: hash value used with various protocol lookup tables
 *	@skc_prot: protocol handlers inside a network family
 *	@skc_net: reference to the network namespace of this socket
 *
 *	This is the minimal network layer representation of sockets, the header
 *	for struct sock and struct inet_timewait_sock.
 */
struct sock_common {
	unsigned short		skc_family;
	volatile unsigned char	skc_state;
	unsigned char		skc_reuse;
	int			skc_bound_dev_if;
	struct hlist_node	skc_node;
	struct hlist_node	skc_bind_node;
	atomic_t		skc_refcnt;
	unsigned int		skc_hash;
	struct proto		*skc_prot;
	struct net		*skc_net;
};

/**
 *	struct sock - network layer representation of sockets
 *	@__sk_common: shared layout with inet_timewait_sock
 *	@sk_shutdown: mask of %SEND_SHUTDOWN and/or %RCV_SHUTDOWN
 *	@sk_userlocks: %SO_SNDBUF and %SO_RCVBUF settings
 *	@sk_lock:	synchronizer
 *	@sk_rcvbuf: size of receive buffer in bytes
 *	@sk_sleep: sock wait queue
 *	@sk_dst_cache: destination cache
 *	@sk_dst_lock: destination cache lock
 *	@sk_policy: flow policy
 *	@sk_rmem_alloc: receive queue bytes committed
 *	@sk_receive_queue: incoming packets
 *	@sk_wmem_alloc: transmit queue bytes committed
 *	@sk_write_queue: Packet sending queue
 *	@sk_async_wait_queue: DMA copied packets
 *	@sk_omem_alloc: "o" is "option" or "other"
 *	@sk_wmem_queued: persistent queue size
 *	@sk_forward_alloc: space allocated forward
 *	@sk_allocation: allocation mode
 *	@sk_sndbuf: size of send buffer in bytes
 *	@sk_flags: %SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE, %SO_OOBINLINE settings
 *	@sk_no_check: %SO_NO_CHECK setting, whether or not to checksum packets
 *	@sk_route_caps: route capabilities (e.g. %NETIF_F_TSO)
 *	@sk_gso_type: GSO type (e.g. %SKB_GSO_TCPV4)
 *	@sk_lingertime: %SO_LINGER l_linger setting
 *	@sk_backlog: always used with the per-socket spinlock held
 *	@sk_callback_lock: used with the callbacks in the end of this struct
 *	@sk_error_queue: rarely used
 *	@sk_prot_creator: sk_prot of original sock creator (see ipv6_setsockopt, IPV6_ADDRFORM for instance)
 *	@sk_err: last error
 *	@sk_err_soft: errors that don't cause failure but are the cause of a persistent failure, not just 'timed out'
 *	@sk_ack_backlog: current listen backlog
 *	@sk_max_ack_backlog: listen backlog set in listen()
 *	@sk_priority: %SO_PRIORITY setting
 *	@sk_type: socket type (%SOCK_STREAM, etc)
 *	@sk_protocol: which protocol this socket belongs in this network family
 *	@sk_peercred: %SO_PEERCRED setting
 *	@sk_rcvlowat: %SO_RCVLOWAT setting
 *	@sk_rcvtimeo: %SO_RCVTIMEO setting
 *	@sk_sndtimeo: %SO_SNDTIMEO setting
 *	@sk_filter: socket filtering instructions
 *	@sk_protinfo: private area, net family specific, when not using slab
 *	@sk_timer: sock cleanup timer
 *	@sk_stamp: time stamp of last packet received
 *	@sk_socket: Identd and reporting IO signals
 *	@sk_user_data: RPC layer private data
 *	@sk_sndmsg_page: cached page for sendmsg
 *	@sk_sndmsg_off: cached offset for sendmsg
 *	@sk_send_head: front of stuff to transmit
 *	@sk_security: used by security modules
 *	@sk_write_pending: a write to stream socket waits to start
 *	@sk_state_change: callback to indicate change in the state of the sock
 *	@sk_data_ready: callback to indicate there is data to be processed
 *	@sk_write_space: callback to indicate there is buffer sending space available
 *	@sk_error_report: callback to indicate errors (e.g. %MSG_ERRQUEUE)
 *	@sk_backlog_rcv: callback to process the backlog
 *	@sk_destruct: called at sock freeing time, i.e. when all refcnt == 0
 */
struct sock {
	/*
	 * Now struct inet_timewait_sock also uses sock_common, so please just
	 * don't add anything before this first member (__sk_common) --acme
	 */
	struct sock_common	__sk_common;
#define sk_family		__sk_common.skc_family
#define sk_state		__sk_common.skc_state
#define sk_reuse		__sk_common.skc_reuse
#define sk_bound_dev_if		__sk_common.skc_bound_dev_if
#define sk_node			__sk_common.skc_node
#define sk_bind_node		__sk_common.skc_bind_node
#define sk_refcnt		__sk_common.skc_refcnt
#define sk_hash			__sk_common.skc_hash
#define sk_prot			__sk_common.skc_prot
#define sk_net			__sk_common.skc_net
	unsigned char		sk_shutdown : 2,
				sk_no_check : 2,
				sk_userlocks : 4;
	unsigned char		sk_protocol;
	unsigned short		sk_type;
	int			sk_rcvbuf;
	socket_lock_t		sk_lock;
	/*
	 * The backlog queue is special, it is always used with
	 * the per-socket spinlock held and requires low latency
	 * access. Therefore we special-case its implementation.
	 */
	struct {
		struct sk_buff *head;
		struct sk_buff *tail;
	} sk_backlog;
	wait_queue_head_t	*sk_sleep;
	struct dst_entry	*sk_dst_cache;
	struct xfrm_policy	*sk_policy[2];
	rwlock_t		sk_dst_lock;
	atomic_t		sk_rmem_alloc;
	atomic_t		sk_wmem_alloc;
	atomic_t		sk_omem_alloc;
	int			sk_sndbuf;
	struct sk_buff_head	sk_receive_queue;
	struct sk_buff_head	sk_write_queue;
	struct sk_buff_head	sk_async_wait_queue;
	int			sk_wmem_queued;
	int			sk_forward_alloc;
	gfp_t			sk_allocation;
	int			sk_route_caps;
	int			sk_gso_type;
	int			sk_rcvlowat;
	unsigned long		sk_flags;
	unsigned long		sk_lingertime;
	struct sk_buff_head	sk_error_queue;
	struct proto		*sk_prot_creator;
	rwlock_t		sk_callback_lock;
	int			sk_err,
				sk_err_soft;
	unsigned short		sk_ack_backlog;
	unsigned short		sk_max_ack_backlog;
	__u32			sk_priority;
	struct ucred		sk_peercred;
	long			sk_rcvtimeo;
	long			sk_sndtimeo;
	struct sk_filter	*sk_filter;
	void			*sk_protinfo;
	struct timer_list	sk_timer;
	ktime_t			sk_stamp;
	struct socket		*sk_socket;
	void			*sk_user_data;
	struct page		*sk_sndmsg_page;
	struct sk_buff		*sk_send_head;
	__u32			sk_sndmsg_off;
	int			sk_write_pending;
	void			*sk_security;
	void			(*sk_state_change)(struct sock *sk);
	void			(*sk_data_ready)(struct sock *sk, int bytes);
	void			(*sk_write_space)(struct sock *sk);
	void			(*sk_error_report)(struct sock *sk);
	int			(*sk_backlog_rcv)(struct sock *sk,
						  struct sk_buff *skb);
	void			(*sk_destruct)(struct sock *sk);
};

/*
 * Hashed lists helper routines
 */
static inline struct sock *__sk_head(const struct hlist_head *head)
{
	return hlist_entry(head->first, struct sock, sk_node);
}

static inline struct sock *sk_head(const struct hlist_head *head)
{
	return hlist_empty(head) ? NULL : __sk_head(head);
}

static inline struct sock *sk_next(const struct sock *sk)
{
	return sk->sk_node.next ?
		hlist_entry(sk->sk_node.next, struct sock, sk_node) : NULL;
}

static inline int sk_unhashed(const struct sock *sk)
{
	return hlist_unhashed(&sk->sk_node);
}

static inline int sk_hashed(const struct sock *sk)
{
	return !sk_unhashed(sk);
}

static __inline__ void sk_node_init(struct hlist_node *node)
{
	node->pprev = NULL;
}

static __inline__ void __sk_del_node(struct sock *sk)
{
	__hlist_del(&sk->sk_node);
}

static __inline__ int __sk_del_node_init(struct sock *sk)
{
	if (sk_hashed(sk)) {
		__sk_del_node(sk);
		sk_node_init(&sk->sk_node);
		return 1;
	}
	return 0;
}

/* Grab the socket reference count. This operation is valid only
   when sk has ALREADY been grabbed, e.g. it has been found in a hash
   table or a list and the lookup is made under a lock preventing hash
   table modifications.
 */

static inline void sock_hold(struct sock *sk)
{
	atomic_inc(&sk->sk_refcnt);
}

/* Ungrab a socket in a context which assumes that the socket refcnt
   cannot hit zero, e.g. this is true in the context of any socketcall.
 */
static inline void __sock_put(struct sock *sk)
{
	atomic_dec(&sk->sk_refcnt);
}

static __inline__ int sk_del_node_init(struct sock *sk)
{
	int rc = __sk_del_node_init(sk);

	if (rc) {
		/* paranoid for a while -acme */
		WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
		__sock_put(sk);
	}
	return rc;
}

static __inline__ void __sk_add_node(struct sock *sk, struct hlist_head *list)
{
	hlist_add_head(&sk->sk_node, list);
}

static __inline__ void sk_add_node(struct sock *sk, struct hlist_head *list)
{
	sock_hold(sk);
	__sk_add_node(sk, list);
}

static __inline__ void __sk_del_bind_node(struct sock *sk)
{
	__hlist_del(&sk->sk_bind_node);
}

static __inline__ void sk_add_bind_node(struct sock *sk,
					struct hlist_head *list)
{
	hlist_add_head(&sk->sk_bind_node, list);
}

#define sk_for_each(__sk, node, list) \
	hlist_for_each_entry(__sk, node, list, sk_node)
#define sk_for_each_from(__sk, node) \
	if (__sk && ({ node = &(__sk)->sk_node; 1; })) \
		hlist_for_each_entry_from(__sk, node, sk_node)
#define sk_for_each_continue(__sk, node) \
	if (__sk && ({ node = &(__sk)->sk_node; 1; })) \
		hlist_for_each_entry_continue(__sk, node, sk_node)
#define sk_for_each_safe(__sk, node, tmp, list) \
	hlist_for_each_entry_safe(__sk, node, tmp, list, sk_node)
#define sk_for_each_bound(__sk, node, list) \
	hlist_for_each_entry(__sk, node, list, sk_bind_node)
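
/*
 * Example (illustrative sketch): a protocol lookup typically walks one
 * hash chain under the table lock using sk_for_each(). The chain head,
 * lock and match helper below are hypothetical:
 *
 *	struct hlist_node *node;
 *	struct sock *sk, *result = NULL;
 *
 *	read_lock(&my_hash_lock);
 *	sk_for_each(sk, node, &my_hash_chain) {
 *		if (my_match(sk, saddr, sport)) {
 *			sock_hold(sk);
 *			result = sk;
 *			break;
 *		}
 *	}
 *	read_unlock(&my_hash_lock);
 */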

/* Sock flags */
enum sock_flags {
	SOCK_DEAD,
	SOCK_DONE,
	SOCK_URGINLINE,
	SOCK_KEEPOPEN,
	SOCK_LINGER,
	SOCK_DESTROY,
	SOCK_BROADCAST,
	SOCK_TIMESTAMP,
	SOCK_ZAPPED,
	SOCK_USE_WRITE_QUEUE, /* whether to call sk->sk_write_space in sock_wfree */
	SOCK_DBG, /* %SO_DEBUG setting */
	SOCK_RCVTSTAMP, /* %SO_TIMESTAMP setting */
	SOCK_RCVTSTAMPNS, /* %SO_TIMESTAMPNS setting */
	SOCK_LOCALROUTE, /* route locally only, %SO_DONTROUTE setting */
	SOCK_QUEUE_SHRUNK, /* write queue has been shrunk recently */
};

static inline void sock_copy_flags(struct sock *nsk, struct sock *osk)
{
	nsk->sk_flags = osk->sk_flags;
}

static inline void sock_set_flag(struct sock *sk, enum sock_flags flag)
{
	__set_bit(flag, &sk->sk_flags);
}

static inline void sock_reset_flag(struct sock *sk, enum sock_flags flag)
{
	__clear_bit(flag, &sk->sk_flags);
}

static inline int sock_flag(struct sock *sk, enum sock_flags flag)
{
	return test_bit(flag, &sk->sk_flags);
}
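
/*
 * Example (illustrative sketch): setsockopt(SO_KEEPALIVE) amounts to
 * flipping one of these bits under the socket lock, roughly:
 *
 *	lock_sock(sk);
 *	if (valbool)
 *		sock_set_flag(sk, SOCK_KEEPOPEN);
 *	else
 *		sock_reset_flag(sk, SOCK_KEEPOPEN);
 *	release_sock(sk);
 *
 * (sock_valbool_flag() near the end of this header wraps exactly this
 * set/reset choice.)
 */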

static inline void sk_acceptq_removed(struct sock *sk)
{
	sk->sk_ack_backlog--;
}

static inline void sk_acceptq_added(struct sock *sk)
{
	sk->sk_ack_backlog++;
}

static inline int sk_acceptq_is_full(struct sock *sk)
{
	return sk->sk_ack_backlog > sk->sk_max_ack_backlog;
}
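
/*
 * Example (illustrative sketch): a listening protocol usually tests the
 * accept queue before minting a child socket for a new connection
 * request, dropping the request when user space is not keeping up:
 *
 *	if (sk_acceptq_is_full(sk))
 *		goto drop;
 */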

/*
 * Compute minimal free write space needed to queue new packets.
 */
static inline int sk_stream_min_wspace(struct sock *sk)
{
	return sk->sk_wmem_queued / 2;
}

static inline int sk_stream_wspace(struct sock *sk)
{
	return sk->sk_sndbuf - sk->sk_wmem_queued;
}

extern void sk_stream_write_space(struct sock *sk);

static inline int sk_stream_memory_free(struct sock *sk)
{
	return sk->sk_wmem_queued < sk->sk_sndbuf;
}

extern void sk_stream_rfree(struct sk_buff *skb);

static inline void sk_stream_set_owner_r(struct sk_buff *skb, struct sock *sk)
{
	skb->sk = sk;
	skb->destructor = sk_stream_rfree;
	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
	sk->sk_forward_alloc -= skb->truesize;
}

static inline void sk_stream_free_skb(struct sock *sk, struct sk_buff *skb)
{
	skb_truesize_check(skb);
	sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
	sk->sk_wmem_queued   -= skb->truesize;
	sk->sk_forward_alloc += skb->truesize;
	__kfree_skb(skb);
}

/* The per-socket spinlock must be held here. */
static inline void sk_add_backlog(struct sock *sk, struct sk_buff *skb)
{
	if (!sk->sk_backlog.tail) {
		sk->sk_backlog.head = sk->sk_backlog.tail = skb;
	} else {
		sk->sk_backlog.tail->next = skb;
		sk->sk_backlog.tail = skb;
	}
	skb->next = NULL;
}

#define sk_wait_event(__sk, __timeo, __condition)			\
	({	int __rc;						\
		release_sock(__sk);					\
		__rc = __condition;					\
		if (!__rc) {						\
			*(__timeo) = schedule_timeout(*(__timeo));	\
		}							\
		lock_sock(__sk);					\
		__rc = __condition;					\
		__rc;							\
	})
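
/*
 * Example (illustrative sketch): sk_wait_event() releases the socket
 * lock while sleeping and re-evaluates the condition with the lock
 * retaken, so callers wrap it in a wait-queue cycle much like
 * sk_wait_data() does:
 *
 *	DEFINE_WAIT(wait);
 *
 *	prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
 *	rc = sk_wait_event(sk, &timeo,
 *			   !skb_queue_empty(&sk->sk_receive_queue));
 *	finish_wait(sk->sk_sleep, &wait);
 */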

extern int sk_stream_wait_connect(struct sock *sk, long *timeo_p);
extern int sk_stream_wait_memory(struct sock *sk, long *timeo_p);
extern void sk_stream_wait_close(struct sock *sk, long timeo_p);
extern int sk_stream_error(struct sock *sk, int flags, int err);
extern void sk_stream_kill_queues(struct sock *sk);

extern int sk_wait_data(struct sock *sk, long *timeo);

struct request_sock_ops;
struct timewait_sock_ops;

/* Networking protocol blocks we attach to sockets.
 * socket layer -> transport layer interface
 * transport -> network interface is defined by struct inet_proto
 */
struct proto {
	void			(*close)(struct sock *sk,
					long timeout);
	int			(*connect)(struct sock *sk,
					struct sockaddr *uaddr,
					int addr_len);
	int			(*disconnect)(struct sock *sk, int flags);

	struct sock *		(*accept)(struct sock *sk, int flags, int *err);

	int			(*ioctl)(struct sock *sk, int cmd,
					 unsigned long arg);
	int			(*init)(struct sock *sk);
	int			(*destroy)(struct sock *sk);
	void			(*shutdown)(struct sock *sk, int how);
	int			(*setsockopt)(struct sock *sk, int level,
					int optname, char __user *optval,
					int optlen);
	int			(*getsockopt)(struct sock *sk, int level,
					int optname, char __user *optval,
					int __user *option);
	int			(*compat_setsockopt)(struct sock *sk,
					int level,
					int optname, char __user *optval,
					int optlen);
	int			(*compat_getsockopt)(struct sock *sk,
					int level,
					int optname, char __user *optval,
					int __user *option);
	int			(*sendmsg)(struct kiocb *iocb, struct sock *sk,
					   struct msghdr *msg, size_t len);
	int			(*recvmsg)(struct kiocb *iocb, struct sock *sk,
					   struct msghdr *msg,
					size_t len, int noblock, int flags,
					int *addr_len);
	int			(*sendpage)(struct sock *sk, struct page *page,
					int offset, size_t size, int flags);
	int			(*bind)(struct sock *sk,
					struct sockaddr *uaddr, int addr_len);

	int			(*backlog_rcv)(struct sock *sk,
						struct sk_buff *skb);

	/* Keeping track of sk's, looking them up, and port selection methods. */
	void			(*hash)(struct sock *sk);
	void			(*unhash)(struct sock *sk);
	int			(*get_port)(struct sock *sk, unsigned short snum);

	/* Memory pressure */
	void			(*enter_memory_pressure)(void);
	atomic_t		*memory_allocated;	/* Current allocated memory. */
	atomic_t		*sockets_allocated;	/* Current number of sockets. */
	/*
	 * Pressure flag: try to collapse.
	 * Technical note: it is used by multiple contexts non-atomically.
	 * All of sk_stream_mem_schedule() is of this nature: accounting
	 * is strict, actions are advisory and have some latency.
	 */
	int			*memory_pressure;
	int			*sysctl_mem;
	int			*sysctl_wmem;
	int			*sysctl_rmem;
	int			max_header;

	struct kmem_cache	*slab;
	unsigned int		obj_size;

	atomic_t		*orphan_count;

	struct request_sock_ops	*rsk_prot;
	struct timewait_sock_ops *twsk_prot;

	struct module		*owner;

	char			name[32];

	struct list_head	node;
#ifdef SOCK_REFCNT_DEBUG
	atomic_t		socks;
#endif
	struct {
		int inuse;
		u8  __pad[SMP_CACHE_BYTES - sizeof(int)];
	} stats[NR_CPUS];
};
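
/*
 * Example (illustrative sketch): a minimal protocol block only fills in
 * the handlers it supports plus name/owner/obj_size; unused hooks stay
 * NULL. The identifiers below are hypothetical:
 *
 *	static struct proto my_proto = {
 *		.name	  = "MYPROTO",
 *		.owner	  = THIS_MODULE,
 *		.obj_size = sizeof(struct my_sock),
 *		.close	  = my_close,
 *		.sendmsg  = my_sendmsg,
 *		.recvmsg  = my_recvmsg,
 *	};
 *
 * Registering with proto_register(&my_proto, 1) additionally creates a
 * slab cache of obj_size objects for this protocol's sockets.
 */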

extern int proto_register(struct proto *prot, int alloc_slab);
extern void proto_unregister(struct proto *prot);

#ifdef SOCK_REFCNT_DEBUG
static inline void sk_refcnt_debug_inc(struct sock *sk)
{
	atomic_inc(&sk->sk_prot->socks);
}

static inline void sk_refcnt_debug_dec(struct sock *sk)
{
	atomic_dec(&sk->sk_prot->socks);
	printk(KERN_DEBUG "%s socket %p released, %d are still alive\n",
	       sk->sk_prot->name, sk, atomic_read(&sk->sk_prot->socks));
}

static inline void sk_refcnt_debug_release(const struct sock *sk)
{
	if (atomic_read(&sk->sk_refcnt) != 1)
		printk(KERN_DEBUG "Destruction of the %s socket %p delayed, refcnt=%d\n",
		       sk->sk_prot->name, sk, atomic_read(&sk->sk_refcnt));
}
#else /* SOCK_REFCNT_DEBUG */
#define sk_refcnt_debug_inc(sk) do { } while (0)
#define sk_refcnt_debug_dec(sk) do { } while (0)
#define sk_refcnt_debug_release(sk) do { } while (0)
#endif /* SOCK_REFCNT_DEBUG */

/* Called with local bh disabled */
static __inline__ void sock_prot_inc_use(struct proto *prot)
{
	prot->stats[smp_processor_id()].inuse++;
}

static __inline__ void sock_prot_dec_use(struct proto *prot)
{
	prot->stats[smp_processor_id()].inuse--;
}

/* With per-bucket locks this operation is not atomic, so
 * this version is no worse.
 */
static inline void __sk_prot_rehash(struct sock *sk)
{
	sk->sk_prot->unhash(sk);
	sk->sk_prot->hash(sk);
}

/* About 10 seconds */
#define SOCK_DESTROY_TIME (10*HZ)

/* Ports 0-1023 can't be bound to unless you are superuser */
#define PROT_SOCK	1024

#define SHUTDOWN_MASK	3
#define RCV_SHUTDOWN	1
#define SEND_SHUTDOWN	2

#define SOCK_SNDBUF_LOCK	1
#define SOCK_RCVBUF_LOCK	2
#define SOCK_BINDADDR_LOCK	4
#define SOCK_BINDPORT_LOCK	8

/* sock_iocb: used to kick off async processing of socket ios */
struct sock_iocb {
	struct list_head	list;

	int			flags;
	int			size;
	struct socket		*sock;
	struct sock		*sk;
	struct scm_cookie	*scm;
	struct msghdr		*msg, async_msg;
	struct kiocb		*kiocb;
};

static inline struct sock_iocb *kiocb_to_siocb(struct kiocb *iocb)
{
	return (struct sock_iocb *)iocb->private;
}

static inline struct kiocb *siocb_to_kiocb(struct sock_iocb *si)
{
	return si->kiocb;
}

struct socket_alloc {
	struct socket socket;
	struct inode vfs_inode;
};

static inline struct socket *SOCKET_I(struct inode *inode)
{
	return &container_of(inode, struct socket_alloc, vfs_inode)->socket;
}

static inline struct inode *SOCK_INODE(struct socket *socket)
{
	return &container_of(socket, struct socket_alloc, socket)->vfs_inode;
}

extern void __sk_stream_mem_reclaim(struct sock *sk);
extern int sk_stream_mem_schedule(struct sock *sk, int size, int kind);

#define SK_STREAM_MEM_QUANTUM ((int)PAGE_SIZE)

static inline int sk_stream_pages(int amt)
{
	return DIV_ROUND_UP(amt, SK_STREAM_MEM_QUANTUM);
}

static inline void sk_stream_mem_reclaim(struct sock *sk)
{
	if (sk->sk_forward_alloc >= SK_STREAM_MEM_QUANTUM)
		__sk_stream_mem_reclaim(sk);
}

static inline int sk_stream_rmem_schedule(struct sock *sk, struct sk_buff *skb)
{
	return (int)skb->truesize <= sk->sk_forward_alloc ||
		sk_stream_mem_schedule(sk, skb->truesize, 1);
}

static inline int sk_stream_wmem_schedule(struct sock *sk, int size)
{
	return size <= sk->sk_forward_alloc ||
	       sk_stream_mem_schedule(sk, size, 0);
}
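
/*
 * Example (illustrative sketch): a stream sender charges each new skb
 * against forward-allocated memory before queueing it, and backs off to
 * wait for memory when the charge is refused:
 *
 *	if (!sk_stream_wmem_schedule(sk, skb->truesize))
 *		goto wait_for_memory;
 *	sk_charge_skb(sk, skb);
 *	__skb_queue_tail(&sk->sk_write_queue, skb);
 *
 * (sk_charge_skb() is declared further down in this header.)
 */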

/* Used by processes to "lock" a socket state, so that
 * interrupts and bottom half handlers won't change it
 * from under us. It essentially blocks any incoming
 * packets, so that we won't get any new data or any
 * packets that change the state of the socket.
 *
 * While locked, BH processing will add new packets to
 * the backlog queue.  This queue is processed by the
 * owner of the socket lock right before it is released.
 *
 * Since ~2.3.5 it is also an exclusive sleep lock serializing
 * accesses from user process context.
 */
#define sock_owned_by_user(sk)	((sk)->sk_lock.owned)

/*
 * Macro so as to not evaluate some arguments when
 * lockdep is not enabled.
 *
 * Mark both the sk_lock and the sk_lock.slock as a
 * per-address-family lock class.
 */
#define sock_lock_init_class_and_name(sk, sname, skey, name, key)	\
do {									\
	sk->sk_lock.owned = 0;						\
	init_waitqueue_head(&sk->sk_lock.wq);				\
	spin_lock_init(&(sk)->sk_lock.slock);				\
	debug_check_no_locks_freed((void *)&(sk)->sk_lock,		\
			sizeof((sk)->sk_lock));				\
	lockdep_set_class_and_name(&(sk)->sk_lock.slock,		\
			(skey), (sname));				\
	lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0);	\
} while (0)

extern void FASTCALL(lock_sock_nested(struct sock *sk, int subclass));

static inline void lock_sock(struct sock *sk)
{
	lock_sock_nested(sk, 0);
}

extern void FASTCALL(release_sock(struct sock *sk));

/* BH context may only use the following locking interface. */
#define bh_lock_sock(__sk)	spin_lock(&((__sk)->sk_lock.slock))
#define bh_lock_sock_nested(__sk) \
				spin_lock_nested(&((__sk)->sk_lock.slock), \
				SINGLE_DEPTH_NESTING)
#define bh_unlock_sock(__sk)	spin_unlock(&((__sk)->sk_lock.slock))
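
/*
 * Example (illustrative sketch): softirq input paths combine
 * bh_lock_sock() with the backlog so they never spin on a socket that a
 * process currently owns; compare sk_receive_skb():
 *
 *	bh_lock_sock(sk);
 *	if (!sock_owned_by_user(sk))
 *		rc = sk->sk_backlog_rcv(sk, skb);
 *	else
 *		sk_add_backlog(sk, skb);
 *	bh_unlock_sock(sk);
 */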

extern struct sock		*sk_alloc(struct net *net, int family,
					  gfp_t priority,
					  struct proto *prot);
extern void			sk_free(struct sock *sk);
extern struct sock		*sk_clone(const struct sock *sk,
					  const gfp_t priority);

extern struct sk_buff		*sock_wmalloc(struct sock *sk,
					      unsigned long size, int force,
					      gfp_t priority);
extern struct sk_buff		*sock_rmalloc(struct sock *sk,
					      unsigned long size, int force,
					      gfp_t priority);
extern void			sock_wfree(struct sk_buff *skb);
extern void			sock_rfree(struct sk_buff *skb);

extern int			sock_setsockopt(struct socket *sock, int level,
						int op, char __user *optval,
						int optlen);

extern int			sock_getsockopt(struct socket *sock, int level,
						int op, char __user *optval,
						int __user *optlen);
extern struct sk_buff		*sock_alloc_send_skb(struct sock *sk,
						     unsigned long size,
						     int noblock,
						     int *errcode);
extern void *sock_kmalloc(struct sock *sk, int size,
			  gfp_t priority);
extern void sock_kfree_s(struct sock *sk, void *mem, int size);
extern void sk_send_sigurg(struct sock *sk);

/*
 * Functions to fill in entries in struct proto_ops when a protocol
 * does not implement a particular function.
 */
extern int			sock_no_bind(struct socket *,
					     struct sockaddr *, int);
extern int			sock_no_connect(struct socket *,
						struct sockaddr *, int, int);
extern int			sock_no_socketpair(struct socket *,
						   struct socket *);
extern int			sock_no_accept(struct socket *,
					       struct socket *, int);
extern int			sock_no_getname(struct socket *,
						struct sockaddr *, int *, int);
extern unsigned int		sock_no_poll(struct file *, struct socket *,
					     struct poll_table_struct *);
extern int			sock_no_ioctl(struct socket *, unsigned int,
					      unsigned long);
extern int			sock_no_listen(struct socket *, int);
extern int			sock_no_shutdown(struct socket *, int);
extern int			sock_no_getsockopt(struct socket *, int, int,
						   char __user *, int __user *);
extern int			sock_no_setsockopt(struct socket *, int, int,
						   char __user *, int);
extern int			sock_no_sendmsg(struct kiocb *, struct socket *,
						struct msghdr *, size_t);
extern int			sock_no_recvmsg(struct kiocb *, struct socket *,
						struct msghdr *, size_t, int);
extern int			sock_no_mmap(struct file *file,
					     struct socket *sock,
					     struct vm_area_struct *vma);
extern ssize_t			sock_no_sendpage(struct socket *sock,
						struct page *page,
						int offset, size_t size,
						int flags);

/*
 * Functions to fill in entries in struct proto_ops when a protocol
 * uses the inet style.
 */
extern int sock_common_getsockopt(struct socket *sock, int level, int optname,
				  char __user *optval, int __user *optlen);
extern int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
			       struct msghdr *msg, size_t size, int flags);
extern int sock_common_setsockopt(struct socket *sock, int level, int optname,
				  char __user *optval, int optlen);
extern int compat_sock_common_getsockopt(struct socket *sock, int level,
		int optname, char __user *optval, int __user *optlen);
extern int compat_sock_common_setsockopt(struct socket *sock, int level,
		int optname, char __user *optval, int optlen);

extern void sk_common_release(struct sock *sk);

/*
 *	Default socket callbacks and setup code
 */

/* Initialise core socket variables */
extern void sock_init_data(struct socket *sock, struct sock *sk);

/**
 *	sk_filter - run a packet through a socket filter
 *	@sk: sock associated with &sk_buff
 *	@skb: buffer to filter
 *
 * Run the filter code and then cut skb->data to the correct size returned
 * by sk_run_filter. If pkt_len is 0 we toss the packet. If skb->len is
 * smaller than pkt_len we keep the whole skb->data. This is the socket
 * level wrapper to sk_run_filter. It returns 0 if the packet should
 * be accepted or -EPERM if the packet should be tossed.
 */

static inline int sk_filter(struct sock *sk, struct sk_buff *skb)
{
	int err;
	struct sk_filter *filter;

	err = security_sock_rcv_skb(sk, skb);
	if (err)
		return err;

	rcu_read_lock_bh();
	filter = sk->sk_filter;
	if (filter) {
		unsigned int pkt_len = sk_run_filter(skb, filter->insns,
				filter->len);
		err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM;
	}
	rcu_read_unlock_bh();

	return err;
}
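
/*
 * Example (illustrative sketch): datagram receive paths run sk_filter()
 * before queueing and drop the packet on a non-zero return:
 *
 *	if (sk_filter(sk, skb)) {
 *		kfree_skb(skb);
 *		return NET_RX_DROP;
 *	}
 */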

/**
 *	sk_filter_release - release a socket filter
 *	@fp: filter to remove
 *
 *	Remove a filter from a socket and release its resources.
 */

static inline void sk_filter_release(struct sk_filter *fp)
{
	if (atomic_dec_and_test(&fp->refcnt))
		kfree(fp);
}

static inline void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
{
	unsigned int size = sk_filter_len(fp);

	atomic_sub(size, &sk->sk_omem_alloc);
	sk_filter_release(fp);
}

static inline void sk_filter_charge(struct sock *sk, struct sk_filter *fp)
{
	atomic_inc(&fp->refcnt);
	atomic_add(sk_filter_len(fp), &sk->sk_omem_alloc);
}

/*
 * Socket reference counting postulates.
 *
 * * Each user of a socket SHOULD hold a reference count.
 * * Each access point to a socket (a hash table bucket, a reference from a
 *   list, a running timer, an skb in flight) MUST hold a reference count.
 * * When the reference count hits 0, it will never increase again.
 * * When the reference count hits 0, no references from outside exist to
 *   this socket and the current process on the current CPU is the last
 *   user and may/should destroy this socket.
 * * sk_free is called from any context: process, BH, IRQ. When
 *   it is called, the socket has no references from outside -> sk_free
 *   may release descendant resources allocated by the socket, but by
 *   the time it is called, the socket is NOT referenced by any
 *   hash tables, lists etc.
 * * Packets, delivered from outside (from network or from another process)
 *   and enqueued on receive/error queues SHOULD NOT grab a reference count
 *   while they sit in a queue. Otherwise, packets would leak into a hole
 *   when a socket is looked up by one CPU and unhashing is done by another
 *   CPU. This holds for udp/raw, netlink (leak to receive and error queues)
 *   and tcp (leak to backlog). The packet socket does all its processing
 *   inside BR_NETPROTO_LOCK, so it does not have this race condition. UNIX
 *   sockets use a separate SMP lock, so they are protected as well.
 */

/* Ungrab socket and destroy it, if it was the last reference. */
static inline void sock_put(struct sock *sk)
{
	if (atomic_dec_and_test(&sk->sk_refcnt))
		sk_free(sk);
}
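
/*
 * Example (illustrative sketch): a hold/put pair brackets every use of
 * a socket obtained from a lookup, so the socket cannot be freed while
 * the packet is being processed. my_lookup() is hypothetical and is
 * assumed to return with a reference already held:
 *
 *	sk = my_lookup(net, saddr, sport);
 *	if (sk) {
 *		process(sk, skb);
 *		sock_put(sk);
 *	}
 */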

extern int sk_receive_skb(struct sock *sk, struct sk_buff *skb,
			  const int nested);

/* Detach socket from process context.
 * Announce socket dead, detach it from wait queue and inode.
 * Note that the parent inode holds a reference count on this struct sock,
 * and we do not release it in this function, because the protocol
 * probably wants some additional cleanups or even to continue
 * working with this socket (TCP).
 */
static inline void sock_orphan(struct sock *sk)
{
	write_lock_bh(&sk->sk_callback_lock);
	sock_set_flag(sk, SOCK_DEAD);
	sk->sk_socket = NULL;
	sk->sk_sleep  = NULL;
	write_unlock_bh(&sk->sk_callback_lock);
}

static inline void sock_graft(struct sock *sk, struct socket *parent)
{
	write_lock_bh(&sk->sk_callback_lock);
	sk->sk_sleep = &parent->wait;
	parent->sk = sk;
	sk->sk_socket = parent;
	security_sock_graft(sk, parent);
	write_unlock_bh(&sk->sk_callback_lock);
}

extern int sock_i_uid(struct sock *sk);
extern unsigned long sock_i_ino(struct sock *sk);

static inline struct dst_entry *
__sk_dst_get(struct sock *sk)
{
	return sk->sk_dst_cache;
}

static inline struct dst_entry *
sk_dst_get(struct sock *sk)
{
	struct dst_entry *dst;

	read_lock(&sk->sk_dst_lock);
	dst = sk->sk_dst_cache;
	if (dst)
		dst_hold(dst);
	read_unlock(&sk->sk_dst_lock);
	return dst;
}

static inline void
__sk_dst_set(struct sock *sk, struct dst_entry *dst)
{
	struct dst_entry *old_dst;

	old_dst = sk->sk_dst_cache;
	sk->sk_dst_cache = dst;
	dst_release(old_dst);
}

static inline void
sk_dst_set(struct sock *sk, struct dst_entry *dst)
{
	write_lock(&sk->sk_dst_lock);
	__sk_dst_set(sk, dst);
	write_unlock(&sk->sk_dst_lock);
}

static inline void
__sk_dst_reset(struct sock *sk)
{
	struct dst_entry *old_dst;

	old_dst = sk->sk_dst_cache;
	sk->sk_dst_cache = NULL;
	dst_release(old_dst);
}

static inline void
sk_dst_reset(struct sock *sk)
{
	write_lock(&sk->sk_dst_lock);
	__sk_dst_reset(sk);
	write_unlock(&sk->sk_dst_lock);
}
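
/*
 * Example (illustrative sketch): a transmit path revalidates the cached
 * route and falls back to a fresh lookup when the cache was invalidated;
 * my_route_lookup() is hypothetical:
 *
 *	struct dst_entry *dst = __sk_dst_check(sk, 0);
 *
 *	if (!dst) {
 *		dst = my_route_lookup(sk);
 *		if (!dst)
 *			return -EHOSTUNREACH;
 *		__sk_dst_set(sk, dst);
 *	}
 */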

extern struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);

extern struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie);

static inline int sk_can_gso(const struct sock *sk)
{
	return net_gso_ok(sk->sk_route_caps, sk->sk_gso_type);
}

extern void sk_setup_caps(struct sock *sk, struct dst_entry *dst);

static inline void sk_charge_skb(struct sock *sk, struct sk_buff *skb)
{
	sk->sk_wmem_queued   += skb->truesize;
	sk->sk_forward_alloc -= skb->truesize;
}

static inline int skb_copy_to_page(struct sock *sk, char __user *from,
				   struct sk_buff *skb, struct page *page,
				   int off, int copy)
{
	if (skb->ip_summed == CHECKSUM_NONE) {
		int err = 0;
		__wsum csum = csum_and_copy_from_user(from,
						      page_address(page) + off,
						      copy, 0, &err);
		if (err)
			return err;
		skb->csum = csum_block_add(skb->csum, csum, skb->len);
	} else if (copy_from_user(page_address(page) + off, from, copy))
		return -EFAULT;

	skb->len	     += copy;
	skb->data_len	     += copy;
	skb->truesize	     += copy;
	sk->sk_wmem_queued   += copy;
	sk->sk_forward_alloc -= copy;
	return 0;
}

/*
 *	Queue a received datagram if it will fit. Stream and sequenced
 *	protocols can't normally use this as they need to fit buffers in
 *	and play with them.
 *
 *	Inlined as it's very short and called for pretty much every
 *	packet ever received.
 */

static inline void skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
{
	sock_hold(sk);
	skb->sk = sk;
	skb->destructor = sock_wfree;
	atomic_add(skb->truesize, &sk->sk_wmem_alloc);
}

static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
{
	skb->sk = sk;
	skb->destructor = sock_rfree;
	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
}

extern void sk_reset_timer(struct sock *sk, struct timer_list *timer,
			   unsigned long expires);

extern void sk_stop_timer(struct sock *sk, struct timer_list *timer);

extern int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);

static inline int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
{
	/* Cast sk->sk_rcvbuf to unsigned... It's pointless, but reduces
	   the number of warnings when compiling with -W --ANK
	 */
	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
	    (unsigned)sk->sk_rcvbuf)
		return -ENOMEM;
	skb_set_owner_r(skb, sk);
	skb_queue_tail(&sk->sk_error_queue, skb);
	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk, skb->len);
	return 0;
}

/*
 *	Recover an error report and clear it atomically
 */

static inline int sock_error(struct sock *sk)
{
	int err;
	if (likely(!sk->sk_err))
		return 0;
	err = xchg(&sk->sk_err, 0);
	return -err;
}

static inline unsigned long sock_wspace(struct sock *sk)
{
	int amt = 0;

	if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
		amt = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc);
		if (amt < 0)
			amt = 0;
	}
	return amt;
}

static inline void sk_wake_async(struct sock *sk, int how, int band)
{
	if (sk->sk_socket && sk->sk_socket->fasync_list)
		sock_wake_async(sk->sk_socket, how, band);
}

#define SOCK_MIN_SNDBUF 2048
#define SOCK_MIN_RCVBUF 256

static inline void sk_stream_moderate_sndbuf(struct sock *sk)
{
	if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK)) {
		sk->sk_sndbuf = min(sk->sk_sndbuf, sk->sk_wmem_queued / 2);
		sk->sk_sndbuf = max(sk->sk_sndbuf, SOCK_MIN_SNDBUF);
	}
}

static inline struct sk_buff *sk_stream_alloc_pskb(struct sock *sk,
						   int size, int mem,
						   gfp_t gfp)
{
	struct sk_buff *skb;
	int hdr_len;

	hdr_len = SKB_DATA_ALIGN(sk->sk_prot->max_header);
	skb = alloc_skb_fclone(size + hdr_len, gfp);
	if (skb) {
		skb->truesize += mem;
		if (sk_stream_wmem_schedule(sk, skb->truesize)) {
			skb_reserve(skb, hdr_len);
			return skb;
		}
		__kfree_skb(skb);
	} else {
		sk->sk_prot->enter_memory_pressure();
		sk_stream_moderate_sndbuf(sk);
	}
	return NULL;
}

static inline struct sk_buff *sk_stream_alloc_skb(struct sock *sk,
						  int size,
						  gfp_t gfp)
{
	return sk_stream_alloc_pskb(sk, size, 0, gfp);
}

static inline struct page *sk_stream_alloc_page(struct sock *sk)
{
	struct page *page = NULL;

	page = alloc_pages(sk->sk_allocation, 0);
	if (!page) {
		sk->sk_prot->enter_memory_pressure();
		sk_stream_moderate_sndbuf(sk);
	}
	return page;
}

/*
 *	Default write policy as shown to user space via poll/select/SIGIO
 */
static inline int sock_writeable(const struct sock *sk)
{
	return atomic_read(&sk->sk_wmem_alloc) < (sk->sk_sndbuf / 2);
}

static inline gfp_t gfp_any(void)
{
	return in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
}

static inline long sock_rcvtimeo(const struct sock *sk, int noblock)
{
	return noblock ? 0 : sk->sk_rcvtimeo;
}

static inline long sock_sndtimeo(const struct sock *sk, int noblock)
{
	return noblock ? 0 : sk->sk_sndtimeo;
}

static inline int sock_rcvlowat(const struct sock *sk, int waitall, int len)
{
	return (waitall ? len : min_t(int, sk->sk_rcvlowat, len)) ? : 1;
}

/* Alas, with timeouts socket operations are not restartable.
 * Compare this to poll().
 */
static inline int sock_intr_errno(long timeo)
{
	return timeo == MAX_SCHEDULE_TIMEOUT ? -ERESTARTSYS : -EINTR;
}

extern void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
	struct sk_buff *skb);

static __inline__ void
sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
{
	ktime_t kt = skb->tstamp;

	if (sock_flag(sk, SOCK_RCVTSTAMP))
		__sock_recv_timestamp(msg, sk, skb);
	else
		sk->sk_stamp = kt;
}

/**
 * sk_eat_skb - Release a skb if it is no longer needed
 * @sk: socket to eat this skb from
 * @skb: socket buffer to eat
 * @copied_early: flag indicating whether DMA operations copied this data early
 *
 * This routine must be called with interrupts disabled or with the socket
 * locked so that the sk_buff queue operation is ok.
 */
#ifdef CONFIG_NET_DMA
static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, int copied_early)
{
	__skb_unlink(skb, &sk->sk_receive_queue);
	if (!copied_early)
		__kfree_skb(skb);
	else
		__skb_queue_tail(&sk->sk_async_wait_queue, skb);
}
#else
static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, int copied_early)
{
	__skb_unlink(skb, &sk->sk_receive_queue);
	__kfree_skb(skb);
}
#endif

extern void sock_enable_timestamp(struct sock *sk);
extern int sock_get_timestamp(struct sock *, struct timeval __user *);
extern int sock_get_timestampns(struct sock *, struct timespec __user *);

/*
 *	Enable debug/info messages
 */
extern int net_msg_warn;
#define NETDEBUG(fmt, args...) \
	do { if (net_msg_warn) printk(fmt, ##args); } while (0)

#define LIMIT_NETDEBUG(fmt, args...) \
	do { if (net_msg_warn && net_ratelimit()) printk(fmt, ##args); } while (0)

/*
 * Macros for sleeping on a socket. Use them like this:
 *
 * SOCK_SLEEP_PRE(sk)
 * if (condition)
 *	schedule();
 * SOCK_SLEEP_POST(sk)
 *
 * N.B. These are now obsolete and were, afaik, only ever used in DECnet
 * and when the last use of them in DECnet has gone, I'm intending to
 * remove them.
 */

#define SOCK_SLEEP_PRE(sk)	{ struct task_struct *tsk = current; \
				DECLARE_WAITQUEUE(wait, tsk); \
				tsk->state = TASK_INTERRUPTIBLE; \
				add_wait_queue((sk)->sk_sleep, &wait); \
				release_sock(sk);

#define SOCK_SLEEP_POST(sk)	tsk->state = TASK_RUNNING; \
				remove_wait_queue((sk)->sk_sleep, &wait); \
				lock_sock(sk); \
				}

static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
{
	if (valbool)
		sock_set_flag(sk, bit);
	else
		sock_reset_flag(sk, bit);
}

extern __u32 sysctl_wmem_max;
extern __u32 sysctl_rmem_max;

extern void sk_init(void);

#ifdef CONFIG_SYSCTL
extern struct ctl_table core_table[];
#endif

extern int sysctl_optmem_max;

extern __u32 sysctl_wmem_default;
extern __u32 sysctl_rmem_default;

#endif	/* _SOCK_H */