xref: /linux/include/net/sock.h (revision 776cfebb430c7b22c208b1b17add97f354d97cab)
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the AF_INET socket handler.
 *
 * Version:	@(#)sock.h	1.0.4	05/13/93
 *
 * Authors:	Ross Biro, <bir7@leland.Stanford.Edu>
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche <flla@stud.uni-sb.de>
 *
 * Fixes:
 *		Alan Cox	:	Volatiles in skbuff pointers. See
 *					skbuff comments. May be overdone,
 *					better to prove they can be removed
 *					than the reverse.
 *		Alan Cox	:	Added a zapped field for tcp to note
 *					a socket is reset and must stay shut up
 *		Alan Cox	:	New fields for options
 *	Pauline Middelink	:	identd support
 *		Alan Cox	:	Eliminate low level recv/recvfrom
 *		David S. Miller	:	New socket lookup architecture.
 *		Steve Whitehouse:	Default routines for sock_ops
 *		Arnaldo C. Melo	:	removed net_pinfo, tp_pinfo and made
 *					protinfo be just a void pointer, as the
 *					protocol specific parts were moved to
 *					respective headers and ipv4/v6, etc now
 *					use private slabcaches for their socks
 *		Pedro Hortas	:	New flags field for socket options
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#ifndef _SOCK_H
#define _SOCK_H

#include <linux/config.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/cache.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>	/* struct sk_buff */
#include <linux/security.h>

#include <linux/filter.h>

#include <asm/atomic.h>
#include <net/dst.h>
#include <net/checksum.h>

/*
 * This structure really needs to be cleaned up.
 * Most of it is for TCP, and not used by any of
 * the other protocols.
 */

/* Define this to get the SOCK_DBG debugging facility. */
#define SOCK_DEBUGGING
#ifdef SOCK_DEBUGGING
#define SOCK_DEBUG(sk, msg...) do { if ((sk) && sock_flag((sk), SOCK_DBG)) \
					printk(KERN_DEBUG msg); } while (0)
#else
#define SOCK_DEBUG(sk, msg...) do { } while (0)
#endif

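/*
 * Illustrative sketch (not part of the original header): how a protocol
 * might use SOCK_DEBUG(). The message is only emitted when the socket has
 * the SOCK_DBG flag set via SO_DEBUG; the function name below is made up.
 */
#if 0
static void example_debug_rcv(struct sock *sk, struct sk_buff *skb)
{
	SOCK_DEBUG(sk, "example: received %d bytes, rcvbuf=%d\n",
		   skb->len, sk->sk_rcvbuf);
}
#endif
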
/* This is the per-socket lock.  The spinlock provides a synchronization
 * between user contexts and software interrupt processing, whereas the
 * mini-semaphore synchronizes multiple users amongst themselves.
 */
struct sock_iocb;
typedef struct {
	spinlock_t		slock;
	struct sock_iocb	*owner;
	wait_queue_head_t	wq;
} socket_lock_t;

#define sock_lock_init(__sk) \
do {	spin_lock_init(&((__sk)->sk_lock.slock)); \
	(__sk)->sk_lock.owner = NULL; \
	init_waitqueue_head(&((__sk)->sk_lock.wq)); \
} while(0)

struct sock;

/**
 *	struct sock_common - minimal network layer representation of sockets
 *	@skc_family: network address family
 *	@skc_state: Connection state
 *	@skc_reuse: %SO_REUSEADDR setting
 *	@skc_bound_dev_if: bound device index if != 0
 *	@skc_node: main hash linkage for various protocol lookup tables
 *	@skc_bind_node: bind hash linkage for various protocol lookup tables
 *	@skc_refcnt: reference count
 *
 *	This is the minimal network layer representation of sockets, the header
 *	for struct sock and struct tcp_tw_bucket.
 */
struct sock_common {
	unsigned short		skc_family;
	volatile unsigned char	skc_state;
	unsigned char		skc_reuse;
	int			skc_bound_dev_if;
	struct hlist_node	skc_node;
	struct hlist_node	skc_bind_node;
	atomic_t		skc_refcnt;
};

/**
 *	struct sock - network layer representation of sockets
 *	@__sk_common: shared layout with tcp_tw_bucket
 *	@sk_shutdown: mask of %SEND_SHUTDOWN and/or %RCV_SHUTDOWN
 *	@sk_userlocks: %SO_SNDBUF and %SO_RCVBUF settings
 *	@sk_lock:	synchronizer
 *	@sk_rcvbuf: size of receive buffer in bytes
 *	@sk_sleep: sock wait queue
 *	@sk_dst_cache: destination cache
 *	@sk_dst_lock: destination cache lock
 *	@sk_policy: flow policy
 *	@sk_rmem_alloc: receive queue bytes committed
 *	@sk_receive_queue: incoming packets
 *	@sk_wmem_alloc: transmit queue bytes committed
 *	@sk_write_queue: Packet sending queue
 *	@sk_omem_alloc: "o" is "option" or "other"
 *	@sk_wmem_queued: persistent queue size
 *	@sk_forward_alloc: space allocated forward
 *	@sk_allocation: allocation mode
 *	@sk_sndbuf: size of send buffer in bytes
 *	@sk_flags: %SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE, %SO_OOBINLINE settings
 *	@sk_no_check: %SO_NO_CHECK setting, whether or not to checksum packets
 *	@sk_route_caps: route capabilities (e.g. %NETIF_F_TSO)
 *	@sk_lingertime: %SO_LINGER l_linger setting
 *	@sk_hashent: hash entry in several tables (e.g. tcp_ehash)
 *	@sk_backlog: always used with the per-socket spinlock held
 *	@sk_callback_lock: used with the callbacks at the end of this struct
 *	@sk_error_queue: rarely used
 *	@sk_prot: protocol handlers inside a network family
 *	@sk_prot_creator: sk_prot of the original sock creator (see ipv6_setsockopt, IPV6_ADDRFORM for instance)
 *	@sk_err: last error
 *	@sk_err_soft: errors that don't cause failure but are the cause of a persistent failure, not just 'timed out'
 *	@sk_ack_backlog: current listen backlog
 *	@sk_max_ack_backlog: listen backlog set in listen()
 *	@sk_priority: %SO_PRIORITY setting
 *	@sk_type: socket type (%SOCK_STREAM, etc)
 *	@sk_protocol: which protocol this socket belongs to in this network family
 *	@sk_peercred: %SO_PEERCRED setting
 *	@sk_rcvlowat: %SO_RCVLOWAT setting
 *	@sk_rcvtimeo: %SO_RCVTIMEO setting
 *	@sk_sndtimeo: %SO_SNDTIMEO setting
 *	@sk_filter: socket filtering instructions
 *	@sk_protinfo: private area, net family specific, when not using slab
 *	@sk_timer: sock cleanup timer
 *	@sk_stamp: time stamp of last packet received
 *	@sk_socket: identd and reporting IO signals
 *	@sk_user_data: RPC layer private data
 *	@sk_sndmsg_page: cached page for sendmsg
 *	@sk_sndmsg_off: cached offset for sendmsg
 *	@sk_send_head: front of stuff to transmit
 *	@sk_security: used by security modules
 *	@sk_write_pending: a write to stream socket waits to start
 *	@sk_state_change: callback to indicate change in the state of the sock
 *	@sk_data_ready: callback to indicate there is data to be processed
 *	@sk_write_space: callback to indicate there is buffer sending space available
 *	@sk_error_report: callback to indicate errors (e.g. %MSG_ERRQUEUE)
 *	@sk_backlog_rcv: callback to process the backlog
 *	@sk_destruct: called at sock freeing time, i.e. when refcnt == 0
 */
struct sock {
	/*
	 * Now struct tcp_tw_bucket also uses sock_common, so please
	 * don't add anything before this first member (__sk_common) --acme
	 */
	struct sock_common	__sk_common;
#define sk_family		__sk_common.skc_family
#define sk_state		__sk_common.skc_state
#define sk_reuse		__sk_common.skc_reuse
#define sk_bound_dev_if		__sk_common.skc_bound_dev_if
#define sk_node			__sk_common.skc_node
#define sk_bind_node		__sk_common.skc_bind_node
#define sk_refcnt		__sk_common.skc_refcnt
	unsigned char		sk_shutdown : 2,
				sk_no_check : 2,
				sk_userlocks : 4;
	unsigned char		sk_protocol;
	unsigned short		sk_type;
	int			sk_rcvbuf;
	socket_lock_t		sk_lock;
	wait_queue_head_t	*sk_sleep;
	struct dst_entry	*sk_dst_cache;
	struct xfrm_policy	*sk_policy[2];
	rwlock_t		sk_dst_lock;
	atomic_t		sk_rmem_alloc;
	atomic_t		sk_wmem_alloc;
	atomic_t		sk_omem_alloc;
	struct sk_buff_head	sk_receive_queue;
	struct sk_buff_head	sk_write_queue;
	int			sk_wmem_queued;
	int			sk_forward_alloc;
	unsigned int		sk_allocation;
	int			sk_sndbuf;
	int			sk_route_caps;
	int			sk_hashent;
	unsigned long		sk_flags;
	unsigned long		sk_lingertime;
	/*
	 * The backlog queue is special, it is always used with
	 * the per-socket spinlock held and requires low latency
	 * access. Therefore we special case its implementation.
	 */
	struct {
		struct sk_buff *head;
		struct sk_buff *tail;
	} sk_backlog;
	struct sk_buff_head	sk_error_queue;
	struct proto		*sk_prot;
	struct proto		*sk_prot_creator;
	rwlock_t		sk_callback_lock;
	int			sk_err,
				sk_err_soft;
	unsigned short		sk_ack_backlog;
	unsigned short		sk_max_ack_backlog;
	__u32			sk_priority;
	struct ucred		sk_peercred;
	int			sk_rcvlowat;
	long			sk_rcvtimeo;
	long			sk_sndtimeo;
	struct sk_filter	*sk_filter;
	void			*sk_protinfo;
	struct timer_list	sk_timer;
	struct timeval		sk_stamp;
	struct socket		*sk_socket;
	void			*sk_user_data;
	struct page		*sk_sndmsg_page;
	struct sk_buff		*sk_send_head;
	__u32			sk_sndmsg_off;
	int			sk_write_pending;
	void			*sk_security;
	void			(*sk_state_change)(struct sock *sk);
	void			(*sk_data_ready)(struct sock *sk, int bytes);
	void			(*sk_write_space)(struct sock *sk);
	void			(*sk_error_report)(struct sock *sk);
	int			(*sk_backlog_rcv)(struct sock *sk,
						  struct sk_buff *skb);
	void			(*sk_destruct)(struct sock *sk);
};

/*
 * Hashed lists helper routines
 */
static inline struct sock *__sk_head(struct hlist_head *head)
{
	return hlist_entry(head->first, struct sock, sk_node);
}

static inline struct sock *sk_head(struct hlist_head *head)
{
	return hlist_empty(head) ? NULL : __sk_head(head);
}

static inline struct sock *sk_next(struct sock *sk)
{
	return sk->sk_node.next ?
		hlist_entry(sk->sk_node.next, struct sock, sk_node) : NULL;
}

static inline int sk_unhashed(struct sock *sk)
{
	return hlist_unhashed(&sk->sk_node);
}

static inline int sk_hashed(struct sock *sk)
{
	return sk->sk_node.pprev != NULL;
}

static __inline__ void sk_node_init(struct hlist_node *node)
{
	node->pprev = NULL;
}

static __inline__ void __sk_del_node(struct sock *sk)
{
	__hlist_del(&sk->sk_node);
}

static __inline__ int __sk_del_node_init(struct sock *sk)
{
	if (sk_hashed(sk)) {
		__sk_del_node(sk);
		sk_node_init(&sk->sk_node);
		return 1;
	}
	return 0;
}

/* Grab the socket reference count. This operation is valid only
 * when sk is ALREADY grabbed, e.g. it was found in a hash table
 * or a list and the lookup was made under a lock preventing hash table
 * modifications.
 */

static inline void sock_hold(struct sock *sk)
{
	atomic_inc(&sk->sk_refcnt);
}

/* Ungrab the socket in a context which assumes that the socket refcnt
 * cannot hit zero, e.g. this is true in the context of any socketcall.
 */
static inline void __sock_put(struct sock *sk)
{
	atomic_dec(&sk->sk_refcnt);
}

static __inline__ int sk_del_node_init(struct sock *sk)
{
	int rc = __sk_del_node_init(sk);

	if (rc) {
		/* paranoid for a while -acme */
		WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
		__sock_put(sk);
	}
	return rc;
}

static __inline__ void __sk_add_node(struct sock *sk, struct hlist_head *list)
{
	hlist_add_head(&sk->sk_node, list);
}

static __inline__ void sk_add_node(struct sock *sk, struct hlist_head *list)
{
	sock_hold(sk);
	__sk_add_node(sk, list);
}

static __inline__ void __sk_del_bind_node(struct sock *sk)
{
	__hlist_del(&sk->sk_bind_node);
}

static __inline__ void sk_add_bind_node(struct sock *sk,
					struct hlist_head *list)
{
	hlist_add_head(&sk->sk_bind_node, list);
}

#define sk_for_each(__sk, node, list) \
	hlist_for_each_entry(__sk, node, list, sk_node)
#define sk_for_each_from(__sk, node) \
	if (__sk && ({ node = &(__sk)->sk_node; 1; })) \
		hlist_for_each_entry_from(__sk, node, sk_node)
#define sk_for_each_continue(__sk, node) \
	if (__sk && ({ node = &(__sk)->sk_node; 1; })) \
		hlist_for_each_entry_continue(__sk, node, sk_node)
#define sk_for_each_safe(__sk, node, tmp, list) \
	hlist_for_each_entry_safe(__sk, node, tmp, list, sk_node)
#define sk_for_each_bound(__sk, node, list) \
	hlist_for_each_entry(__sk, node, list, sk_bind_node)

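/*
 * Illustrative sketch (not part of the original header): a minimal lookup
 * loop over a protocol hash chain using sk_for_each(). The hash head, the
 * lock and the match rule are hypothetical; real protocols (e.g.
 * tcp_v4_lookup) use their own locking and match conditions.
 */
#if 0
static struct sock *example_lookup(struct hlist_head *head, rwlock_t *lock,
				   int hash)
{
	struct sock *sk;
	struct hlist_node *node;

	read_lock(lock);
	sk_for_each(sk, node, head) {
		if (sk->sk_hashent == hash) {	/* hypothetical match rule */
			sock_hold(sk);	/* caller must drop with sock_put() */
			goto found;
		}
	}
	sk = NULL;
found:
	read_unlock(lock);
	return sk;
}
#endif
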
/* Sock flags */
enum sock_flags {
	SOCK_DEAD,
	SOCK_DONE,
	SOCK_URGINLINE,
	SOCK_KEEPOPEN,
	SOCK_LINGER,
	SOCK_DESTROY,
	SOCK_BROADCAST,
	SOCK_TIMESTAMP,
	SOCK_ZAPPED,
	SOCK_USE_WRITE_QUEUE, /* whether to call sk->sk_write_space in sock_wfree */
	SOCK_DBG, /* %SO_DEBUG setting */
	SOCK_RCVTSTAMP, /* %SO_TIMESTAMP setting */
	SOCK_NO_LARGESEND, /* whether to send large segments or not */
	SOCK_LOCALROUTE, /* route locally only, %SO_DONTROUTE setting */
	SOCK_QUEUE_SHRUNK, /* write queue has been shrunk recently */
};

static inline void sock_set_flag(struct sock *sk, enum sock_flags flag)
{
	__set_bit(flag, &sk->sk_flags);
}

static inline void sock_reset_flag(struct sock *sk, enum sock_flags flag)
{
	__clear_bit(flag, &sk->sk_flags);
}

static inline int sock_flag(struct sock *sk, enum sock_flags flag)
{
	return test_bit(flag, &sk->sk_flags);
}

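/*
 * Illustrative sketch (not part of the original header): setting and testing
 * a socket flag, in the style of sock_setsockopt()'s SO_KEEPALIVE handling.
 * The function is hypothetical.
 */
#if 0
static void example_set_keepalive(struct sock *sk, int onoff)
{
	if (onoff)
		sock_set_flag(sk, SOCK_KEEPOPEN);
	else
		sock_reset_flag(sk, SOCK_KEEPOPEN);

	if (sock_flag(sk, SOCK_KEEPOPEN))
		SOCK_DEBUG(sk, "keepalive enabled\n");
}
#endif
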
static inline void sk_acceptq_removed(struct sock *sk)
{
	sk->sk_ack_backlog--;
}

static inline void sk_acceptq_added(struct sock *sk)
{
	sk->sk_ack_backlog++;
}

static inline int sk_acceptq_is_full(struct sock *sk)
{
	return sk->sk_ack_backlog > sk->sk_max_ack_backlog;
}

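/*
 * Illustrative sketch (not part of the original header): the usual shape of
 * a connection-request handler guarding the accept queue, loosely modeled on
 * what TCP does. The function name, the drop policy and the return codes
 * are hypothetical.
 */
#if 0
static int example_conn_request(struct sock *sk, struct sk_buff *skb)
{
	if (sk_acceptq_is_full(sk))
		goto drop;
	/* ... create a request, and once the child is established: */
	sk_acceptq_added(sk);
	return 0;
drop:
	kfree_skb(skb);
	return -ENOBUFS;
}
#endif
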
/*
 * Compute the minimal free write space needed to queue new packets.
 */
static inline int sk_stream_min_wspace(struct sock *sk)
{
	return sk->sk_wmem_queued / 2;
}

static inline int sk_stream_wspace(struct sock *sk)
{
	return sk->sk_sndbuf - sk->sk_wmem_queued;
}

extern void sk_stream_write_space(struct sock *sk);

static inline int sk_stream_memory_free(struct sock *sk)
{
	return sk->sk_wmem_queued < sk->sk_sndbuf;
}

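/*
 * Illustrative sketch (not part of the original header): a poll()-style
 * writability test combining the helpers above, mirroring the
 * "wspace >= min_wspace" rule that TCP's poll uses. Hypothetical helper.
 */
#if 0
static int example_stream_writable(struct sock *sk)
{
	return sk_stream_memory_free(sk) &&
	       sk_stream_wspace(sk) >= sk_stream_min_wspace(sk);
}
#endif
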
extern void sk_stream_rfree(struct sk_buff *skb);

static inline void sk_stream_set_owner_r(struct sk_buff *skb, struct sock *sk)
{
	skb->sk = sk;
	skb->destructor = sk_stream_rfree;
	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
	sk->sk_forward_alloc -= skb->truesize;
}

static inline void sk_stream_free_skb(struct sock *sk, struct sk_buff *skb)
{
	sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
	sk->sk_wmem_queued   -= skb->truesize;
	sk->sk_forward_alloc += skb->truesize;
	__kfree_skb(skb);
}

/* The per-socket spinlock must be held here. */
#define sk_add_backlog(__sk, __skb)				\
do {	if (!(__sk)->sk_backlog.tail) {				\
		(__sk)->sk_backlog.head =			\
		     (__sk)->sk_backlog.tail = (__skb);		\
	} else {						\
		((__sk)->sk_backlog.tail)->next = (__skb);	\
		(__sk)->sk_backlog.tail = (__skb);		\
	}							\
	(__skb)->next = NULL;					\
} while(0)

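/*
 * Illustrative sketch (not part of the original header): the canonical
 * receive pattern around sk_add_backlog(), as used by e.g. tcp_v4_rcv():
 * take the BH spinlock, process directly if no user context owns the
 * socket, otherwise defer to the backlog, which release_sock() later runs
 * through sk_backlog_rcv. The function name is hypothetical.
 */
#if 0
static int example_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	int ret = 0;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk))
		ret = sk->sk_backlog_rcv(sk, skb);
	else
		sk_add_backlog(sk, skb);
	bh_unlock_sock(sk);
	return ret;
}
#endif
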
#define sk_wait_event(__sk, __timeo, __condition)		\
({	int rc;							\
	release_sock(__sk);					\
	rc = __condition;					\
	if (!rc) {						\
		*(__timeo) = schedule_timeout(*(__timeo));	\
		rc = __condition;				\
	}							\
	lock_sock(__sk);					\
	rc;							\
})

extern int sk_stream_wait_connect(struct sock *sk, long *timeo_p);
extern int sk_stream_wait_memory(struct sock *sk, long *timeo_p);
extern void sk_stream_wait_close(struct sock *sk, long timeo_p);
extern int sk_stream_error(struct sock *sk, int flags, int err);
extern void sk_stream_kill_queues(struct sock *sk);

extern int sk_wait_data(struct sock *sk, long *timeo);

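/*
 * Illustrative sketch (not part of the original header): waiting for send
 * buffer space with sk_wait_event(). The macro drops and retakes the socket
 * lock around schedule_timeout(), so the condition is re-evaluated with the
 * lock held. Hypothetical helper that ignores signal handling for brevity.
 */
#if 0
static int example_wait_for_wspace(struct sock *sk, long *timeo)
{
	DEFINE_WAIT(wait);
	int done = 0;

	while (!done) {
		prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
		done = sk_wait_event(sk, timeo, sk_stream_memory_free(sk));
		if (!*timeo)
			break;
	}
	finish_wait(sk->sk_sleep, &wait);
	return done ? 0 : -EAGAIN;
}
#endif
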
/* Networking protocol blocks we attach to sockets.
 * socket layer -> transport layer interface
 * transport -> network interface is defined by struct inet_proto
 */
struct proto {
	void			(*close)(struct sock *sk,
					long timeout);
	int			(*connect)(struct sock *sk,
					struct sockaddr *uaddr,
					int addr_len);
	int			(*disconnect)(struct sock *sk, int flags);

	struct sock *		(*accept) (struct sock *sk, int flags, int *err);

	int			(*ioctl)(struct sock *sk, int cmd,
					 unsigned long arg);
	int			(*init)(struct sock *sk);
	int			(*destroy)(struct sock *sk);
	void			(*shutdown)(struct sock *sk, int how);
	int			(*setsockopt)(struct sock *sk, int level,
					int optname, char __user *optval,
					int optlen);
	int			(*getsockopt)(struct sock *sk, int level,
					int optname, char __user *optval,
					int __user *option);
	int			(*sendmsg)(struct kiocb *iocb, struct sock *sk,
					   struct msghdr *msg, size_t len);
	int			(*recvmsg)(struct kiocb *iocb, struct sock *sk,
					   struct msghdr *msg,
					size_t len, int noblock, int flags,
					int *addr_len);
	int			(*sendpage)(struct sock *sk, struct page *page,
					int offset, size_t size, int flags);
	int			(*bind)(struct sock *sk,
					struct sockaddr *uaddr, int addr_len);

	int			(*backlog_rcv) (struct sock *sk,
						struct sk_buff *skb);

	/* Keeping track of sk's, looking them up, and port selection methods. */
	void			(*hash)(struct sock *sk);
	void			(*unhash)(struct sock *sk);
	int			(*get_port)(struct sock *sk, unsigned short snum);

	/* Memory pressure */
	void			(*enter_memory_pressure)(void);
	atomic_t		*memory_allocated;	/* Current allocated memory. */
	atomic_t		*sockets_allocated;	/* Current number of sockets. */
	/*
	 * Pressure flag: try to collapse.
	 * Technical note: it is used by multiple contexts non-atomically.
	 * All of sk_stream_mem_schedule() is of this nature: accounting
	 * is strict, actions are advisory and have some latency.
	 */
	int			*memory_pressure;
	int			*sysctl_mem;
	int			*sysctl_wmem;
	int			*sysctl_rmem;
	int			max_header;

	kmem_cache_t		*slab;
	unsigned int		obj_size;

	struct module		*owner;

	char			name[32];

	struct list_head	node;

	struct {
		int inuse;
		u8  __pad[SMP_CACHE_BYTES - sizeof(int)];
	} stats[NR_CPUS];
};

extern int proto_register(struct proto *prot, int alloc_slab);
extern void proto_unregister(struct proto *prot);

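/*
 * Illustrative sketch (not part of the original header): the minimal fields
 * a protocol fills in before proto_register(), which can also create a
 * private slab cache for the protocol's socks (alloc_slab != 0). All
 * example_* names are hypothetical; see struct proto above for the full
 * hook list.
 */
#if 0
static struct proto example_prot = {
	.name	  = "EXAMPLE",
	.owner	  = THIS_MODULE,
	.obj_size = sizeof(struct sock),	/* or a protocol-private size */
	.close	  = example_close,
	.sendmsg  = example_sendmsg,
	.recvmsg  = example_recvmsg,
};

static int __init example_init(void)
{
	return proto_register(&example_prot, 1 /* alloc_slab */);
}
#endif
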
/* Called with local bh disabled */
static __inline__ void sock_prot_inc_use(struct proto *prot)
{
	prot->stats[smp_processor_id()].inuse++;
}

static __inline__ void sock_prot_dec_use(struct proto *prot)
{
	prot->stats[smp_processor_id()].inuse--;
}

/* About 10 seconds */
#define SOCK_DESTROY_TIME (10*HZ)

/* Ports 0-1023 can't be bound to unless you are superuser */
#define PROT_SOCK	1024

#define SHUTDOWN_MASK	3
#define RCV_SHUTDOWN	1
#define SEND_SHUTDOWN	2

#define SOCK_SNDBUF_LOCK	1
#define SOCK_RCVBUF_LOCK	2
#define SOCK_BINDADDR_LOCK	4
#define SOCK_BINDPORT_LOCK	8

/* sock_iocb: used to kick off async processing of socket ios */
struct sock_iocb {
	struct list_head	list;

	int			flags;
	int			size;
	struct socket		*sock;
	struct sock		*sk;
	struct scm_cookie	*scm;
	struct msghdr		*msg, async_msg;
	struct iovec		async_iov;
	struct kiocb		*kiocb;
};

static inline struct sock_iocb *kiocb_to_siocb(struct kiocb *iocb)
{
	return (struct sock_iocb *)iocb->private;
}

static inline struct kiocb *siocb_to_kiocb(struct sock_iocb *si)
{
	return si->kiocb;
}

struct socket_alloc {
	struct socket socket;
	struct inode vfs_inode;
};

static inline struct socket *SOCKET_I(struct inode *inode)
{
	return &container_of(inode, struct socket_alloc, vfs_inode)->socket;
}

static inline struct inode *SOCK_INODE(struct socket *socket)
{
	return &container_of(socket, struct socket_alloc, socket)->vfs_inode;
}

extern void __sk_stream_mem_reclaim(struct sock *sk);
extern int sk_stream_mem_schedule(struct sock *sk, int size, int kind);

#define SK_STREAM_MEM_QUANTUM ((int)PAGE_SIZE)

static inline int sk_stream_pages(int amt)
{
	return (amt + SK_STREAM_MEM_QUANTUM - 1) / SK_STREAM_MEM_QUANTUM;
}

static inline void sk_stream_mem_reclaim(struct sock *sk)
{
	if (sk->sk_forward_alloc >= SK_STREAM_MEM_QUANTUM)
		__sk_stream_mem_reclaim(sk);
}

static inline void sk_stream_writequeue_purge(struct sock *sk)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL)
		sk_stream_free_skb(sk, skb);
	sk_stream_mem_reclaim(sk);
}

static inline int sk_stream_rmem_schedule(struct sock *sk, struct sk_buff *skb)
{
	return (int)skb->truesize <= sk->sk_forward_alloc ||
		sk_stream_mem_schedule(sk, skb->truesize, 1);
}

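/*
 * Illustrative sketch (not part of the original header): receive-side
 * accounting in the sk_stream_* style: ensure the skb's true size is
 * covered by forward-allocated memory before charging it to the socket.
 * Hypothetical helper, loosely following what TCP does on its rcv path.
 */
#if 0
static int example_stream_queue_rcv(struct sock *sk, struct sk_buff *skb)
{
	if (!sk_stream_rmem_schedule(sk, skb))
		return -ENOMEM;
	sk_stream_set_owner_r(skb, sk);
	skb_queue_tail(&sk->sk_receive_queue, skb);
	return 0;
}
#endif
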
/* Used by processes to "lock" a socket state, so that
 * interrupts and bottom half handlers won't change it
 * from under us. It essentially blocks any incoming
 * packets, so that we won't get any new data or any
 * packets that change the state of the socket.
 *
 * While locked, BH processing will add new packets to
 * the backlog queue.  This queue is processed by the
 * owner of the socket lock right before it is released.
 *
 * Since ~2.3.5 it is also an exclusive sleep lock serializing
 * accesses from user process context.
 */
#define sock_owned_by_user(sk)	((sk)->sk_lock.owner)

extern void FASTCALL(lock_sock(struct sock *sk));
extern void FASTCALL(release_sock(struct sock *sk));

/* BH context may only use the following locking interface. */
#define bh_lock_sock(__sk)	spin_lock(&((__sk)->sk_lock.slock))
#define bh_unlock_sock(__sk)	spin_unlock(&((__sk)->sk_lock.slock))

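/*
 * Illustrative sketch (not part of the original header): the standard
 * process-context critical section. Between lock_sock() and release_sock()
 * no BH handler changes the socket; packets arriving meanwhile go to the
 * backlog and are processed by release_sock(). Hypothetical function.
 */
#if 0
static void example_set_rcvlowat(struct sock *sk, int val)
{
	lock_sock(sk);
	sk->sk_rcvlowat = val ? : 1;
	release_sock(sk);
}
#endif
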
extern struct sock		*sk_alloc(int family, int priority,
					  struct proto *prot, int zero_it);
extern void			sk_free(struct sock *sk);

extern struct sk_buff		*sock_wmalloc(struct sock *sk,
					      unsigned long size, int force,
					      int priority);
extern struct sk_buff		*sock_rmalloc(struct sock *sk,
					      unsigned long size, int force,
					      int priority);
extern void			sock_wfree(struct sk_buff *skb);
extern void			sock_rfree(struct sk_buff *skb);

extern int			sock_setsockopt(struct socket *sock, int level,
						int op, char __user *optval,
						int optlen);

extern int			sock_getsockopt(struct socket *sock, int level,
						int op, char __user *optval,
						int __user *optlen);
extern struct sk_buff		*sock_alloc_send_skb(struct sock *sk,
						     unsigned long size,
						     int noblock,
						     int *errcode);
extern void *sock_kmalloc(struct sock *sk, int size, int priority);
extern void sock_kfree_s(struct sock *sk, void *mem, int size);
extern void sk_send_sigurg(struct sock *sk);

/*
 * Functions to fill in entries in struct proto_ops when a protocol
 * does not implement a particular function.
 */
extern int			sock_no_bind(struct socket *,
					     struct sockaddr *, int);
extern int			sock_no_connect(struct socket *,
						struct sockaddr *, int, int);
extern int			sock_no_socketpair(struct socket *,
						   struct socket *);
extern int			sock_no_accept(struct socket *,
					       struct socket *, int);
extern int			sock_no_getname(struct socket *,
						struct sockaddr *, int *, int);
extern unsigned int		sock_no_poll(struct file *, struct socket *,
					     struct poll_table_struct *);
extern int			sock_no_ioctl(struct socket *, unsigned int,
					      unsigned long);
extern int			sock_no_listen(struct socket *, int);
extern int			sock_no_shutdown(struct socket *, int);
extern int			sock_no_getsockopt(struct socket *, int, int,
						   char __user *, int __user *);
extern int			sock_no_setsockopt(struct socket *, int, int,
						   char __user *, int);
extern int			sock_no_sendmsg(struct kiocb *, struct socket *,
						struct msghdr *, size_t);
extern int			sock_no_recvmsg(struct kiocb *, struct socket *,
						struct msghdr *, size_t, int);
extern int			sock_no_mmap(struct file *file,
					     struct socket *sock,
					     struct vm_area_struct *vma);
extern ssize_t			sock_no_sendpage(struct socket *sock,
						struct page *page,
						int offset, size_t size,
						int flags);

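/*
 * Illustrative sketch (not part of the original header): a proto_ops table
 * using the sock_no_* defaults above for the operations the protocol does
 * not support. All example_* names and the AF_UNSPEC family are
 * hypothetical.
 */
#if 0
static struct proto_ops example_ops = {
	.family	    = AF_UNSPEC,	/* hypothetical */
	.owner	    = THIS_MODULE,
	.bind	    = example_bind,
	.sendmsg    = example_sendmsg,
	.recvmsg    = example_recvmsg,
	/* unsupported operations fall back to the stubs above */
	.socketpair = sock_no_socketpair,
	.accept	    = sock_no_accept,
	.listen	    = sock_no_listen,
	.mmap	    = sock_no_mmap,
	.sendpage   = sock_no_sendpage,
};
#endif
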
/*
 * Functions to fill in entries in struct proto_ops when a protocol
 * uses the inet style.
 */
extern int sock_common_getsockopt(struct socket *sock, int level, int optname,
				  char __user *optval, int __user *optlen);
extern int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
			       struct msghdr *msg, size_t size, int flags);
extern int sock_common_setsockopt(struct socket *sock, int level, int optname,
				  char __user *optval, int optlen);

extern void sk_common_release(struct sock *sk);

/*
 *	Default socket callbacks and setup code
 */

/* Initialise core socket variables */
extern void sock_init_data(struct socket *sock, struct sock *sk);

/**
 *	sk_filter - run a packet through a socket filter
 *	@sk: sock associated with &sk_buff
 *	@skb: buffer to filter
 *	@needlock: set to 1 if the sock is not locked by the caller
 *
 * Run the filter code and then cut skb->data to the correct size returned
 * by sk_run_filter. If pkt_len is 0 we toss the packet. If skb->len is
 * smaller than pkt_len we keep the whole skb->data. This is the socket
 * level wrapper to sk_run_filter. It returns 0 if the packet should
 * be accepted or -EPERM if the packet should be tossed.
 */

static inline int sk_filter(struct sock *sk, struct sk_buff *skb, int needlock)
{
	int err;

	err = security_sock_rcv_skb(sk, skb);
	if (err)
		return err;

	if (sk->sk_filter) {
		struct sk_filter *filter;

		if (needlock)
			bh_lock_sock(sk);

		filter = sk->sk_filter;
		if (filter) {
			int pkt_len = sk_run_filter(skb, filter->insns,
						    filter->len);
			if (!pkt_len)
				err = -EPERM;
			else
				skb_trim(skb, pkt_len);
		}

		if (needlock)
			bh_unlock_sock(sk);
	}
	return err;
}

/**
 *	sk_filter_release - release a socket filter
 *	@sk: socket
 *	@fp: filter to remove
 *
 *	Remove a filter from a socket and release its resources.
 */

static inline void sk_filter_release(struct sock *sk, struct sk_filter *fp)
{
	unsigned int size = sk_filter_len(fp);

	atomic_sub(size, &sk->sk_omem_alloc);

	if (atomic_dec_and_test(&fp->refcnt))
		kfree(fp);
}

static inline void sk_filter_charge(struct sock *sk, struct sk_filter *fp)
{
	atomic_inc(&fp->refcnt);
	atomic_add(sk_filter_len(fp), &sk->sk_omem_alloc);
}

/*
 * Socket reference counting postulates.
 *
 * * Each user of a socket SHOULD hold a reference count.
 * * Each access point to a socket (a hash table bucket, a reference from a
 *   list, a running timer, an skb in flight) MUST hold a reference count.
 * * When the reference count hits 0, it will never increase again.
 * * When the reference count hits 0, no references to this socket exist
 *   from outside, and the current process on the current CPU is the last
 *   user and may/should destroy this socket.
 * * sk_free may be called from any context: process, BH or IRQ. When it is
 *   called, the socket has no references from outside, so sk_free may
 *   release descendant resources allocated by the socket; by the time it
 *   is called, the socket is NOT referenced by any hash tables, lists, etc.
 * * Packets delivered from outside (from the network or from another
 *   process) and enqueued on receive/error queues SHOULD NOT grab a
 *   reference count while they sit in a queue. Otherwise packets would
 *   leak when a socket is looked up by one CPU while unhashing is done by
 *   another CPU. This is true for udp/raw and netlink (leak to the receive
 *   and error queues) and for tcp (leak to the backlog). The packet socket
 *   does all its processing inside BR_NETPROTO_LOCK, so it does not have
 *   this race condition. UNIX sockets use a separate SMP lock, so they are
 *   not prone to it either.
 */

/* Ungrab the socket and destroy it, if it was the last reference. */
static inline void sock_put(struct sock *sk)
{
	if (atomic_dec_and_test(&sk->sk_refcnt))
		sk_free(sk);
}

/* Detach the socket from process context.
 * Announce the socket dead, detach it from the wait queue and inode.
 * Note that the parent inode holds a reference count on this struct sock;
 * we do not release it in this function, because the protocol
 * probably wants to do some additional cleanups or even continue
 * working with this socket (TCP).
 */
static inline void sock_orphan(struct sock *sk)
{
	write_lock_bh(&sk->sk_callback_lock);
	sock_set_flag(sk, SOCK_DEAD);
	sk->sk_socket = NULL;
	sk->sk_sleep  = NULL;
	write_unlock_bh(&sk->sk_callback_lock);
}

static inline void sock_graft(struct sock *sk, struct socket *parent)
{
	write_lock_bh(&sk->sk_callback_lock);
	sk->sk_sleep = &parent->wait;
	parent->sk = sk;
	sk->sk_socket = parent;
	write_unlock_bh(&sk->sk_callback_lock);
}

extern int sock_i_uid(struct sock *sk);
extern unsigned long sock_i_ino(struct sock *sk);

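/*
 * Illustrative sketch (not part of the original header): the hold/use/put
 * discipline from the refcounting postulates above; a socket returned by a
 * lookup that took a reference must be released with sock_put(). Both
 * example_* functions are hypothetical (example_lookup is the sketch shown
 * earlier in this file).
 */
#if 0
static void example_use_socket(struct hlist_head *head, rwlock_t *lock)
{
	struct sock *sk = example_lookup(head, lock, 0);	/* holds a ref */

	if (sk) {
		SOCK_DEBUG(sk, "state %d\n", sk->sk_state);
		sock_put(sk);	/* may free the socket if last reference */
	}
}
#endif
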
static inline struct dst_entry *
__sk_dst_get(struct sock *sk)
{
	return sk->sk_dst_cache;
}

static inline struct dst_entry *
sk_dst_get(struct sock *sk)
{
	struct dst_entry *dst;

	read_lock(&sk->sk_dst_lock);
	dst = sk->sk_dst_cache;
	if (dst)
		dst_hold(dst);
	read_unlock(&sk->sk_dst_lock);
	return dst;
}

static inline void
__sk_dst_set(struct sock *sk, struct dst_entry *dst)
{
	struct dst_entry *old_dst;

	old_dst = sk->sk_dst_cache;
	sk->sk_dst_cache = dst;
	dst_release(old_dst);
}

static inline void
sk_dst_set(struct sock *sk, struct dst_entry *dst)
{
	write_lock(&sk->sk_dst_lock);
	__sk_dst_set(sk, dst);
	write_unlock(&sk->sk_dst_lock);
}

static inline void
__sk_dst_reset(struct sock *sk)
{
	struct dst_entry *old_dst;

	old_dst = sk->sk_dst_cache;
	sk->sk_dst_cache = NULL;
	dst_release(old_dst);
}

static inline void
sk_dst_reset(struct sock *sk)
{
	write_lock(&sk->sk_dst_lock);
	__sk_dst_reset(sk);
	write_unlock(&sk->sk_dst_lock);
}

static inline struct dst_entry *
__sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = sk->sk_dst_cache;

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk->sk_dst_cache = NULL;
		dst_release(dst);
		return NULL;
	}

	return dst;
}

static inline struct dst_entry *
sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = sk_dst_get(sk);

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk_dst_reset(sk);
		dst_release(dst);
		return NULL;
	}

	return dst;
}

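/*
 * Illustrative sketch (not part of the original header): validating the
 * cached route before transmit, in the style of TCP's use of
 * __sk_dst_check(). If the cache is stale the protocol redoes the route
 * lookup (elided here); the function is hypothetical.
 */
#if 0
static struct dst_entry *example_route_for_xmit(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = __sk_dst_check(sk, cookie);

	if (!dst) {
		/* ... redo the route lookup, then: __sk_dst_set(sk, dst); */
	}
	return dst;
}
#endif
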
static inline void sk_charge_skb(struct sock *sk, struct sk_buff *skb)
{
	sk->sk_wmem_queued   += skb->truesize;
	sk->sk_forward_alloc -= skb->truesize;
}

static inline int skb_copy_to_page(struct sock *sk, char __user *from,
				   struct sk_buff *skb, struct page *page,
				   int off, int copy)
{
	if (skb->ip_summed == CHECKSUM_NONE) {
		int err = 0;
		unsigned int csum = csum_and_copy_from_user(from,
						     page_address(page) + off,
							    copy, 0, &err);
		if (err)
			return err;
		skb->csum = csum_block_add(skb->csum, csum, skb->len);
	} else if (copy_from_user(page_address(page) + off, from, copy))
		return -EFAULT;

	skb->len	     += copy;
	skb->data_len	     += copy;
	skb->truesize	     += copy;
	sk->sk_wmem_queued   += copy;
	sk->sk_forward_alloc -= copy;
	return 0;
}

/*
 *	Queue a received datagram if it will fit. Stream and sequenced
 *	protocols can't normally use this as they need to fit buffers in
 *	and play with them.
 *
 *	Inlined as it's very short and called for pretty much every
 *	packet ever received.
 */

static inline void skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
{
	sock_hold(sk);
	skb->sk = sk;
	skb->destructor = sock_wfree;
	atomic_add(skb->truesize, &sk->sk_wmem_alloc);
}

static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
{
	skb->sk = sk;
	skb->destructor = sock_rfree;
	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
}

extern void sk_reset_timer(struct sock *sk, struct timer_list *timer,
			   unsigned long expires);

extern void sk_stop_timer(struct sock *sk, struct timer_list *timer);

static inline int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int err = 0;
	int skb_len;

	/* Cast sk->sk_rcvbuf to unsigned... It's pointless, but reduces
	   the number of warnings when compiling with -W --ANK
	 */
	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
	    (unsigned)sk->sk_rcvbuf) {
		err = -ENOMEM;
		goto out;
	}

	/* It would deadlock if sock_queue_rcv_skb were used with the
	   socket lock held! We assume that users of this
	   function are lock free.
	*/
	err = sk_filter(sk, skb, 1);
	if (err)
		goto out;

	skb->dev = NULL;
	skb_set_owner_r(skb, sk);

	/* Cache the SKB length before we tack it onto the receive
	 * queue.  Once it is added it no longer belongs to us and
	 * may be freed by other threads of control pulling packets
	 * from the queue.
	 */
	skb_len = skb->len;

	skb_queue_tail(&sk->sk_receive_queue, skb);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk, skb_len);
out:
	return err;
}

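/*
 * Illustrative sketch (not part of the original header): a datagram
 * protocol's per-socket receive hook built on sock_queue_rcv_skb(), which
 * already applies the rcvbuf limit and the socket filter. On failure the
 * skb still belongs to the caller and must be freed. Hypothetical function,
 * loosely in the style of udp_queue_rcv_skb().
 */
#if 0
static int example_queue_rcv(struct sock *sk, struct sk_buff *skb)
{
	int err = sock_queue_rcv_skb(sk, skb);

	if (err < 0) {
		kfree_skb(skb);
		return err;	/* e.g. -ENOMEM when over sk_rcvbuf */
	}
	return 0;
}
#endif
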
static inline int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
{
	/* Cast sk->sk_rcvbuf to unsigned... It's pointless, but reduces
	   the number of warnings when compiling with -W --ANK
	 */
	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
	    (unsigned)sk->sk_rcvbuf)
		return -ENOMEM;
	skb_set_owner_r(skb, sk);
	skb_queue_tail(&sk->sk_error_queue, skb);
	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk, skb->len);
	return 0;
}

/*
 *	Recover an error report and clear it atomically
 */

static inline int sock_error(struct sock *sk)
{
	int err = xchg(&sk->sk_err, 0);
	return -err;
}

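/*
 * Illustrative sketch (not part of the original header): reporting a pending
 * asynchronous error at the top of a sendmsg()-style function. sock_error()
 * atomically reads and clears sk_err and returns it negated. Hypothetical
 * function.
 */
#if 0
static int example_sendmsg_start(struct sock *sk)
{
	int err = sock_error(sk);

	if (err)
		return err;	/* already negative, e.g. -ECONNREFUSED */
	if (sk->sk_shutdown & SEND_SHUTDOWN)
		return -EPIPE;
	return 0;
}
#endif
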
static inline unsigned long sock_wspace(struct sock *sk)
{
	int amt = 0;

	if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
		amt = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc);
		if (amt < 0)
			amt = 0;
	}
	return amt;
}

static inline void sk_wake_async(struct sock *sk, int how, int band)
{
	if (sk->sk_socket && sk->sk_socket->fasync_list)
		sock_wake_async(sk->sk_socket, how, band);
}

#define SOCK_MIN_SNDBUF 2048
#define SOCK_MIN_RCVBUF 256

static inline void sk_stream_moderate_sndbuf(struct sock *sk)
{
	if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK)) {
		sk->sk_sndbuf = min(sk->sk_sndbuf, sk->sk_wmem_queued / 2);
		sk->sk_sndbuf = max(sk->sk_sndbuf, SOCK_MIN_SNDBUF);
	}
}

static inline struct sk_buff *sk_stream_alloc_pskb(struct sock *sk,
						   int size, int mem, int gfp)
{
	struct sk_buff *skb = alloc_skb(size + sk->sk_prot->max_header, gfp);

	if (skb) {
		skb->truesize += mem;
		if (sk->sk_forward_alloc >= (int)skb->truesize ||
		    sk_stream_mem_schedule(sk, skb->truesize, 0)) {
			skb_reserve(skb, sk->sk_prot->max_header);
			return skb;
		}
		__kfree_skb(skb);
	} else {
		sk->sk_prot->enter_memory_pressure();
		sk_stream_moderate_sndbuf(sk);
	}
	return NULL;
}

static inline struct sk_buff *sk_stream_alloc_skb(struct sock *sk,
						  int size, int gfp)
{
	return sk_stream_alloc_pskb(sk, size, 0, gfp);
}

static inline struct page *sk_stream_alloc_page(struct sock *sk)
{
	struct page *page = NULL;

	if (sk->sk_forward_alloc >= (int)PAGE_SIZE ||
	    sk_stream_mem_schedule(sk, PAGE_SIZE, 0))
		page = alloc_pages(sk->sk_allocation, 0);
	else {
		sk->sk_prot->enter_memory_pressure();
		sk_stream_moderate_sndbuf(sk);
	}
	return page;
}

#define sk_stream_for_retrans_queue(skb, sk)				\
		for (skb = (sk)->sk_write_queue.next;			\
		     (skb != (sk)->sk_send_head) &&			\
		     (skb != (struct sk_buff *)&(sk)->sk_write_queue);	\
		     skb = skb->next)

/*
 *	Default write policy as shown to user space via poll/select/SIGIO
 */
static inline int sock_writeable(const struct sock *sk)
{
	return atomic_read(&sk->sk_wmem_alloc) < (sk->sk_sndbuf / 2);
}

static inline int gfp_any(void)
{
	return in_softirq() ? GFP_ATOMIC : GFP_KERNEL;
}

static inline long sock_rcvtimeo(const struct sock *sk, int noblock)
{
	return noblock ? 0 : sk->sk_rcvtimeo;
}

static inline long sock_sndtimeo(const struct sock *sk, int noblock)
{
	return noblock ? 0 : sk->sk_sndtimeo;
}

static inline int sock_rcvlowat(const struct sock *sk, int waitall, int len)
{
	return (waitall ? len : min_t(int, sk->sk_rcvlowat, len)) ? : 1;
}

/* Alas, with timeout socket operations are not restartable.
 * Compare this to poll().
 */
static inline int sock_intr_errno(long timeo)
{
	return timeo == MAX_SCHEDULE_TIMEOUT ? -ERESTARTSYS : -EINTR;
}

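/*
 * Illustrative sketch (not part of the original header): how the timeout
 * helpers combine in a blocking receive loop: sock_rcvtimeo() picks the
 * timeout, sk_wait_data() sleeps for data (socket lock assumed held), and
 * sock_intr_errno() chooses between -ERESTARTSYS and -EINTR when a signal
 * arrives. Hypothetical function.
 */
#if 0
static int example_wait_for_data(struct sock *sk, int noblock)
{
	long timeo = sock_rcvtimeo(sk, noblock);

	while (skb_queue_empty(&sk->sk_receive_queue)) {
		if (!timeo)
			return -EAGAIN;
		if (signal_pending(current))
			return sock_intr_errno(timeo);
		sk_wait_data(sk, &timeo);
	}
	return 0;
}
#endif
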
static __inline__ void
sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
{
	struct timeval *stamp = &skb->stamp;
	if (sock_flag(sk, SOCK_RCVTSTAMP)) {
		/* A race occurred between timestamp enabling and packet
		   receiving.  Fill in the current time for now. */
		if (stamp->tv_sec == 0)
			do_gettimeofday(stamp);
		put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMP, sizeof(struct timeval),
			 stamp);
	} else
		sk->sk_stamp = *stamp;
}

/**
 * sk_eat_skb - release an skb if it is no longer needed
 * @sk: socket to eat this skb from
 * @skb: socket buffer to eat
 *
 * This routine must be called with interrupts disabled or with the socket
 * locked so that the sk_buff queue operation is ok.
 */
static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb)
{
	__skb_unlink(skb, &sk->sk_receive_queue);
	__kfree_skb(skb);
}

extern void sock_enable_timestamp(struct sock *sk);
extern int sock_get_timestamp(struct sock *, struct timeval __user *);

/*
 *	Enable debug/info messages
 */

#if 0
#define NETDEBUG(x)	do { } while (0)
#define LIMIT_NETDEBUG(x) do { } while (0)
#else
#define NETDEBUG(x)	do { x; } while (0)
#define LIMIT_NETDEBUG(x) do { if (net_ratelimit()) { x; } } while (0)
#endif

/*
 * Macros for sleeping on a socket. Use them like this:
 *
 * SOCK_SLEEP_PRE(sk)
 * if (condition)
 *	schedule();
 * SOCK_SLEEP_POST(sk)
 *
 * N.B. These are now obsolete and were, as far as I know, only ever used
 * in DECnet; when the last use of them in DECnet has gone, I intend to
 * remove them.
 */

#define SOCK_SLEEP_PRE(sk)	{ struct task_struct *tsk = current; \
				DECLARE_WAITQUEUE(wait, tsk); \
				tsk->state = TASK_INTERRUPTIBLE; \
				add_wait_queue((sk)->sk_sleep, &wait); \
				release_sock(sk);

#define SOCK_SLEEP_POST(sk)	tsk->state = TASK_RUNNING; \
				remove_wait_queue((sk)->sk_sleep, &wait); \
				lock_sock(sk); \
				}

static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
{
	if (valbool)
		sock_set_flag(sk, bit);
	else
		sock_reset_flag(sk, bit);
}

extern __u32 sysctl_wmem_max;
extern __u32 sysctl_rmem_max;

#ifdef CONFIG_NET
int siocdevprivate_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg);
#else
static inline int siocdevprivate_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg)
{
	return -ENODEV;
}
#endif

#endif	/* _SOCK_H */