/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the AF_INET socket handler.
 *
 * Version:	@(#)sock.h	1.0.4	05/13/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche <flla@stud.uni-sb.de>
 *
 * Fixes:
 *		Alan Cox	:	Volatiles in skbuff pointers. See
 *					skbuff comments. May be overdone,
 *					better to prove they can be removed
 *					than the reverse.
 *		Alan Cox	:	Added a zapped field for tcp to note
 *					a socket is reset and must stay shut up
 *		Alan Cox	:	New fields for options
 *		Pauline Middelink	:	identd support
 *		Alan Cox	:	Eliminate low level recv/recvfrom
 *		David S. Miller	:	New socket lookup architecture.
 *		Steve Whitehouse:	Default routines for sock_ops
 *		Arnaldo C. Melo	:	removed net_pinfo, tp_pinfo and made
 *					protinfo be just a void pointer, as the
 *					protocol specific parts were moved to
 *					respective headers and ipv4/v6, etc now
 *					use private slabcaches for its socks
 *		Pedro Hortas	:	New flags field for socket options
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#ifndef _SOCK_H
#define _SOCK_H

#include <linux/config.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/cache.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>	/* struct sk_buff */
#include <linux/security.h>

#include <linux/filter.h>

#include <asm/atomic.h>
#include <net/dst.h>
#include <net/checksum.h>

/*
 * This structure really needs to be cleaned up.
 * Most of it is for TCP, and not used by any of
 * the other protocols.
 */

/* Define this to get the SOCK_DBG debugging facility. */
#define SOCK_DEBUGGING
#ifdef SOCK_DEBUGGING
#define SOCK_DEBUG(sk, msg...) do { if ((sk) && sock_flag((sk), SOCK_DBG)) \
					printk(KERN_DEBUG msg); } while (0)
#else
#define SOCK_DEBUG(sk, msg...) do { } while (0)
#endif

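/*
 * Illustrative use of SOCK_DEBUG() (documentation only, not part of this
 * header): the message is emitted only when the socket exists and has the
 * SOCK_DBG flag set via SO_DEBUG.  example_debug() is a hypothetical
 * function, not a kernel API.
 */
#if 0
static void example_debug(struct sock *sk, struct sk_buff *skb)
{
	/* Printed only if setsockopt(fd, SOL_SOCKET, SO_DEBUG, ...) was used. */
	SOCK_DEBUG(sk, "rcv: skb %p, len %u\n", skb, skb->len);
}
#endif
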
/* This is the per-socket lock.  The spinlock provides a synchronization
 * between user contexts and software interrupt processing, whereas the
 * mini-semaphore synchronizes multiple users amongst themselves.
 */
struct sock_iocb;
typedef struct {
	spinlock_t		slock;
	struct sock_iocb	*owner;
	wait_queue_head_t	wq;
} socket_lock_t;

#define sock_lock_init(__sk) \
do {	spin_lock_init(&((__sk)->sk_lock.slock)); \
	(__sk)->sk_lock.owner = NULL; \
	init_waitqueue_head(&((__sk)->sk_lock.wq)); \
} while(0)

struct sock;
struct proto;

/**
 *	struct sock_common - minimal network layer representation of sockets
 *	@skc_family: network address family
 *	@skc_state: Connection state
 *	@skc_reuse: %SO_REUSEADDR setting
 *	@skc_bound_dev_if: bound device index if != 0
 *	@skc_node: main hash linkage for various protocol lookup tables
 *	@skc_bind_node: bind hash linkage for various protocol lookup tables
 *	@skc_refcnt: reference count
 *	@skc_hash: hash value used with various protocol lookup tables
 *	@skc_prot: protocol handlers inside a network family
 *
 *	This is the minimal network layer representation of sockets, the header
 *	for struct sock and struct inet_timewait_sock.
 */
struct sock_common {
	unsigned short		skc_family;
	volatile unsigned char	skc_state;
	unsigned char		skc_reuse;
	int			skc_bound_dev_if;
	struct hlist_node	skc_node;
	struct hlist_node	skc_bind_node;
	atomic_t		skc_refcnt;
	unsigned int		skc_hash;
	struct proto		*skc_prot;
};

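/*
 * Because struct sock and struct inet_timewait_sock share this header,
 * lookup code can match either flavour through the common fields alone.
 * A hypothetical sketch (documentation only; sk_common_match() is not a
 * kernel helper):
 */
#if 0
static inline int sk_common_match(const struct sock_common *skc,
				  unsigned int hash, unsigned short family)
{
	return skc->skc_hash == hash && skc->skc_family == family;
}
#endif
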
/**
 *	struct sock - network layer representation of sockets
 *	@__sk_common: shared layout with inet_timewait_sock
 *	@sk_shutdown: mask of %SEND_SHUTDOWN and/or %RCV_SHUTDOWN
 *	@sk_userlocks: %SO_SNDBUF and %SO_RCVBUF settings
 *	@sk_lock: synchronizer
 *	@sk_rcvbuf: size of receive buffer in bytes
 *	@sk_sleep: sock wait queue
 *	@sk_dst_cache: destination cache
 *	@sk_dst_lock: destination cache lock
 *	@sk_policy: flow policy
 *	@sk_rmem_alloc: receive queue bytes committed
 *	@sk_receive_queue: incoming packets
 *	@sk_wmem_alloc: transmit queue bytes committed
 *	@sk_write_queue: Packet sending queue
 *	@sk_omem_alloc: "o" is "option" or "other"
 *	@sk_wmem_queued: persistent queue size
 *	@sk_forward_alloc: space allocated forward
 *	@sk_allocation: allocation mode
 *	@sk_sndbuf: size of send buffer in bytes
 *	@sk_flags: %SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE, %SO_OOBINLINE settings
 *	@sk_no_check: %SO_NO_CHECK setting, whether or not to checksum packets
 *	@sk_route_caps: route capabilities (e.g. %NETIF_F_TSO)
 *	@sk_lingertime: %SO_LINGER l_linger setting
 *	@sk_backlog: always used with the per-socket spinlock held
 *	@sk_callback_lock: used with the callbacks in the end of this struct
 *	@sk_error_queue: rarely used
 *	@sk_prot_creator: sk_prot of original sock creator (see ipv6_setsockopt, IPV6_ADDRFORM for instance)
 *	@sk_err: last error
 *	@sk_err_soft: errors that don't cause failure but are the cause of a persistent failure not just 'timed out'
 *	@sk_ack_backlog: current listen backlog
 *	@sk_max_ack_backlog: listen backlog set in listen()
 *	@sk_priority: %SO_PRIORITY setting
 *	@sk_type: socket type (%SOCK_STREAM, etc)
 *	@sk_protocol: which protocol this socket belongs in this network family
 *	@sk_peercred: %SO_PEERCRED setting
 *	@sk_rcvlowat: %SO_RCVLOWAT setting
 *	@sk_rcvtimeo: %SO_RCVTIMEO setting
 *	@sk_sndtimeo: %SO_SNDTIMEO setting
 *	@sk_filter: socket filtering instructions
 *	@sk_protinfo: private area, net family specific, when not using slab
 *	@sk_timer: sock cleanup timer
 *	@sk_stamp: time stamp of last packet received
 *	@sk_socket: Identd and reporting IO signals
 *	@sk_user_data: RPC layer private data
 *	@sk_sndmsg_page: cached page for sendmsg
 *	@sk_sndmsg_off: cached offset for sendmsg
 *	@sk_send_head: front of stuff to transmit
 *	@sk_security: used by security modules
 *	@sk_write_pending: a write to stream socket waits to start
 *	@sk_state_change: callback to indicate change in the state of the sock
 *	@sk_data_ready: callback to indicate there is data to be processed
 *	@sk_write_space: callback to indicate there is buffer sending space available
 *	@sk_error_report: callback to indicate errors (e.g. %MSG_ERRQUEUE)
 *	@sk_backlog_rcv: callback to process the backlog
 *	@sk_destruct: called at sock freeing time, i.e. when all refcnt == 0
 */
struct sock {
	/*
	 * Now struct inet_timewait_sock also uses sock_common, so please just
	 * don't add anything before this first member (__sk_common) --acme
	 */
	struct sock_common	__sk_common;
#define sk_family		__sk_common.skc_family
#define sk_state		__sk_common.skc_state
#define sk_reuse		__sk_common.skc_reuse
#define sk_bound_dev_if		__sk_common.skc_bound_dev_if
#define sk_node			__sk_common.skc_node
#define sk_bind_node		__sk_common.skc_bind_node
#define sk_refcnt		__sk_common.skc_refcnt
#define sk_hash			__sk_common.skc_hash
#define sk_prot			__sk_common.skc_prot
	unsigned char		sk_shutdown : 2,
				sk_no_check : 2,
				sk_userlocks : 4;
	unsigned char		sk_protocol;
	unsigned short		sk_type;
	int			sk_rcvbuf;
	socket_lock_t		sk_lock;
	wait_queue_head_t	*sk_sleep;
	struct dst_entry	*sk_dst_cache;
	struct xfrm_policy	*sk_policy[2];
	rwlock_t		sk_dst_lock;
	atomic_t		sk_rmem_alloc;
	atomic_t		sk_wmem_alloc;
	atomic_t		sk_omem_alloc;
	struct sk_buff_head	sk_receive_queue;
	struct sk_buff_head	sk_write_queue;
	int			sk_wmem_queued;
	int			sk_forward_alloc;
	gfp_t			sk_allocation;
	int			sk_sndbuf;
	int			sk_route_caps;
	int			sk_rcvlowat;
	unsigned long		sk_flags;
	unsigned long		sk_lingertime;
	/*
	 * The backlog queue is special, it is always used with
	 * the per-socket spinlock held and requires low latency
	 * access. Therefore we special case its implementation.
	 */
	struct {
		struct sk_buff *head;
		struct sk_buff *tail;
	} sk_backlog;
	struct sk_buff_head	sk_error_queue;
	struct proto		*sk_prot_creator;
	rwlock_t		sk_callback_lock;
	int			sk_err,
				sk_err_soft;
	unsigned short		sk_ack_backlog;
	unsigned short		sk_max_ack_backlog;
	__u32			sk_priority;
	struct ucred		sk_peercred;
	long			sk_rcvtimeo;
	long			sk_sndtimeo;
	struct sk_filter	*sk_filter;
	void			*sk_protinfo;
	struct timer_list	sk_timer;
	struct timeval		sk_stamp;
	struct socket		*sk_socket;
	void			*sk_user_data;
	struct page		*sk_sndmsg_page;
	struct sk_buff		*sk_send_head;
	__u32			sk_sndmsg_off;
	int			sk_write_pending;
	void			*sk_security;
	void			(*sk_state_change)(struct sock *sk);
	void			(*sk_data_ready)(struct sock *sk, int bytes);
	void			(*sk_write_space)(struct sock *sk);
	void			(*sk_error_report)(struct sock *sk);
	int			(*sk_backlog_rcv)(struct sock *sk,
						  struct sk_buff *skb);
	void			(*sk_destruct)(struct sock *sk);
};

/*
 * Hashed lists helper routines
 */
static inline struct sock *__sk_head(const struct hlist_head *head)
{
	return hlist_entry(head->first, struct sock, sk_node);
}

static inline struct sock *sk_head(const struct hlist_head *head)
{
	return hlist_empty(head) ? NULL : __sk_head(head);
}

static inline struct sock *sk_next(const struct sock *sk)
{
	return sk->sk_node.next ?
		hlist_entry(sk->sk_node.next, struct sock, sk_node) : NULL;
}

static inline int sk_unhashed(const struct sock *sk)
{
	return hlist_unhashed(&sk->sk_node);
}

static inline int sk_hashed(const struct sock *sk)
{
	return sk->sk_node.pprev != NULL;
}

static __inline__ void sk_node_init(struct hlist_node *node)
{
	node->pprev = NULL;
}

static __inline__ void __sk_del_node(struct sock *sk)
{
	__hlist_del(&sk->sk_node);
}

static __inline__ int __sk_del_node_init(struct sock *sk)
{
	if (sk_hashed(sk)) {
		__sk_del_node(sk);
		sk_node_init(&sk->sk_node);
		return 1;
	}
	return 0;
}

/* Grab socket reference count. This operation is valid only
   when sk is ALREADY grabbed, e.g. it has been found in a hash table
   or a list and the lookup is made under a lock preventing hash table
   modifications.
 */

static inline void sock_hold(struct sock *sk)
{
	atomic_inc(&sk->sk_refcnt);
}

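/*
 * Illustrative lookup pattern (documentation only): a protocol finds a
 * socket in one of its hash chains under the chain lock, then takes a
 * reference with sock_hold() before dropping the lock.  The chain, lock
 * and match logic below are hypothetical placeholders.
 */
#if 0
static struct sock *example_lookup(struct hlist_head *chain,
				   rwlock_t *chain_lock, unsigned int hash)
{
	struct sock *sk;
	struct hlist_node *node;

	read_lock(chain_lock);
	hlist_for_each_entry(sk, node, chain, sk_node)
		if (sk->sk_hash == hash) {
			sock_hold(sk);	/* valid: lookup holds the lock */
			break;
		}
	read_unlock(chain_lock);
	return node ? sk : NULL;	/* caller must sock_put() later */
}
#endif
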
/* Ungrab socket in the context, which assumes that socket refcnt
   cannot hit zero, e.g. it is true in context of any socketcall.
 */
static inline void __sock_put(struct sock *sk)
{
	atomic_dec(&sk->sk_refcnt);
}

static __inline__ int sk_del_node_init(struct sock *sk)
{
	int rc = __sk_del_node_init(sk);

	if (rc) {
		/* paranoid for a while -acme */
		WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
		__sock_put(sk);
	}
	return rc;
}

static __inline__ void __sk_add_node(struct sock *sk, struct hlist_head *list)
{
	hlist_add_head(&sk->sk_node, list);
}

static __inline__ void sk_add_node(struct sock *sk, struct hlist_head *list)
{
	sock_hold(sk);
	__sk_add_node(sk, list);
}

static __inline__ void __sk_del_bind_node(struct sock *sk)
{
	__hlist_del(&sk->sk_bind_node);
}

static __inline__ void sk_add_bind_node(struct sock *sk,
					struct hlist_head *list)
{
	hlist_add_head(&sk->sk_bind_node, list);
}

#define sk_for_each(__sk, node, list) \
	hlist_for_each_entry(__sk, node, list, sk_node)
#define sk_for_each_from(__sk, node) \
	if (__sk && ({ node = &(__sk)->sk_node; 1; })) \
		hlist_for_each_entry_from(__sk, node, sk_node)
#define sk_for_each_continue(__sk, node) \
	if (__sk && ({ node = &(__sk)->sk_node; 1; })) \
		hlist_for_each_entry_continue(__sk, node, sk_node)
#define sk_for_each_safe(__sk, node, tmp, list) \
	hlist_for_each_entry_safe(__sk, node, tmp, list, sk_node)
#define sk_for_each_bound(__sk, node, list) \
	hlist_for_each_entry(__sk, node, list, sk_bind_node)

/* Sock flags */
enum sock_flags {
	SOCK_DEAD,
	SOCK_DONE,
	SOCK_URGINLINE,
	SOCK_KEEPOPEN,
	SOCK_LINGER,
	SOCK_DESTROY,
	SOCK_BROADCAST,
	SOCK_TIMESTAMP,
	SOCK_ZAPPED,
	SOCK_USE_WRITE_QUEUE,	/* whether to call sk->sk_write_space in sock_wfree */
	SOCK_DBG,		/* %SO_DEBUG setting */
	SOCK_RCVTSTAMP,		/* %SO_TIMESTAMP setting */
	SOCK_NO_LARGESEND,	/* whether to send large segments or not */
	SOCK_LOCALROUTE,	/* route locally only, %SO_DONTROUTE setting */
	SOCK_QUEUE_SHRUNK,	/* write queue has been shrunk recently */
};

static inline void sock_copy_flags(struct sock *nsk, struct sock *osk)
{
	nsk->sk_flags = osk->sk_flags;
}

static inline void sock_set_flag(struct sock *sk, enum sock_flags flag)
{
	__set_bit(flag, &sk->sk_flags);
}

static inline void sock_reset_flag(struct sock *sk, enum sock_flags flag)
{
	__clear_bit(flag, &sk->sk_flags);
}

static inline int sock_flag(struct sock *sk, enum sock_flags flag)
{
	return test_bit(flag, &sk->sk_flags);
}

static inline void sk_acceptq_removed(struct sock *sk)
{
	sk->sk_ack_backlog--;
}

static inline void sk_acceptq_added(struct sock *sk)
{
	sk->sk_ack_backlog++;
}

static inline int sk_acceptq_is_full(struct sock *sk)
{
	return sk->sk_ack_backlog > sk->sk_max_ack_backlog;
}

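/*
 * Illustrative accept-queue accounting (documentation only): a protocol's
 * connection-request handler might drop new requests once the backlog
 * configured by listen() is exceeded.  example_conn_request() is a
 * hypothetical function, not part of this header.
 */
#if 0
static int example_conn_request(struct sock *sk, struct sk_buff *skb)
{
	if (sk_acceptq_is_full(sk))
		return -ENOBUFS;	/* listener saturated, drop request */
	/* ... queue the request; call sk_acceptq_added(sk) when a child
	 * socket joins the accept queue, and sk_acceptq_removed(sk) when
	 * accept() picks it up ... */
	return 0;
}
#endif
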
/*
 * Compute minimal free write space needed to queue new packets.
 */
static inline int sk_stream_min_wspace(struct sock *sk)
{
	return sk->sk_wmem_queued / 2;
}

static inline int sk_stream_wspace(struct sock *sk)
{
	return sk->sk_sndbuf - sk->sk_wmem_queued;
}

extern void sk_stream_write_space(struct sock *sk);

static inline int sk_stream_memory_free(struct sock *sk)
{
	return sk->sk_wmem_queued < sk->sk_sndbuf;
}

extern void sk_stream_rfree(struct sk_buff *skb);

static inline void sk_stream_set_owner_r(struct sk_buff *skb, struct sock *sk)
{
	skb->sk = sk;
	skb->destructor = sk_stream_rfree;
	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
	sk->sk_forward_alloc -= skb->truesize;
}

static inline void sk_stream_free_skb(struct sock *sk, struct sk_buff *skb)
{
	sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
	sk->sk_wmem_queued   -= skb->truesize;
	sk->sk_forward_alloc += skb->truesize;
	__kfree_skb(skb);
}

/* The per-socket spinlock must be held here. */
static inline void sk_add_backlog(struct sock *sk, struct sk_buff *skb)
{
	if (!sk->sk_backlog.tail) {
		sk->sk_backlog.head = sk->sk_backlog.tail = skb;
	} else {
		sk->sk_backlog.tail->next = skb;
		sk->sk_backlog.tail = skb;
	}
	skb->next = NULL;
}

#define sk_wait_event(__sk, __timeo, __condition)		\
({	int rc;							\
	release_sock(__sk);					\
	rc = __condition;					\
	if (!rc) {						\
		*(__timeo) = schedule_timeout(*(__timeo));	\
	}							\
	lock_sock(__sk);					\
	rc = __condition;					\
	rc;							\
})

extern int sk_stream_wait_connect(struct sock *sk, long *timeo_p);
extern int sk_stream_wait_memory(struct sock *sk, long *timeo_p);
extern void sk_stream_wait_close(struct sock *sk, long timeo_p);
extern int sk_stream_error(struct sock *sk, int flags, int err);
extern void sk_stream_kill_queues(struct sock *sk);

extern int sk_wait_data(struct sock *sk, long *timeo);

struct request_sock_ops;
struct timewait_sock_ops;

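/*
 * Illustrative sk_wait_event() usage (documentation only), modeled on what
 * sk_wait_data() does: put the task on the socket's wait queue, then let
 * the macro drop the socket lock, sleep until the condition holds or the
 * timeout expires, and re-take the lock before re-testing the condition.
 * example_wait_for_data() is hypothetical.
 */
#if 0
static int example_wait_for_data(struct sock *sk, long *timeo)
{
	int rc;
	DEFINE_WAIT(wait);

	prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
	rc = sk_wait_event(sk, timeo,
			   !skb_queue_empty(&sk->sk_receive_queue));
	finish_wait(sk->sk_sleep, &wait);
	return rc;	/* non-zero if data arrived in time */
}
#endif
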
/* Networking protocol blocks we attach to sockets.
 * socket layer -> transport layer interface
 * transport -> network interface is defined by struct inet_proto
 */
struct proto {
	void			(*close)(struct sock *sk,
					 long timeout);
	int			(*connect)(struct sock *sk,
					   struct sockaddr *uaddr,
					   int addr_len);
	int			(*disconnect)(struct sock *sk, int flags);

	struct sock *		(*accept) (struct sock *sk, int flags, int *err);

	int			(*ioctl)(struct sock *sk, int cmd,
					 unsigned long arg);
	int			(*init)(struct sock *sk);
	int			(*destroy)(struct sock *sk);
	void			(*shutdown)(struct sock *sk, int how);
	int			(*setsockopt)(struct sock *sk, int level,
					int optname, char __user *optval,
					int optlen);
	int			(*getsockopt)(struct sock *sk, int level,
					int optname, char __user *optval,
					int __user *option);
	int			(*compat_setsockopt)(struct sock *sk,
					int level,
					int optname, char __user *optval,
					int optlen);
	int			(*compat_getsockopt)(struct sock *sk,
					int level,
					int optname, char __user *optval,
					int __user *option);
	int			(*sendmsg)(struct kiocb *iocb, struct sock *sk,
					   struct msghdr *msg, size_t len);
	int			(*recvmsg)(struct kiocb *iocb, struct sock *sk,
					   struct msghdr *msg,
					   size_t len, int noblock, int flags,
					   int *addr_len);
	int			(*sendpage)(struct sock *sk, struct page *page,
					int offset, size_t size, int flags);
	int			(*bind)(struct sock *sk,
					struct sockaddr *uaddr, int addr_len);

	int			(*backlog_rcv) (struct sock *sk,
						struct sk_buff *skb);

	/* Keeping track of sk's, looking them up, and port selection methods. */
	void			(*hash)(struct sock *sk);
	void			(*unhash)(struct sock *sk);
	int			(*get_port)(struct sock *sk, unsigned short snum);

	/* Memory pressure */
	void			(*enter_memory_pressure)(void);
	atomic_t		*memory_allocated;	/* Current allocated memory. */
	atomic_t		*sockets_allocated;	/* Current number of sockets. */
	/*
	 * Pressure flag: try to collapse.
	 * Technical note: it is used by multiple contexts non atomically.
	 * All the sk_stream_mem_schedule() is of this nature: accounting
	 * is strict, actions are advisory and have some latency.
	 */
	int			*memory_pressure;
	int			*sysctl_mem;
	int			*sysctl_wmem;
	int			*sysctl_rmem;
	int			max_header;

	kmem_cache_t		*slab;
	unsigned int		obj_size;

	atomic_t		*orphan_count;

	struct request_sock_ops	*rsk_prot;
	struct timewait_sock_ops *twsk_prot;

	struct module		*owner;

	char			name[32];

	struct list_head	node;
#ifdef SOCK_REFCNT_DEBUG
	atomic_t		socks;
#endif
	struct {
		int inuse;
		u8  __pad[SMP_CACHE_BYTES - sizeof(int)];
	} stats[NR_CPUS];
};

extern int proto_register(struct proto *prot, int alloc_slab);
extern void proto_unregister(struct proto *prot);

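/*
 * Illustrative protocol block (documentation only): a minimal struct proto
 * an address family might register.  All example_* symbols are
 * hypothetical; obj_size tells proto_register() how big this protocol's
 * socks are, so it can carve out a private slab cache when asked to.
 */
#if 0
static struct proto example_proto = {
	.name		= "EXAMPLE",
	.owner		= THIS_MODULE,
	.close		= example_close,
	.connect	= example_connect,
	.sendmsg	= example_sendmsg,
	.recvmsg	= example_recvmsg,
	.backlog_rcv	= example_backlog_rcv,
	.hash		= example_hash,
	.unhash		= example_unhash,
	.get_port	= example_get_port,
	.obj_size	= sizeof(struct example_sock),
};

static int __init example_init(void)
{
	/* Second argument requests a private slab cache of obj_size socks. */
	return proto_register(&example_proto, 1);
}
#endif
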
#ifdef SOCK_REFCNT_DEBUG
static inline void sk_refcnt_debug_inc(struct sock *sk)
{
	atomic_inc(&sk->sk_prot->socks);
}

static inline void sk_refcnt_debug_dec(struct sock *sk)
{
	atomic_dec(&sk->sk_prot->socks);
	printk(KERN_DEBUG "%s socket %p released, %d are still alive\n",
	       sk->sk_prot->name, sk, atomic_read(&sk->sk_prot->socks));
}

static inline void sk_refcnt_debug_release(const struct sock *sk)
{
	if (atomic_read(&sk->sk_refcnt) != 1)
		printk(KERN_DEBUG "Destruction of the %s socket %p delayed, refcnt=%d\n",
		       sk->sk_prot->name, sk, atomic_read(&sk->sk_refcnt));
}
#else /* SOCK_REFCNT_DEBUG */
#define sk_refcnt_debug_inc(sk) do { } while (0)
#define sk_refcnt_debug_dec(sk) do { } while (0)
#define sk_refcnt_debug_release(sk) do { } while (0)
#endif /* SOCK_REFCNT_DEBUG */

/* Called with local bh disabled */
static __inline__ void sock_prot_inc_use(struct proto *prot)
{
	prot->stats[smp_processor_id()].inuse++;
}

static __inline__ void sock_prot_dec_use(struct proto *prot)
{
	prot->stats[smp_processor_id()].inuse--;
}

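/*
 * Illustrative use of the per-CPU usage counters (documentation only):
 * protocols typically bump them from their hash/unhash callbacks, which
 * run with local BHs disabled so smp_processor_id() stays stable.
 * example_hash() and example_table are hypothetical.
 */
#if 0
static void example_hash(struct sock *sk)
{
	write_lock_bh(&example_table.lock);
	sk_add_node(sk, &example_table.chain[sk->sk_hash]);
	sock_prot_inc_use(sk->sk_prot);
	write_unlock_bh(&example_table.lock);
}
#endif
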
/* With per-bucket locks this operation is not atomic, so that
 * this version is not worse.
 */
static inline void __sk_prot_rehash(struct sock *sk)
{
	sk->sk_prot->unhash(sk);
	sk->sk_prot->hash(sk);
}

/* About 10 seconds */
#define SOCK_DESTROY_TIME (10*HZ)

/* Ports 0-1023 can't be bound to unless you are superuser */
#define PROT_SOCK	1024

#define SHUTDOWN_MASK	3
#define RCV_SHUTDOWN	1
#define SEND_SHUTDOWN	2

#define SOCK_SNDBUF_LOCK	1
#define SOCK_RCVBUF_LOCK	2
#define SOCK_BINDADDR_LOCK	4
#define SOCK_BINDPORT_LOCK	8

/* sock_iocb: used to kick off async processing of socket ios */
struct sock_iocb {
	struct list_head	list;

	int			flags;
	int			size;
	struct socket		*sock;
	struct sock		*sk;
	struct scm_cookie	*scm;
	struct msghdr		*msg, async_msg;
	struct iovec		async_iov;
	struct kiocb		*kiocb;
};

static inline struct sock_iocb *kiocb_to_siocb(struct kiocb *iocb)
{
	return (struct sock_iocb *)iocb->private;
}

static inline struct kiocb *siocb_to_kiocb(struct sock_iocb *si)
{
	return si->kiocb;
}

struct socket_alloc {
	struct socket socket;
	struct inode vfs_inode;
};

static inline struct socket *SOCKET_I(struct inode *inode)
{
	return &container_of(inode, struct socket_alloc, vfs_inode)->socket;
}

static inline struct inode *SOCK_INODE(struct socket *socket)
{
	return &container_of(socket, struct socket_alloc, socket)->vfs_inode;
}

extern void __sk_stream_mem_reclaim(struct sock *sk);
extern int sk_stream_mem_schedule(struct sock *sk, int size, int kind);

#define SK_STREAM_MEM_QUANTUM ((int)PAGE_SIZE)

static inline int sk_stream_pages(int amt)
{
	return (amt + SK_STREAM_MEM_QUANTUM - 1) / SK_STREAM_MEM_QUANTUM;
}

static inline void sk_stream_mem_reclaim(struct sock *sk)
{
	if (sk->sk_forward_alloc >= SK_STREAM_MEM_QUANTUM)
		__sk_stream_mem_reclaim(sk);
}

static inline void sk_stream_writequeue_purge(struct sock *sk)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL)
		sk_stream_free_skb(sk, skb);
	sk_stream_mem_reclaim(sk);
}

static inline int sk_stream_rmem_schedule(struct sock *sk, struct sk_buff *skb)
{
	return (int)skb->truesize <= sk->sk_forward_alloc ||
		sk_stream_mem_schedule(sk, skb->truesize, 1);
}

static inline int sk_stream_wmem_schedule(struct sock *sk, int size)
{
	return size <= sk->sk_forward_alloc ||
		sk_stream_mem_schedule(sk, size, 0);
}

/* Used by processes to "lock" a socket state, so that
 * interrupts and bottom half handlers won't change it
 * from under us. It essentially blocks any incoming
 * packets, so that we won't get any new data or any
 * packets that change the state of the socket.
 *
 * While locked, BH processing will add new packets to
 * the backlog queue.  This queue is processed by the
 * owner of the socket lock right before it is released.
 *
 * Since ~2.3.5 it is also exclusive sleep lock serializing
 * accesses from user process context.
 */
#define sock_owned_by_user(sk)	((sk)->sk_lock.owner)

extern void FASTCALL(lock_sock(struct sock *sk));
extern void FASTCALL(release_sock(struct sock *sk));

/* BH context may only use the following locking interface. */
#define bh_lock_sock(__sk)	spin_lock(&((__sk)->sk_lock.slock))
#define bh_unlock_sock(__sk)	spin_unlock(&((__sk)->sk_lock.slock))

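/*
 * Illustrative locking discipline (documentation only), modeled on how
 * protocols use the two halves of the socket lock: process context takes
 * the sleeping lock with lock_sock(); the BH receive path takes the
 * spinlock and, if a user context owns the socket, defers the packet to
 * the backlog, which the lock owner drains through sk_backlog_rcv before
 * releasing.  example_rcv() is hypothetical.
 */
#if 0
static int example_rcv(struct sock *sk, struct sk_buff *skb)
{
	int rc = 0;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk))
		rc = sk->sk_backlog_rcv(sk, skb);	/* process now */
	else
		sk_add_backlog(sk, skb);		/* owner will drain */
	bh_unlock_sock(sk);
	return rc;
}
#endif
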
extern struct sock		*sk_alloc(int family,
					  gfp_t priority,
					  struct proto *prot, int zero_it);
extern void			sk_free(struct sock *sk);
extern struct sock		*sk_clone(const struct sock *sk,
					  const gfp_t priority);

extern struct sk_buff		*sock_wmalloc(struct sock *sk,
					      unsigned long size, int force,
					      gfp_t priority);
extern struct sk_buff		*sock_rmalloc(struct sock *sk,
					      unsigned long size, int force,
					      gfp_t priority);
extern void			sock_wfree(struct sk_buff *skb);
extern void			sock_rfree(struct sk_buff *skb);

extern int			sock_setsockopt(struct socket *sock, int level,
						int op, char __user *optval,
						int optlen);

extern int			sock_getsockopt(struct socket *sock, int level,
						int op, char __user *optval,
						int __user *optlen);
extern struct sk_buff		*sock_alloc_send_skb(struct sock *sk,
						     unsigned long size,
						     int noblock,
						     int *errcode);
extern void *sock_kmalloc(struct sock *sk, int size,
			  gfp_t priority);
extern void sock_kfree_s(struct sock *sk, void *mem, int size);
extern void sk_send_sigurg(struct sock *sk);

/*
 * Functions to fill in entries in struct proto_ops when a protocol
 * does not implement a particular function.
 */
extern int			sock_no_bind(struct socket *,
					     struct sockaddr *, int);
extern int			sock_no_connect(struct socket *,
						struct sockaddr *, int, int);
extern int			sock_no_socketpair(struct socket *,
						   struct socket *);
extern int			sock_no_accept(struct socket *,
					       struct socket *, int);
extern int			sock_no_getname(struct socket *,
						struct sockaddr *, int *, int);
extern unsigned int		sock_no_poll(struct file *, struct socket *,
					     struct poll_table_struct *);
extern int			sock_no_ioctl(struct socket *, unsigned int,
					      unsigned long);
extern int			sock_no_listen(struct socket *, int);
extern int			sock_no_shutdown(struct socket *, int);
extern int			sock_no_getsockopt(struct socket *, int , int,
						   char __user *, int __user *);
extern int			sock_no_setsockopt(struct socket *, int, int,
						   char __user *, int);
extern int			sock_no_sendmsg(struct kiocb *, struct socket *,
						struct msghdr *, size_t);
extern int			sock_no_recvmsg(struct kiocb *, struct socket *,
						struct msghdr *, size_t, int);
extern int			sock_no_mmap(struct file *file,
					     struct socket *sock,
					     struct vm_area_struct *vma);
extern ssize_t			sock_no_sendpage(struct socket *sock,
						 struct page *page,
						 int offset, size_t size,
						 int flags);

/*
 * Functions to fill in entries in struct proto_ops when a protocol
 * uses the inet style.
 */
extern int sock_common_getsockopt(struct socket *sock, int level, int optname,
				  char __user *optval, int __user *optlen);
extern int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
			       struct msghdr *msg, size_t size, int flags);
extern int sock_common_setsockopt(struct socket *sock, int level, int optname,
				  char __user *optval, int optlen);
extern int compat_sock_common_getsockopt(struct socket *sock, int level,
		int optname, char __user *optval, int __user *optlen);
extern int compat_sock_common_setsockopt(struct socket *sock, int level,
		int optname, char __user *optval, int optlen);

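/*
 * Illustrative proto_ops table (documentation only): a datagram-style
 * protocol can plug the sock_no_* stubs into every struct proto_ops slot
 * it does not implement, and the sock_common_* helpers into the inet-style
 * slots.  All example_* symbols and PF_EXAMPLE are hypothetical; the field
 * list follows struct proto_ops from <linux/net.h>.
 */
#if 0
static const struct proto_ops example_dgram_ops = {
	.family		= PF_EXAMPLE,
	.owner		= THIS_MODULE,
	.release	= example_release,
	.bind		= example_bind,
	.connect	= sock_no_connect,	/* connectionless */
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.getname	= example_getname,
	.poll		= datagram_poll,
	.ioctl		= sock_no_ioctl,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= sock_common_setsockopt,
	.getsockopt	= sock_common_getsockopt,
	.sendmsg	= example_sendmsg,
	.recvmsg	= sock_common_recvmsg,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage,
};
#endif
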
extern void sk_common_release(struct sock *sk);

/*
 *	Default socket callbacks and setup code
 */

/* Initialise core socket variables */
extern void sock_init_data(struct socket *sock, struct sock *sk);

/**
 *	sk_filter - run a packet through a socket filter
 *	@sk: sock associated with &sk_buff
 *	@skb: buffer to filter
 *	@needlock: set to 1 if the sock is not locked by caller.
 *
 * Run the filter code and then cut skb->data to the correct size returned
 * by sk_run_filter. If pkt_len is 0 we toss the packet. If skb->len is
 * smaller than pkt_len we keep the whole skb->data. This is the socket
 * level wrapper to sk_run_filter. It returns 0 if the packet should
 * be accepted or -EPERM if the packet should be tossed.
 */

static inline int sk_filter(struct sock *sk, struct sk_buff *skb, int needlock)
{
	int err;

	err = security_sock_rcv_skb(sk, skb);
	if (err)
		return err;

	if (sk->sk_filter) {
		struct sk_filter *filter;

		if (needlock)
			bh_lock_sock(sk);

		filter = sk->sk_filter;
		if (filter) {
			unsigned int pkt_len = sk_run_filter(skb, filter->insns,
							     filter->len);
			if (!pkt_len)
				err = -EPERM;
			else
				skb_trim(skb, pkt_len);
		}

		if (needlock)
			bh_unlock_sock(sk);
	}
	return err;
}

/**
 *	sk_filter_release: Release a socket filter
 *	@sk: socket
 *	@fp: filter to remove
 *
 *	Remove a filter from a socket and release its resources.
 */

static inline void sk_filter_release(struct sock *sk, struct sk_filter *fp)
{
	unsigned int size = sk_filter_len(fp);

	atomic_sub(size, &sk->sk_omem_alloc);

	if (atomic_dec_and_test(&fp->refcnt))
		kfree(fp);
}

static inline void sk_filter_charge(struct sock *sk, struct sk_filter *fp)
{
	atomic_inc(&fp->refcnt);
	atomic_add(sk_filter_len(fp), &sk->sk_omem_alloc);
}

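/*
 * Illustrative sk_filter() usage (documentation only), modeled on the
 * checks a receive path performs before queueing: run the socket filter
 * (taking the BH lock ourselves, hence needlock=1) and drop the packet on
 * any error.  example_sock_rcv() is hypothetical.
 */
#if 0
static int example_sock_rcv(struct sock *sk, struct sk_buff *skb)
{
	int err = sk_filter(sk, skb, 1);

	if (err) {
		kfree_skb(skb);	/* LSM veto, or the filter said -EPERM */
		return err;
	}
	/* A real path would also charge the skb to sk_rmem_alloc first
	 * (see skb_set_owner_r() further below), as sock_queue_rcv_skb()
	 * does. */
	skb_queue_tail(&sk->sk_receive_queue, skb);
	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk, skb->len);
	return 0;
}
#endif
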
/*
 * Socket reference counting postulates.
 *
 * * Each user of socket SHOULD hold a reference count.
 * * Each access point to socket (a hash table bucket, reference from a list,
 *   running timer, skb in flight) MUST hold a reference count.
 * * When reference count hits 0, it means it will never increase back.
 * * When reference count hits 0, it means that no references from
 *   outside exist to this socket and current process on current CPU
 *   is last user and may/should destroy this socket.
 * * sk_free is called from any context: process, BH, IRQ. When
 *   it is called, socket has no references from outside -> sk_free
 *   may release descendant resources allocated by the socket, but
 *   to the time when it is called, socket is NOT referenced by any
 *   hash tables, lists etc.
 * * Packets, delivered from outside (from network or from another process)
 *   and enqueued on receive/error queues SHOULD NOT grab reference count,
 *   while they sit in a queue. Otherwise, packets would leak into a hole
 *   when a socket is looked up by one CPU while unhashing is done by
 *   another CPU. This holds for udp/raw and netlink (which could leak via
 *   the receive and error queues) and for tcp (which could leak via the
 *   backlog). The packet socket does all of its processing inside
 *   BR_NETPROTO_LOCK, so it does not have this race condition. UNIX
 *   sockets use a separate SMP lock, so they are covered as well.
 */

/* Ungrab socket and destroy it, if it was the last reference. */
static inline void sock_put(struct sock *sk)
{
	if (atomic_dec_and_test(&sk->sk_refcnt))
		sk_free(sk);
}

extern int sk_receive_skb(struct sock *sk, struct sk_buff *skb);

/* Detach socket from process context.
 * Announce socket dead, detach it from wait queue and inode.
 * Note that parent inode held reference count on this struct sock,
 * we do not release it in this function, because protocol
 * probably wants some additional cleanups or even continuing
 * to work with this socket (TCP).
 */
static inline void sock_orphan(struct sock *sk)
{
	write_lock_bh(&sk->sk_callback_lock);
	sock_set_flag(sk, SOCK_DEAD);
	sk->sk_socket = NULL;
	sk->sk_sleep  = NULL;
	write_unlock_bh(&sk->sk_callback_lock);
}

static inline void sock_graft(struct sock *sk, struct socket *parent)
{
	write_lock_bh(&sk->sk_callback_lock);
	sk->sk_sleep = &parent->wait;
	parent->sk = sk;
	sk->sk_socket = parent;
	write_unlock_bh(&sk->sk_callback_lock);
}

extern int sock_i_uid(struct sock *sk);
extern unsigned long sock_i_ino(struct sock *sk);

static inline struct dst_entry *
__sk_dst_get(struct sock *sk)
{
	return sk->sk_dst_cache;
}

static inline struct dst_entry *
sk_dst_get(struct sock *sk)
{
	struct dst_entry *dst;

	read_lock(&sk->sk_dst_lock);
	dst = sk->sk_dst_cache;
	if (dst)
		dst_hold(dst);
	read_unlock(&sk->sk_dst_lock);
	return dst;
}

static inline void
__sk_dst_set(struct sock *sk, struct dst_entry *dst)
{
	struct dst_entry *old_dst;

	old_dst = sk->sk_dst_cache;
	sk->sk_dst_cache = dst;
	dst_release(old_dst);
}

static inline void
sk_dst_set(struct sock *sk, struct dst_entry *dst)
{
	write_lock(&sk->sk_dst_lock);
	__sk_dst_set(sk, dst);
	write_unlock(&sk->sk_dst_lock);
}

static inline void
__sk_dst_reset(struct sock *sk)
{
	struct dst_entry *old_dst;

	old_dst = sk->sk_dst_cache;
	sk->sk_dst_cache = NULL;
	dst_release(old_dst);
}

static inline void
sk_dst_reset(struct sock *sk)
{
	write_lock(&sk->sk_dst_lock);
	__sk_dst_reset(sk);
	write_unlock(&sk->sk_dst_lock);
}

extern struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);

extern struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie);

static inline void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
{
	__sk_dst_set(sk, dst);
	sk->sk_route_caps = dst->dev->features;
	if (sk->sk_route_caps & NETIF_F_TSO) {
		if (sock_flag(sk, SOCK_NO_LARGESEND) || dst->header_len)
			sk->sk_route_caps &= ~NETIF_F_TSO;
	}
}

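/*
 * Illustrative destination-cache usage (documentation only): a transmit
 * path takes its own reference on the cached route with sk_dst_get() and
 * drops it with dst_release() when done; sk_dst_check() would instead
 * validate the entry against a cookie and reset a stale one.
 * example_xmit_route() is hypothetical.
 */
#if 0
static int example_xmit_route(struct sock *sk)
{
	struct dst_entry *dst = sk_dst_get(sk);	/* +1 ref, may be NULL */

	if (!dst)
		return -EHOSTUNREACH;	/* caller must (re)route first */
	/* ... hand queued skbs to dst->dev ... */
	dst_release(dst);		/* drop our reference */
	return 0;
}
#endif
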
static inline void sk_charge_skb(struct sock *sk, struct sk_buff *skb)
{
	sk->sk_wmem_queued   += skb->truesize;
	sk->sk_forward_alloc -= skb->truesize;
}

static inline int skb_copy_to_page(struct sock *sk, char __user *from,
				   struct sk_buff *skb, struct page *page,
				   int off, int copy)
{
	if (skb->ip_summed == CHECKSUM_NONE) {
		int err = 0;
		unsigned int csum = csum_and_copy_from_user(from,
						page_address(page) + off,
							    copy, 0, &err);
		if (err)
			return err;
		skb->csum = csum_block_add(skb->csum, csum, skb->len);
	} else if (copy_from_user(page_address(page) + off, from, copy))
		return -EFAULT;

	skb->len	     += copy;
	skb->data_len	     += copy;
	skb->truesize	     += copy;
	sk->sk_wmem_queued   += copy;
	sk->sk_forward_alloc -= copy;
	return 0;
}

/*
 *	Queue a received datagram if it will fit. Stream and sequenced
 *	protocols can't normally use this as they need to fit buffers in
 *	and play with them.
 *
 *	Inlined as it's very short and called for pretty much every
 *	packet ever received.
 */

static inline void skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
{
	sock_hold(sk);
	skb->sk = sk;
	skb->destructor = sock_wfree;
	atomic_add(skb->truesize, &sk->sk_wmem_alloc);
}

static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
{
	skb->sk = sk;
	skb->destructor = sock_rfree;
	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
}

extern void sk_reset_timer(struct sock *sk, struct timer_list *timer,
			   unsigned long expires);

extern void sk_stop_timer(struct sock *sk, struct timer_list *timer);

extern int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);

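/*
 * Illustrative timer usage (documentation only): sk_reset_timer() takes a
 * socket reference that the timer handler (or sk_stop_timer()) releases,
 * so the sock cannot be freed while the timer is pending.  The interval
 * and example_arm_keepalive() are hypothetical.
 */
#if 0
static void example_arm_keepalive(struct sock *sk)
{
	/* Re-arm sk->sk_timer 10 seconds from now; holds a sock ref. */
	sk_reset_timer(sk, &sk->sk_timer, jiffies + 10 * HZ);
}
#endif
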
static inline int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
{
	/* Cast skb->rcvbuf to unsigned... It's pointless, but reduces
	   the number of warnings when compiling with -W --ANK
	 */
	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
	    (unsigned)sk->sk_rcvbuf)
		return -ENOMEM;
	skb_set_owner_r(skb, sk);
	skb_queue_tail(&sk->sk_error_queue, skb);
	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk, skb->len);
	return 0;
}

/*
 *	Recover an error report and clear atomically
 */

static inline int sock_error(struct sock *sk)
{
	int err;
	if (likely(!sk->sk_err))
		return 0;
	err = xchg(&sk->sk_err, 0);
	return -err;
}

static inline unsigned long sock_wspace(struct sock *sk)
{
	int amt = 0;

	if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
		amt = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc);
		if (amt < 0)
			amt = 0;
	}
	return amt;
}

static inline void sk_wake_async(struct sock *sk, int how, int band)
{
	if (sk->sk_socket && sk->sk_socket->fasync_list)
		sock_wake_async(sk->sk_socket, how, band);
}

#define SOCK_MIN_SNDBUF 2048
#define SOCK_MIN_RCVBUF 256

static inline void sk_stream_moderate_sndbuf(struct sock *sk)
{
	if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK)) {
		sk->sk_sndbuf = min(sk->sk_sndbuf, sk->sk_wmem_queued / 2);
		sk->sk_sndbuf = max(sk->sk_sndbuf, SOCK_MIN_SNDBUF);
	}
}

static inline struct sk_buff *sk_stream_alloc_pskb(struct sock *sk,
						   int size, int mem,
						   gfp_t gfp)
{
	struct sk_buff *skb;
	int hdr_len;

	hdr_len = SKB_DATA_ALIGN(sk->sk_prot->max_header);
	skb = alloc_skb_fclone(size + hdr_len, gfp);
	if (skb) {
		skb->truesize += mem;
		if (sk_stream_wmem_schedule(sk, skb->truesize)) {
			skb_reserve(skb, hdr_len);
			return skb;
		}
		__kfree_skb(skb);
	} else {
		sk->sk_prot->enter_memory_pressure();
		sk_stream_moderate_sndbuf(sk);
	}
	return NULL;
}

static inline struct sk_buff *sk_stream_alloc_skb(struct sock *sk,
						  int size,
						  gfp_t gfp)
{
	return sk_stream_alloc_pskb(sk, size, 0, gfp);
}

static inline struct page *sk_stream_alloc_page(struct sock *sk)
{
	struct page *page = NULL;

	page = alloc_pages(sk->sk_allocation, 0);
	if (!page) {
		sk->sk_prot->enter_memory_pressure();
		sk_stream_moderate_sndbuf(sk);
	}
	return page;
}

#define sk_stream_for_retrans_queue(skb, sk)				\
		for (skb = (sk)->sk_write_queue.next;			\
		     (skb != (sk)->sk_send_head) &&			\
		     (skb != (struct sk_buff *)&(sk)->sk_write_queue);	\
		     skb = skb->next)

/* From STCP, for fast SACK processing. */
#define sk_stream_for_retrans_queue_from(skb, sk)			\
		for (; (skb != (sk)->sk_send_head) &&			\
		     (skb != (struct sk_buff *)&(sk)->sk_write_queue);	\
		     skb = skb->next)

/*
 *	Default write policy as shown to user space via poll/select/SIGIO
 */
static inline int sock_writeable(const struct sock *sk)
{
	return atomic_read(&sk->sk_wmem_alloc) < (sk->sk_sndbuf / 2);
}

static inline gfp_t gfp_any(void)
{
	return in_softirq() ? GFP_ATOMIC : GFP_KERNEL;
}

static inline long sock_rcvtimeo(const struct sock *sk, int noblock)
{
	return noblock ? 0 : sk->sk_rcvtimeo;
}

static inline long sock_sndtimeo(const struct sock *sk, int noblock)
{
	return noblock ? 0 : sk->sk_sndtimeo;
}

static inline int sock_rcvlowat(const struct sock *sk, int waitall, int len)
{
	return (waitall ? len : min_t(int, sk->sk_rcvlowat, len)) ? : 1;
}

/* Alas, with timeout socket operations are not restartable.
 * Compare this to poll().
 */
static inline int sock_intr_errno(long timeo)
{
	return timeo == MAX_SCHEDULE_TIMEOUT ? -ERESTARTSYS : -EINTR;
}

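/*
 * Illustrative timeout handling (documentation only), following the usual
 * recvmsg pattern: derive the timeout from MSG_DONTWAIT, sleep for data,
 * and convert a pending signal into -ERESTARTSYS or -EINTR with
 * sock_intr_errno() depending on whether a finite timeout was running.
 * example_recv_wait() is hypothetical.
 */
#if 0
static int example_recv_wait(struct sock *sk, int flags)
{
	long timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	while (skb_queue_empty(&sk->sk_receive_queue)) {
		if (!timeo)
			return -EAGAIN;			/* non-blocking */
		if (signal_pending(current))
			return sock_intr_errno(timeo);	/* not restartable */
		sk_wait_data(sk, &timeo);
	}
	return 0;
}
#endif
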
static __inline__ void
sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
{
	struct timeval stamp;

	skb_get_timestamp(skb, &stamp);
	if (sock_flag(sk, SOCK_RCVTSTAMP)) {
		/* Race occurred between timestamp enabling and packet
		   receiving.  Fill in the current time for now. */
		if (stamp.tv_sec == 0)
			do_gettimeofday(&stamp);
		skb_set_timestamp(skb, &stamp);
		put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMP, sizeof(struct timeval),
			 &stamp);
	} else
		sk->sk_stamp = stamp;
}

/**
 * sk_eat_skb - Release a skb if it is no longer needed
 * @sk: socket to eat this skb from
 * @skb: socket buffer to eat
 *
 * This routine must be called with interrupts disabled or with the socket
 * locked so that the sk_buff queue operation is ok.
 */
static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb)
{
	__skb_unlink(skb, &sk->sk_receive_queue);
	__kfree_skb(skb);
}

extern void sock_enable_timestamp(struct sock *sk);
extern int sock_get_timestamp(struct sock *, struct timeval __user *);

/*
 *	Enable debug/info messages
 */

#ifdef CONFIG_NETDEBUG
#define NETDEBUG(fmt, args...)	printk(fmt,##args)
#define LIMIT_NETDEBUG(fmt, args...) do { if (net_ratelimit()) printk(fmt,##args); } while(0)
#else
#define NETDEBUG(fmt, args...)	do { } while (0)
#define LIMIT_NETDEBUG(fmt, args...) do { } while(0)
#endif

/*
 * Macros for sleeping on a socket. Use them like this:
 *
 *	SOCK_SLEEP_PRE(sk)
 *	if (condition)
 *		schedule();
 *	SOCK_SLEEP_POST(sk)
 *
 * N.B. These are now obsolete and were, afaik, only ever used in DECnet
 * and when the last use of them in DECnet has gone, I'm intending to
 * remove them.
 */

#define SOCK_SLEEP_PRE(sk)	{ struct task_struct *tsk = current; \
				DECLARE_WAITQUEUE(wait, tsk); \
				tsk->state = TASK_INTERRUPTIBLE; \
				add_wait_queue((sk)->sk_sleep, &wait); \
				release_sock(sk);

#define SOCK_SLEEP_POST(sk)	tsk->state = TASK_RUNNING; \
				remove_wait_queue((sk)->sk_sleep, &wait); \
				lock_sock(sk); \
				}

static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
{
	if (valbool)
		sock_set_flag(sk, bit);
	else
		sock_reset_flag(sk, bit);
}

extern __u32 sysctl_wmem_max;
extern __u32 sysctl_rmem_max;

#ifdef CONFIG_NET
int siocdevprivate_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg);
#else
static inline int siocdevprivate_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg)
{
	return -ENODEV;
}
#endif

extern void sk_init(void);

#ifdef CONFIG_SYSCTL
extern struct ctl_table core_table[];
#endif

extern int sysctl_optmem_max;

extern __u32 sysctl_wmem_default;
extern __u32 sysctl_rmem_default;

#endif /* _SOCK_H */