/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the AF_INET socket handler.
 *
 * Version:	@(#)sock.h	1.0.4	05/13/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche <flla@stud.uni-sb.de>
 *
 * Fixes:
 *		Alan Cox	:	Volatiles in skbuff pointers. See
 *					skbuff comments. May be overdone,
 *					better to prove they can be removed
 *					than the reverse.
 *		Alan Cox	:	Added a zapped field for tcp to note
 *					that a socket is reset and must stay shut up
 *		Alan Cox	:	New fields for options
 *		Pauline Middelink :	identd support
 *		Alan Cox	:	Eliminate low level recv/recvfrom
 *		David S. Miller	:	New socket lookup architecture.
 *		Steve Whitehouse:	Default routines for sock_ops
 *		Arnaldo C. Melo	:	removed net_pinfo, tp_pinfo and made
 *					protinfo be just a void pointer, as the
 *					protocol specific parts were moved to
 *					respective headers and ipv4/v6, etc now
 *					use private slabcaches for their socks
 *		Pedro Hortas	:	New flags field for socket options
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#ifndef _SOCK_H
#define _SOCK_H

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/list_nulls.h>
#include <linux/timer.h>
#include <linux/cache.h>
#include <linux/module.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>	/* struct sk_buff */
#include <linux/mm.h>
#include <linux/security.h>

#include <linux/filter.h>
#include <linux/rculist_nulls.h>

#include <asm/atomic.h>
#include <net/dst.h>
#include <net/checksum.h>

/*
 * This structure really needs to be cleaned up.
 * Most of it is for TCP, and not used by any of
 * the other protocols.
 */

/* Define this to get the SOCK_DBG debugging facility. */
#define SOCK_DEBUGGING
#ifdef SOCK_DEBUGGING
#define SOCK_DEBUG(sk, msg...) do { if ((sk) && sock_flag((sk), SOCK_DBG)) \
					printk(KERN_DEBUG msg); } while (0)
#else
/* Validate arguments and do nothing */
static inline void __attribute__ ((format (printf, 2, 3)))
SOCK_DEBUG(struct sock *sk, const char *msg, ...)
{
}
#endif
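
/*
 * Illustrative sketch (not part of the original header): how a protocol
 * might use SOCK_DEBUG() in a receive handler. The function name and
 * message below are hypothetical; output only appears when the socket
 * has SOCK_DBG set (via SO_DEBUG) and SOCK_DEBUGGING is defined.
 *
 *	static int example_rcv(struct sock *sk, struct sk_buff *skb)
 *	{
 *		SOCK_DEBUG(sk, "example_rcv: sk %p, skb len %u\n",
 *			   sk, skb->len);
 *		return 0;
 *	}
 */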

/* This is the per-socket lock.  The spinlock provides a synchronization
 * between user contexts and software interrupt processing, whereas the
 * mini-semaphore synchronizes multiple users amongst themselves.
 */
typedef struct {
	spinlock_t		slock;
	int			owned;
	wait_queue_head_t	wq;
	/*
	 * We express the mutex-alike socket_lock semantics
	 * to the lock validator by explicitly managing
	 * the slock as a lock variant (in addition to
	 * the slock itself):
	 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map	dep_map;
#endif
} socket_lock_t;

struct sock;
struct proto;
struct net;

/**
 *	struct sock_common - minimal network layer representation of sockets
 *	@skc_family: network address family
 *	@skc_state: Connection state
 *	@skc_reuse: %SO_REUSEADDR setting
 *	@skc_bound_dev_if: bound device index if != 0
 *	@skc_node: main hash linkage for various protocol lookup tables
 *	@skc_nulls_node: main hash linkage for UDP/UDP-Lite protocol
 *	@skc_bind_node: bind hash linkage for various protocol lookup tables
 *	@skc_refcnt: reference count
 *	@skc_hash: hash value used with various protocol lookup tables
 *	@skc_prot: protocol handlers inside a network family
 *	@skc_net: reference to the network namespace of this socket
 *
 *	This is the minimal network layer representation of sockets, the header
 *	for struct sock and struct inet_timewait_sock.
 */
struct sock_common {
	unsigned short		skc_family;
	volatile unsigned char	skc_state;
	unsigned char		skc_reuse;
	int			skc_bound_dev_if;
	union {
		struct hlist_node	skc_node;
		struct hlist_nulls_node	skc_nulls_node;
	};
	struct hlist_node	skc_bind_node;
	atomic_t		skc_refcnt;
	unsigned int		skc_hash;
	struct proto		*skc_prot;
#ifdef CONFIG_NET_NS
	struct net		*skc_net;
#endif
};
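
/*
 * Illustrative note (a sketch, not from the original header): because
 * struct sock_common is the first member of both struct sock and
 * struct inet_timewait_sock, lookup code can inspect the shared fields
 * of either object through the same pointer type. The helper below is
 * hypothetical:
 *
 *	static int example_match(const struct sock_common *skc,
 *				 unsigned short family, unsigned int hash)
 *	{
 *		return skc->skc_family == family && skc->skc_hash == hash;
 *	}
 *
 * A struct sock * found in a hash table may be examined through
 * &sk->__sk_common (or, equivalently, a cast to struct sock_common *),
 * which is exactly what the sk_* accessor macros below rely on.
 */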

/**
 *	struct sock - network layer representation of sockets
 *	@__sk_common: shared layout with inet_timewait_sock
 *	@sk_shutdown: mask of %SEND_SHUTDOWN and/or %RCV_SHUTDOWN
 *	@sk_userlocks: %SO_SNDBUF and %SO_RCVBUF settings
 *	@sk_lock: synchronizer
 *	@sk_rcvbuf: size of receive buffer in bytes
 *	@sk_sleep: sock wait queue
 *	@sk_dst_cache: destination cache
 *	@sk_dst_lock: destination cache lock
 *	@sk_policy: flow policy
 *	@sk_rmem_alloc: receive queue bytes committed
 *	@sk_receive_queue: incoming packets
 *	@sk_wmem_alloc: transmit queue bytes committed
 *	@sk_write_queue: Packet sending queue
 *	@sk_async_wait_queue: DMA copied packets
 *	@sk_omem_alloc: "o" is "option" or "other"
 *	@sk_wmem_queued: persistent queue size
 *	@sk_forward_alloc: space allocated forward
 *	@sk_allocation: allocation mode
 *	@sk_sndbuf: size of send buffer in bytes
 *	@sk_flags: %SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE,
 *		   %SO_OOBINLINE settings, %SO_TIMESTAMPING settings
 *	@sk_no_check: %SO_NO_CHECK setting, whether or not to checksum packets
 *	@sk_route_caps: route capabilities (e.g. %NETIF_F_TSO)
 *	@sk_gso_type: GSO type (e.g. %SKB_GSO_TCPV4)
 *	@sk_gso_max_size: Maximum GSO segment size to build
 *	@sk_lingertime: %SO_LINGER l_linger setting
 *	@sk_backlog: always used with the per-socket spinlock held
 *	@sk_callback_lock: used with the callbacks in the end of this struct
 *	@sk_error_queue: rarely used
 *	@sk_prot_creator: sk_prot of original sock creator (see ipv6_setsockopt,
 *			  IPV6_ADDRFORM for instance)
 *	@sk_err: last error
 *	@sk_err_soft: errors that don't cause failure but are the cause of a
 *		      persistent failure not just 'timed out'
 *	@sk_drops: raw/udp drops counter
 *	@sk_ack_backlog: current listen backlog
 *	@sk_max_ack_backlog: listen backlog set in listen()
 *	@sk_priority: %SO_PRIORITY setting
 *	@sk_type: socket type (%SOCK_STREAM, etc)
 *	@sk_protocol: which protocol this socket belongs in this network family
 *	@sk_peercred: %SO_PEERCRED setting
 *	@sk_rcvlowat: %SO_RCVLOWAT setting
 *	@sk_rcvtimeo: %SO_RCVTIMEO setting
 *	@sk_sndtimeo: %SO_SNDTIMEO setting
 *	@sk_filter: socket filtering instructions
 *	@sk_protinfo: private area, net family specific, when not using slab
 *	@sk_timer: sock cleanup timer
 *	@sk_stamp: time stamp of last packet received
 *	@sk_socket: Identd and reporting IO signals
 *	@sk_user_data: RPC layer private data
 *	@sk_sndmsg_page: cached page for sendmsg
 *	@sk_sndmsg_off: cached offset for sendmsg
 *	@sk_send_head: front of stuff to transmit
 *	@sk_security: used by security modules
 *	@sk_mark: generic packet mark
 *	@sk_write_pending: a write to stream socket waits to start
 *	@sk_state_change: callback to indicate change in the state of the sock
 *	@sk_data_ready: callback to indicate there is data to be processed
 *	@sk_write_space: callback to indicate there is buffer sending space
 *			 available
 *	@sk_error_report: callback to indicate errors (e.g. %MSG_ERRQUEUE)
 *	@sk_backlog_rcv: callback to process the backlog
 *	@sk_destruct: called at sock freeing time, i.e. when refcnt == 0
 */
struct sock {
	/*
	 * Now struct inet_timewait_sock also uses sock_common, so please just
	 * don't add anything before this first member (__sk_common) --acme
	 */
	struct sock_common	__sk_common;
#define sk_family		__sk_common.skc_family
#define sk_state		__sk_common.skc_state
#define sk_reuse		__sk_common.skc_reuse
#define sk_bound_dev_if		__sk_common.skc_bound_dev_if
#define sk_node			__sk_common.skc_node
#define sk_nulls_node		__sk_common.skc_nulls_node
#define sk_bind_node		__sk_common.skc_bind_node
#define sk_refcnt		__sk_common.skc_refcnt
#define sk_hash			__sk_common.skc_hash
#define sk_prot			__sk_common.skc_prot
#define sk_net			__sk_common.skc_net
	kmemcheck_bitfield_begin(flags);
	unsigned char		sk_shutdown : 2,
				sk_no_check : 2,
				sk_userlocks : 4;
	kmemcheck_bitfield_end(flags);
	unsigned char		sk_protocol;
	unsigned short		sk_type;
	int			sk_rcvbuf;
	socket_lock_t		sk_lock;
	/*
	 * The backlog queue is special, it is always used with
	 * the per-socket spinlock held and requires low latency
	 * access. Therefore we special case its implementation.
	 */
	struct {
		struct sk_buff *head;
		struct sk_buff *tail;
	} sk_backlog;
	wait_queue_head_t	*sk_sleep;
	struct dst_entry	*sk_dst_cache;
#ifdef CONFIG_XFRM
	struct xfrm_policy	*sk_policy[2];
#endif
	rwlock_t		sk_dst_lock;
	atomic_t		sk_rmem_alloc;
	atomic_t		sk_wmem_alloc;
	atomic_t		sk_omem_alloc;
	int			sk_sndbuf;
	struct sk_buff_head	sk_receive_queue;
	struct sk_buff_head	sk_write_queue;
#ifdef CONFIG_NET_DMA
	struct sk_buff_head	sk_async_wait_queue;
#endif
	int			sk_wmem_queued;
	int			sk_forward_alloc;
	gfp_t			sk_allocation;
	int			sk_route_caps;
	int			sk_gso_type;
	unsigned int		sk_gso_max_size;
	int			sk_rcvlowat;
	unsigned long		sk_flags;
	unsigned long		sk_lingertime;
	struct sk_buff_head	sk_error_queue;
	struct proto		*sk_prot_creator;
	rwlock_t		sk_callback_lock;
	int			sk_err,
				sk_err_soft;
	atomic_t		sk_drops;
	unsigned short		sk_ack_backlog;
	unsigned short		sk_max_ack_backlog;
	__u32			sk_priority;
	struct ucred		sk_peercred;
	long			sk_rcvtimeo;
	long			sk_sndtimeo;
	struct sk_filter	*sk_filter;
	void			*sk_protinfo;
	struct timer_list	sk_timer;
	ktime_t			sk_stamp;
	struct socket		*sk_socket;
	void			*sk_user_data;
	struct page		*sk_sndmsg_page;
	struct sk_buff		*sk_send_head;
	__u32			sk_sndmsg_off;
	int			sk_write_pending;
#ifdef CONFIG_SECURITY
	void			*sk_security;
#endif
	__u32			sk_mark;
	/* XXX 4 bytes hole on 64 bit */
	void			(*sk_state_change)(struct sock *sk);
	void			(*sk_data_ready)(struct sock *sk, int bytes);
	void			(*sk_write_space)(struct sock *sk);
	void			(*sk_error_report)(struct sock *sk);
	int			(*sk_backlog_rcv)(struct sock *sk,
						  struct sk_buff *skb);
	void			(*sk_destruct)(struct sock *sk);
};
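
/*
 * Illustrative sketch (an assumption, not from the original header):
 * kernel users such as RPC servers commonly override the sk_* callbacks
 * above to get notified of socket events, saving the old callback and
 * swapping under sk_callback_lock. All "example_*" names are
 * hypothetical:
 *
 *	static void (*example_old_data_ready)(struct sock *sk, int bytes);
 *
 *	static void example_data_ready(struct sock *sk, int bytes)
 *	{
 *		// do protocol-specific wakeup here, then chain
 *		example_old_data_ready(sk, bytes);
 *	}
 *
 *	write_lock_bh(&sk->sk_callback_lock);
 *	example_old_data_ready = sk->sk_data_ready;
 *	sk->sk_data_ready = example_data_ready;
 *	write_unlock_bh(&sk->sk_callback_lock);
 */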

/*
 * Hashed lists helper routines
 */
static inline struct sock *__sk_head(const struct hlist_head *head)
{
	return hlist_entry(head->first, struct sock, sk_node);
}

static inline struct sock *sk_head(const struct hlist_head *head)
{
	return hlist_empty(head) ? NULL : __sk_head(head);
}

static inline struct sock *__sk_nulls_head(const struct hlist_nulls_head *head)
{
	return hlist_nulls_entry(head->first, struct sock, sk_nulls_node);
}

static inline struct sock *sk_nulls_head(const struct hlist_nulls_head *head)
{
	return hlist_nulls_empty(head) ? NULL : __sk_nulls_head(head);
}

static inline struct sock *sk_next(const struct sock *sk)
{
	return sk->sk_node.next ?
		hlist_entry(sk->sk_node.next, struct sock, sk_node) : NULL;
}

static inline struct sock *sk_nulls_next(const struct sock *sk)
{
	return (!is_a_nulls(sk->sk_nulls_node.next)) ?
		hlist_nulls_entry(sk->sk_nulls_node.next,
				  struct sock, sk_nulls_node) :
		NULL;
}

static inline int sk_unhashed(const struct sock *sk)
{
	return hlist_unhashed(&sk->sk_node);
}

static inline int sk_hashed(const struct sock *sk)
{
	return !sk_unhashed(sk);
}

static __inline__ void sk_node_init(struct hlist_node *node)
{
	node->pprev = NULL;
}

static __inline__ void sk_nulls_node_init(struct hlist_nulls_node *node)
{
	node->pprev = NULL;
}

static __inline__ void __sk_del_node(struct sock *sk)
{
	__hlist_del(&sk->sk_node);
}

static __inline__ int __sk_del_node_init(struct sock *sk)
{
	if (sk_hashed(sk)) {
		__sk_del_node(sk);
		sk_node_init(&sk->sk_node);
		return 1;
	}
	return 0;
}

/* Grab socket reference count. This operation is valid only
   when sk is ALREADY grabbed, e.g. it has been found in a hash table
   or a list and the lookup is made under a lock preventing hash table
   modifications.
 */

static inline void sock_hold(struct sock *sk)
{
	atomic_inc(&sk->sk_refcnt);
}

/* Ungrab socket in a context which assumes that the socket refcnt
   cannot hit zero, e.g. this is true in the context of any socketcall.
 */
static inline void __sock_put(struct sock *sk)
{
	atomic_dec(&sk->sk_refcnt);
}

static __inline__ int sk_del_node_init(struct sock *sk)
{
	int rc = __sk_del_node_init(sk);

	if (rc) {
		/* paranoid for a while -acme */
		WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
		__sock_put(sk);
	}
	return rc;
}

static __inline__ int __sk_nulls_del_node_init_rcu(struct sock *sk)
{
	if (sk_hashed(sk)) {
		hlist_nulls_del_init_rcu(&sk->sk_nulls_node);
		return 1;
	}
	return 0;
}

static __inline__ int sk_nulls_del_node_init_rcu(struct sock *sk)
{
	int rc = __sk_nulls_del_node_init_rcu(sk);

	if (rc) {
		/* paranoid for a while -acme */
		WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
		__sock_put(sk);
	}
	return rc;
}

static __inline__ void __sk_add_node(struct sock *sk, struct hlist_head *list)
{
	hlist_add_head(&sk->sk_node, list);
}

static __inline__ void sk_add_node(struct sock *sk, struct hlist_head *list)
{
	sock_hold(sk);
	__sk_add_node(sk, list);
}

static __inline__ void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
{
	hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
}

static __inline__ void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
{
	sock_hold(sk);
	__sk_nulls_add_node_rcu(sk, list);
}

static __inline__ void __sk_del_bind_node(struct sock *sk)
{
	__hlist_del(&sk->sk_bind_node);
}

static __inline__ void sk_add_bind_node(struct sock *sk,
					struct hlist_head *list)
{
	hlist_add_head(&sk->sk_bind_node, list);
}
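
/*
 * Illustrative sketch (assumed usage, not part of this header): the
 * locked add/del helpers pair a list operation with a reference count
 * change. sk_add_node() takes a reference on behalf of the hash table;
 * sk_del_node_init() drops it again. "example_lock" and "example_head"
 * stand in for a protocol's own bucket lock and chain:
 *
 *	write_lock_bh(&example_lock);
 *	sk_add_node(sk, &example_head);		// sock_hold() + add
 *	write_unlock_bh(&example_lock);
 *
 *	write_lock_bh(&example_lock);
 *	sk_del_node_init(sk);	// returns 1 and does __sock_put()
 *				// iff the socket was hashed
 *	write_unlock_bh(&example_lock);
 */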

#define sk_for_each(__sk, node, list) \
	hlist_for_each_entry(__sk, node, list, sk_node)
#define sk_nulls_for_each(__sk, node, list) \
	hlist_nulls_for_each_entry(__sk, node, list, sk_nulls_node)
#define sk_nulls_for_each_rcu(__sk, node, list) \
	hlist_nulls_for_each_entry_rcu(__sk, node, list, sk_nulls_node)
#define sk_for_each_from(__sk, node) \
	if (__sk && ({ node = &(__sk)->sk_node; 1; })) \
		hlist_for_each_entry_from(__sk, node, sk_node)
#define sk_nulls_for_each_from(__sk, node) \
	if (__sk && ({ node = &(__sk)->sk_nulls_node; 1; })) \
		hlist_nulls_for_each_entry_from(__sk, node, sk_nulls_node)
#define sk_for_each_continue(__sk, node) \
	if (__sk && ({ node = &(__sk)->sk_node; 1; })) \
		hlist_for_each_entry_continue(__sk, node, sk_node)
#define sk_for_each_safe(__sk, node, tmp, list) \
	hlist_for_each_entry_safe(__sk, node, tmp, list, sk_node)
#define sk_for_each_bound(__sk, node, list) \
	hlist_for_each_entry(__sk, node, list, sk_bind_node)

/* Sock flags */
enum sock_flags {
	SOCK_DEAD,
	SOCK_DONE,
	SOCK_URGINLINE,
	SOCK_KEEPOPEN,
	SOCK_LINGER,
	SOCK_DESTROY,
	SOCK_BROADCAST,
	SOCK_TIMESTAMP,
	SOCK_ZAPPED,
	SOCK_USE_WRITE_QUEUE, /* whether to call sk->sk_write_space in sock_wfree */
	SOCK_DBG, /* %SO_DEBUG setting */
	SOCK_RCVTSTAMP, /* %SO_TIMESTAMP setting */
	SOCK_RCVTSTAMPNS, /* %SO_TIMESTAMPNS setting */
	SOCK_LOCALROUTE, /* route locally only, %SO_DONTROUTE setting */
	SOCK_QUEUE_SHRUNK, /* write queue has been shrunk recently */
	SOCK_TIMESTAMPING_TX_HARDWARE,  /* %SOF_TIMESTAMPING_TX_HARDWARE */
	SOCK_TIMESTAMPING_TX_SOFTWARE,  /* %SOF_TIMESTAMPING_TX_SOFTWARE */
	SOCK_TIMESTAMPING_RX_HARDWARE,  /* %SOF_TIMESTAMPING_RX_HARDWARE */
	SOCK_TIMESTAMPING_RX_SOFTWARE,  /* %SOF_TIMESTAMPING_RX_SOFTWARE */
	SOCK_TIMESTAMPING_SOFTWARE,     /* %SOF_TIMESTAMPING_SOFTWARE */
	SOCK_TIMESTAMPING_RAW_HARDWARE, /* %SOF_TIMESTAMPING_RAW_HARDWARE */
	SOCK_TIMESTAMPING_SYS_HARDWARE, /* %SOF_TIMESTAMPING_SYS_HARDWARE */
};

static inline void sock_copy_flags(struct sock *nsk, struct sock *osk)
{
	nsk->sk_flags = osk->sk_flags;
}

static inline void sock_set_flag(struct sock *sk, enum sock_flags flag)
{
	__set_bit(flag, &sk->sk_flags);
}

static inline void sock_reset_flag(struct sock *sk, enum sock_flags flag)
{
	__clear_bit(flag, &sk->sk_flags);
}

static inline int sock_flag(struct sock *sk, enum sock_flags flag)
{
	return test_bit(flag, &sk->sk_flags);
}

static inline void sk_acceptq_removed(struct sock *sk)
{
	sk->sk_ack_backlog--;
}

static inline void sk_acceptq_added(struct sock *sk)
{
	sk->sk_ack_backlog++;
}

static inline int sk_acceptq_is_full(struct sock *sk)
{
	return sk->sk_ack_backlog > sk->sk_max_ack_backlog;
}

/*
 * Compute minimal free write space needed to queue new packets.
 */
static inline int sk_stream_min_wspace(struct sock *sk)
{
	return sk->sk_wmem_queued >> 1;
}

static inline int sk_stream_wspace(struct sock *sk)
{
	return sk->sk_sndbuf - sk->sk_wmem_queued;
}

extern void sk_stream_write_space(struct sock *sk);

static inline int sk_stream_memory_free(struct sock *sk)
{
	return sk->sk_wmem_queued < sk->sk_sndbuf;
}
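
/*
 * Illustrative sketch (an assumption, not from the original header): a
 * lockless lookup over an hlist_nulls chain. Because an RCU-visible
 * socket can be moved to another chain while we walk, the nulls value
 * reached at the end of the walk must be compared against the slot we
 * started in, and the walk restarted on mismatch. "head", "slot" and
 * "example_match" are hypothetical:
 *
 *	struct sock *sk;
 *	struct hlist_nulls_node *node;
 *
 *	rcu_read_lock();
 * begin:
 *	sk_nulls_for_each_rcu(sk, node, &head->chain)
 *		if (example_match(sk))
 *			goto found;
 *	if (get_nulls_value(node) != slot)
 *		goto begin;	// chain changed under us, retry
 *	sk = NULL;
 * found:
 *	rcu_read_unlock();
 */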

/* The per-socket spinlock must be held here. */
static inline void sk_add_backlog(struct sock *sk, struct sk_buff *skb)
{
	if (!sk->sk_backlog.tail) {
		sk->sk_backlog.head = sk->sk_backlog.tail = skb;
	} else {
		sk->sk_backlog.tail->next = skb;
		sk->sk_backlog.tail = skb;
	}
	skb->next = NULL;
}

static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	return sk->sk_backlog_rcv(sk, skb);
}
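
/*
 * Illustrative sketch (an assumption; this mirrors what TCP's softirq
 * receive path does, but the code is generic): in BH context, take the
 * spinlock half of the socket lock; if a process currently owns the
 * socket, defer the packet to the backlog, which release_sock() will
 * later replay through sk_backlog_rcv():
 *
 *	bh_lock_sock(sk);
 *	if (!sock_owned_by_user(sk))
 *		ret = sk_backlog_rcv(sk, skb);	// process immediately
 *	else
 *		sk_add_backlog(sk, skb);	// owner drains on release
 *	bh_unlock_sock(sk);
 */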

#define sk_wait_event(__sk, __timeo, __condition)			\
	({	int __rc;						\
		release_sock(__sk);					\
		__rc = __condition;					\
		if (!__rc) {						\
			*(__timeo) = schedule_timeout(*(__timeo));	\
		}							\
		lock_sock(__sk);					\
		__rc = __condition;					\
		__rc;							\
	})

extern int sk_stream_wait_connect(struct sock *sk, long *timeo_p);
extern int sk_stream_wait_memory(struct sock *sk, long *timeo_p);
extern void sk_stream_wait_close(struct sock *sk, long timeo_p);
extern int sk_stream_error(struct sock *sk, int flags, int err);
extern void sk_stream_kill_queues(struct sock *sk);

extern int sk_wait_data(struct sock *sk, long *timeo);

struct request_sock_ops;
struct timewait_sock_ops;
struct inet_hashinfo;
struct raw_hashinfo;

/* Networking protocol blocks we attach to sockets.
 * socket layer -> transport layer interface
 * transport -> network interface is defined by struct inet_proto
 */
struct proto {
	void			(*close)(struct sock *sk,
					 long timeout);
	int			(*connect)(struct sock *sk,
					   struct sockaddr *uaddr,
					   int addr_len);
	int			(*disconnect)(struct sock *sk, int flags);

	struct sock *		(*accept) (struct sock *sk, int flags, int *err);

	int			(*ioctl)(struct sock *sk, int cmd,
					 unsigned long arg);
	int			(*init)(struct sock *sk);
	void			(*destroy)(struct sock *sk);
	void			(*shutdown)(struct sock *sk, int how);
	int			(*setsockopt)(struct sock *sk, int level,
					      int optname, char __user *optval,
					      int optlen);
	int			(*getsockopt)(struct sock *sk, int level,
					      int optname, char __user *optval,
					      int __user *option);
#ifdef CONFIG_COMPAT
	int			(*compat_setsockopt)(struct sock *sk,
						     int level,
						     int optname, char __user *optval,
						     int optlen);
	int			(*compat_getsockopt)(struct sock *sk,
						     int level,
						     int optname, char __user *optval,
						     int __user *option);
#endif
	int			(*sendmsg)(struct kiocb *iocb, struct sock *sk,
					   struct msghdr *msg, size_t len);
	int			(*recvmsg)(struct kiocb *iocb, struct sock *sk,
					   struct msghdr *msg,
					   size_t len, int noblock, int flags,
					   int *addr_len);
	int			(*sendpage)(struct sock *sk, struct page *page,
					    int offset, size_t size, int flags);
	int			(*bind)(struct sock *sk,
					struct sockaddr *uaddr, int addr_len);

	int			(*backlog_rcv) (struct sock *sk,
						struct sk_buff *skb);

	/* Keeping track of sk's, looking them up, and port selection methods. */
	void			(*hash)(struct sock *sk);
	void			(*unhash)(struct sock *sk);
	int			(*get_port)(struct sock *sk, unsigned short snum);

	/* Keeping track of sockets in use */
#ifdef CONFIG_PROC_FS
	unsigned int		inuse_idx;
#endif

	/* Memory pressure */
	void			(*enter_memory_pressure)(struct sock *sk);
	atomic_t		*memory_allocated;	/* Current allocated memory. */
	struct percpu_counter	*sockets_allocated;	/* Current number of sockets. */
	/*
	 * Pressure flag: try to collapse.
	 * Technical note: it is accessed by multiple contexts without
	 * atomicity. All of __sk_mem_schedule() is of this nature:
	 * accounting is strict, but the resulting actions are advisory
	 * and may lag.
	 */
	int			*memory_pressure;
	int			*sysctl_mem;
	int			*sysctl_wmem;
	int			*sysctl_rmem;
	int			max_header;

	struct kmem_cache	*slab;
	unsigned int		obj_size;
	int			slab_flags;

	struct percpu_counter	*orphan_count;

	struct request_sock_ops	*rsk_prot;
	struct timewait_sock_ops *twsk_prot;

	union {
		struct inet_hashinfo	*hashinfo;
		struct udp_table	*udp_table;
		struct raw_hashinfo	*raw_hash;
	} h;

	struct module		*owner;

	char			name[32];

	struct list_head	node;
#ifdef SOCK_REFCNT_DEBUG
	atomic_t		socks;
#endif
};

extern int proto_register(struct proto *prot, int alloc_slab);
extern void proto_unregister(struct proto *prot);

#ifdef SOCK_REFCNT_DEBUG
static inline void sk_refcnt_debug_inc(struct sock *sk)
{
	atomic_inc(&sk->sk_prot->socks);
}

static inline void sk_refcnt_debug_dec(struct sock *sk)
{
	atomic_dec(&sk->sk_prot->socks);
	printk(KERN_DEBUG "%s socket %p released, %d are still alive\n",
	       sk->sk_prot->name, sk, atomic_read(&sk->sk_prot->socks));
}

static inline void sk_refcnt_debug_release(const struct sock *sk)
{
	if (atomic_read(&sk->sk_refcnt) != 1)
		printk(KERN_DEBUG "Destruction of the %s socket %p delayed, refcnt=%d\n",
		       sk->sk_prot->name, sk, atomic_read(&sk->sk_refcnt));
}
#else /* SOCK_REFCNT_DEBUG */
#define sk_refcnt_debug_inc(sk) do { } while (0)
#define sk_refcnt_debug_dec(sk) do { } while (0)
#define sk_refcnt_debug_release(sk) do { } while (0)
#endif /* SOCK_REFCNT_DEBUG */


#ifdef CONFIG_PROC_FS
/* Called with local bh disabled */
extern void sock_prot_inuse_add(struct net *net, struct proto *prot, int inc);
extern int sock_prot_inuse_get(struct net *net, struct proto *proto);
#else
static inline void sock_prot_inuse_add(struct net *net, struct proto *prot,
				       int inc)
{
}
#endif
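
/*
 * Illustrative sketch (hypothetical, not from this header): a minimal
 * protocol registers a struct proto so that its sockets come from a
 * dedicated slab cache. All "example_*" names are assumptions; a real
 * struct example_sock would embed struct sock as its first member:
 *
 *	static struct proto example_proto = {
 *		.name	  = "EXAMPLE",
 *		.owner	  = THIS_MODULE,
 *		.obj_size = sizeof(struct example_sock),
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return proto_register(&example_proto, 1);
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		proto_unregister(&example_proto);
 *	}
 *
 * Passing alloc_slab == 1 asks proto_register() to create the
 * per-protocol kmem_cache used when allocating socks.
 */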

/* With per-bucket locks this operation is not atomic, but the
 * unlocked version is no worse.
 */
static inline void __sk_prot_rehash(struct sock *sk)
{
	sk->sk_prot->unhash(sk);
	sk->sk_prot->hash(sk);
}

/* About 10 seconds */
#define SOCK_DESTROY_TIME (10*HZ)

/* Ports 0-1023 can't be bound to unless you are superuser */
#define PROT_SOCK	1024

#define SHUTDOWN_MASK	3
#define RCV_SHUTDOWN	1
#define SEND_SHUTDOWN	2

#define SOCK_SNDBUF_LOCK	1
#define SOCK_RCVBUF_LOCK	2
#define SOCK_BINDADDR_LOCK	4
#define SOCK_BINDPORT_LOCK	8

/* sock_iocb: used to kick off async processing of socket ios */
struct sock_iocb {
	struct list_head	list;

	int			flags;
	int			size;
	struct socket		*sock;
	struct sock		*sk;
	struct scm_cookie	*scm;
	struct msghdr		*msg, async_msg;
	struct kiocb		*kiocb;
};

static inline struct sock_iocb *kiocb_to_siocb(struct kiocb *iocb)
{
	return (struct sock_iocb *)iocb->private;
}

static inline struct kiocb *siocb_to_kiocb(struct sock_iocb *si)
{
	return si->kiocb;
}

struct socket_alloc {
	struct socket socket;
	struct inode vfs_inode;
};

static inline struct socket *SOCKET_I(struct inode *inode)
{
	return &container_of(inode, struct socket_alloc, vfs_inode)->socket;
}

static inline struct inode *SOCK_INODE(struct socket *socket)
{
	return &container_of(socket, struct socket_alloc, socket)->vfs_inode;
}

/*
 * Functions for memory accounting
 */
extern int __sk_mem_schedule(struct sock *sk, int size, int kind);
extern void __sk_mem_reclaim(struct sock *sk);

#define SK_MEM_QUANTUM ((int)PAGE_SIZE)
#define SK_MEM_QUANTUM_SHIFT ilog2(SK_MEM_QUANTUM)
#define SK_MEM_SEND	0
#define SK_MEM_RECV	1

static inline int sk_mem_pages(int amt)
{
	return (amt + SK_MEM_QUANTUM - 1) >> SK_MEM_QUANTUM_SHIFT;
}

static inline int sk_has_account(struct sock *sk)
{
	/* return true if protocol supports memory accounting */
	return !!sk->sk_prot->memory_allocated;
}

static inline int sk_wmem_schedule(struct sock *sk, int size)
{
	if (!sk_has_account(sk))
		return 1;
	return size <= sk->sk_forward_alloc ||
		__sk_mem_schedule(sk, size, SK_MEM_SEND);
}

static inline int sk_rmem_schedule(struct sock *sk, int size)
{
	if (!sk_has_account(sk))
		return 1;
	return size <= sk->sk_forward_alloc ||
		__sk_mem_schedule(sk, size, SK_MEM_RECV);
}

static inline void sk_mem_reclaim(struct sock *sk)
{
	if (!sk_has_account(sk))
		return;
	if (sk->sk_forward_alloc >= SK_MEM_QUANTUM)
		__sk_mem_reclaim(sk);
}

static inline void sk_mem_reclaim_partial(struct sock *sk)
{
	if (!sk_has_account(sk))
		return;
	if (sk->sk_forward_alloc > SK_MEM_QUANTUM)
		__sk_mem_reclaim(sk);
}

static inline void sk_mem_charge(struct sock *sk, int size)
{
	if (!sk_has_account(sk))
		return;
	sk->sk_forward_alloc -= size;
}

static inline void sk_mem_uncharge(struct sock *sk, int size)
{
	if (!sk_has_account(sk))
		return;
	sk->sk_forward_alloc += size;
}

static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
{
	sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
	sk->sk_wmem_queued -= skb->truesize;
	sk_mem_uncharge(sk, skb->truesize);
	__kfree_skb(skb);
}
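
/*
 * Illustrative sketch (assumed usage, not from the original header): a
 * protocol with memory accounting reserves forward-allocated space
 * before queueing data and charges the actual bytes afterwards; the
 * charge is undone when the data leaves the queue:
 *
 *	if (!sk_wmem_schedule(sk, skb->truesize))
 *		goto drop;			// over the memory limits
 *	sk_mem_charge(sk, skb->truesize);	// consume forward_alloc
 *	...
 *	// later, when the skb is unlinked:
 *	sk_mem_uncharge(sk, skb->truesize);
 *	sk_mem_reclaim(sk);	// return whole quanta to the pool
 */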

/* Used by processes to "lock" a socket state, so that
 * interrupts and bottom half handlers won't change it
 * from under us. It essentially blocks any incoming
 * packets, so that we won't get any new data or any
 * packets that change the state of the socket.
 *
 * While locked, BH processing will add new packets to
 * the backlog queue. This queue is processed by the
 * owner of the socket lock right before it is released.
 *
 * Since ~2.3.5 it is also an exclusive sleep lock serializing
 * accesses from user process context.
 */
#define sock_owned_by_user(sk)	((sk)->sk_lock.owned)

/*
 * Macro so as to not evaluate some arguments when
 * lockdep is not enabled.
 *
 * Mark both the sk_lock and the sk_lock.slock as a
 * per-address-family lock class.
 */
#define sock_lock_init_class_and_name(sk, sname, skey, name, key)	\
do {									\
	sk->sk_lock.owned = 0;						\
	init_waitqueue_head(&sk->sk_lock.wq);				\
	spin_lock_init(&(sk)->sk_lock.slock);				\
	debug_check_no_locks_freed((void *)&(sk)->sk_lock,		\
			sizeof((sk)->sk_lock));				\
	lockdep_set_class_and_name(&(sk)->sk_lock.slock,		\
			(skey), (sname));				\
	lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0);	\
} while (0)

extern void lock_sock_nested(struct sock *sk, int subclass);

static inline void lock_sock(struct sock *sk)
{
	lock_sock_nested(sk, 0);
}

extern void release_sock(struct sock *sk);

/* BH context may only use the following locking interface. */
#define bh_lock_sock(__sk)	spin_lock(&((__sk)->sk_lock.slock))
#define bh_lock_sock_nested(__sk) \
				spin_lock_nested(&((__sk)->sk_lock.slock), \
				SINGLE_DEPTH_NESTING)
#define bh_unlock_sock(__sk)	spin_unlock(&((__sk)->sk_lock.slock))

extern struct sock		*sk_alloc(struct net *net, int family,
					  gfp_t priority,
					  struct proto *prot);
extern void			sk_free(struct sock *sk);
extern void			sk_release_kernel(struct sock *sk);
extern struct sock		*sk_clone(const struct sock *sk,
					  const gfp_t priority);

extern struct sk_buff		*sock_wmalloc(struct sock *sk,
					      unsigned long size, int force,
					      gfp_t priority);
extern struct sk_buff		*sock_rmalloc(struct sock *sk,
					      unsigned long size, int force,
					      gfp_t priority);
extern void			sock_wfree(struct sk_buff *skb);
extern void			sock_rfree(struct sk_buff *skb);

extern int			sock_setsockopt(struct socket *sock, int level,
						int op, char __user *optval,
						int optlen);

extern int			sock_getsockopt(struct socket *sock, int level,
						int op, char __user *optval,
						int __user *optlen);
extern struct sk_buff		*sock_alloc_send_skb(struct sock *sk,
						     unsigned long size,
						     int noblock,
						     int *errcode);
extern struct sk_buff		*sock_alloc_send_pskb(struct sock *sk,
						      unsigned long header_len,
						      unsigned long data_len,
						      int noblock,
						      int *errcode);
extern void *sock_kmalloc(struct sock *sk, int size,
			  gfp_t priority);
extern void sock_kfree_s(struct sock *sk, void *mem, int size);
extern void sk_send_sigurg(struct sock *sk);

/*
 * Functions to fill in entries in struct proto_ops when a protocol
 * does not implement a particular function.
 */
extern int			sock_no_bind(struct socket *,
					     struct sockaddr *, int);
extern int			sock_no_connect(struct socket *,
						struct sockaddr *, int, int);
extern int			sock_no_socketpair(struct socket *,
						   struct socket *);
extern int			sock_no_accept(struct socket *,
					       struct socket *, int);
extern int			sock_no_getname(struct socket *,
						struct sockaddr *, int *, int);
extern unsigned int		sock_no_poll(struct file *, struct socket *,
					     struct poll_table_struct *);
extern int			sock_no_ioctl(struct socket *, unsigned int,
					      unsigned long);
extern int			sock_no_listen(struct socket *, int);
extern int			sock_no_shutdown(struct socket *, int);
extern int			sock_no_getsockopt(struct socket *, int, int,
						   char __user *, int __user *);
extern int			sock_no_setsockopt(struct socket *, int, int,
						   char __user *, int);
extern int			sock_no_sendmsg(struct kiocb *, struct socket *,
						struct msghdr *, size_t);
extern int			sock_no_recvmsg(struct kiocb *, struct socket *,
						struct msghdr *, size_t, int);
extern int			sock_no_mmap(struct file *file,
					     struct socket *sock,
					     struct vm_area_struct *vma);
extern ssize_t			sock_no_sendpage(struct socket *sock,
						 struct page *page,
						 int offset, size_t size,
						 int flags);

/*
 * Functions to fill in entries in struct proto_ops when a protocol
 * uses the inet style.
 */
extern int sock_common_getsockopt(struct socket *sock, int level, int optname,
				  char __user *optval, int __user *optlen);
extern int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
			       struct msghdr *msg, size_t size, int flags);
extern int sock_common_setsockopt(struct socket *sock, int level, int optname,
				  char __user *optval, int optlen);
extern int compat_sock_common_getsockopt(struct socket *sock, int level,
		int optname, char __user *optval, int __user *optlen);
extern int compat_sock_common_setsockopt(struct socket *sock, int level,
		int optname, char __user *optval, int optlen);

extern void sk_common_release(struct sock *sk);

/*
 *	Default socket callbacks and setup code
 */

/* Initialise core socket variables */
extern void sock_init_data(struct socket *sock, struct sock *sk);

/**
 *	sk_filter_release - release a socket filter
 *	@fp: filter to remove
 *
 *	Remove a filter from a socket and release its resources.
 */

static inline void sk_filter_release(struct sk_filter *fp)
{
	if (atomic_dec_and_test(&fp->refcnt))
		kfree(fp);
}

static inline void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
{
	unsigned int size = sk_filter_len(fp);

	atomic_sub(size, &sk->sk_omem_alloc);
	sk_filter_release(fp);
}

static inline void sk_filter_charge(struct sock *sk, struct sk_filter *fp)
{
	atomic_inc(&fp->refcnt);
	atomic_add(sk_filter_len(fp), &sk->sk_omem_alloc);
}

/*
 * Socket reference counting postulates.
 *
 * * Each user of socket SHOULD hold a reference count.
 * * Each access point to socket (a hash table bucket, a reference from a
 *   list, a running timer, an skb in flight) MUST hold a reference count.
 * * When reference count hits 0, it means it will never increase back.
 * * When reference count hits 0, it means that no references from
 *   outside exist to this socket and the current process on the current
 *   CPU is the last user and may/should destroy this socket.
 * * sk_free is called from any context: process, BH, IRQ. When
 *   it is called, the socket has no references from outside -> sk_free
 *   may release descendant resources allocated by the socket, but
 *   by the time it is called, the socket is NOT referenced by any
 *   hash tables, lists etc.
 * * Packets, delivered from outside (from network or from another process)
 *   and enqueued on receive/error queues SHOULD NOT grab a reference count
 *   while they sit in queue. Otherwise, packets will leak when the socket
 *   is looked up by one cpu and unhashed by another CPU.
 *   This is true for udp/raw, netlink (leak to receive and error queues),
 *   tcp (leak to backlog). Packet socket does all the processing inside
 *   BR_NETPROTO_LOCK, so it does not have this race condition. UNIX sockets
 *   use a separate SMP lock, so they avoid the race as well.
 */

/* Ungrab socket and destroy it, if it was the last reference. */
static inline void sock_put(struct sock *sk)
{
	if (atomic_dec_and_test(&sk->sk_refcnt))
		sk_free(sk);
}

extern int sk_receive_skb(struct sock *sk, struct sk_buff *skb,
			  const int nested);

static inline void sk_set_socket(struct sock *sk, struct socket *sock)
{
	sk->sk_socket = sock;
}

/* Detach socket from process context.
 * Announce socket dead, detach it from wait queue and inode.
 * Note that the parent inode held a reference count on this struct sock;
 * we do not release it in this function, because the protocol
 * probably wants some additional cleanups or may even continue
 * to work with this socket (as TCP does).
 */
static inline void sock_orphan(struct sock *sk)
{
	write_lock_bh(&sk->sk_callback_lock);
	sock_set_flag(sk, SOCK_DEAD);
	sk_set_socket(sk, NULL);
	sk->sk_sleep = NULL;
	write_unlock_bh(&sk->sk_callback_lock);
}

static inline void sock_graft(struct sock *sk, struct socket *parent)
{
	write_lock_bh(&sk->sk_callback_lock);
	sk->sk_sleep = &parent->wait;
	parent->sk = sk;
	sk_set_socket(sk, parent);
	security_sock_graft(sk, parent);
	write_unlock_bh(&sk->sk_callback_lock);
}

extern int sock_i_uid(struct sock *sk);
extern unsigned long sock_i_ino(struct sock *sk);

static inline struct dst_entry *
__sk_dst_get(struct sock *sk)
{
	return sk->sk_dst_cache;
}

static inline struct dst_entry *
sk_dst_get(struct sock *sk)
{
	struct dst_entry *dst;

	read_lock(&sk->sk_dst_lock);
	dst = sk->sk_dst_cache;
	if (dst)
		dst_hold(dst);
	read_unlock(&sk->sk_dst_lock);
	return dst;
}

static inline void
__sk_dst_set(struct sock *sk, struct dst_entry *dst)
{
	struct dst_entry *old_dst;

	old_dst = sk->sk_dst_cache;
	sk->sk_dst_cache = dst;
	dst_release(old_dst);
}

static inline void
sk_dst_set(struct sock *sk, struct dst_entry *dst)
{
	write_lock(&sk->sk_dst_lock);
	__sk_dst_set(sk, dst);
	write_unlock(&sk->sk_dst_lock);
}

static inline void
__sk_dst_reset(struct sock *sk)
{
	struct dst_entry *old_dst;

	old_dst = sk->sk_dst_cache;
	sk->sk_dst_cache = NULL;
	dst_release(old_dst);
}

static inline void
sk_dst_reset(struct sock *sk)
{
	write_lock(&sk->sk_dst_lock);
	__sk_dst_reset(sk);
	write_unlock(&sk->sk_dst_lock);
}

extern struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);

extern struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie);

static inline int sk_can_gso(const struct sock *sk)
{
	return net_gso_ok(sk->sk_route_caps, sk->sk_gso_type);
}

extern void sk_setup_caps(struct sock *sk, struct dst_entry *dst);

static inline int skb_copy_to_page(struct sock *sk, char __user *from,
				   struct sk_buff *skb, struct page *page,
				   int off, int copy)
{
	if (skb->ip_summed == CHECKSUM_NONE) {
		int err = 0;
		__wsum csum = csum_and_copy_from_user(from,
						      page_address(page) + off,
						      copy, 0, &err);
		if (err)
			return err;
		skb->csum = csum_block_add(skb->csum, csum, skb->len);
	} else if (copy_from_user(page_address(page) + off, from, copy))
		return -EFAULT;

	skb->len	   += copy;
	skb->data_len	   += copy;
	skb->truesize	   += copy;
	sk->sk_wmem_queued += copy;
	sk_mem_charge(sk, copy);
	return 0;
}
1218 */ 1219 1220 static inline void skb_set_owner_w(struct sk_buff *skb, struct sock *sk) 1221 { 1222 skb->sk = sk; 1223 skb->destructor = sock_wfree; 1224 /* 1225 * We used to take a refcount on sk, but following operation 1226 * is enough to guarantee sk_free() wont free this sock until 1227 * all in-flight packets are completed 1228 */ 1229 atomic_add(skb->truesize, &sk->sk_wmem_alloc); 1230 } 1231 1232 static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk) 1233 { 1234 skb->sk = sk; 1235 skb->destructor = sock_rfree; 1236 atomic_add(skb->truesize, &sk->sk_rmem_alloc); 1237 sk_mem_charge(sk, skb->truesize); 1238 } 1239 1240 extern void sk_reset_timer(struct sock *sk, struct timer_list* timer, 1241 unsigned long expires); 1242 1243 extern void sk_stop_timer(struct sock *sk, struct timer_list* timer); 1244 1245 extern int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb); 1246 1247 static inline int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb) 1248 { 1249 /* Cast skb->rcvbuf to unsigned... It's pointless, but reduces 1250 number of warnings when compiling with -W --ANK 1251 */ 1252 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= 1253 (unsigned)sk->sk_rcvbuf) 1254 return -ENOMEM; 1255 skb_set_owner_r(skb, sk); 1256 skb_queue_tail(&sk->sk_error_queue, skb); 1257 if (!sock_flag(sk, SOCK_DEAD)) 1258 sk->sk_data_ready(sk, skb->len); 1259 return 0; 1260 } 1261 1262 /* 1263 * Recover an error report and clear atomically 1264 */ 1265 1266 static inline int sock_error(struct sock *sk) 1267 { 1268 int err; 1269 if (likely(!sk->sk_err)) 1270 return 0; 1271 err = xchg(&sk->sk_err, 0); 1272 return -err; 1273 } 1274 1275 static inline unsigned long sock_wspace(struct sock *sk) 1276 { 1277 int amt = 0; 1278 1279 if (!(sk->sk_shutdown & SEND_SHUTDOWN)) { 1280 amt = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc); 1281 if (amt < 0) 1282 amt = 0; 1283 } 1284 return amt; 1285 } 1286 1287 static inline void sk_wake_async(struct sock *sk, int how, int band) 1288 { 1289 if (sk->sk_socket && sk->sk_socket->fasync_list) 1290 sock_wake_async(sk->sk_socket, how, band); 1291 } 1292 1293 #define SOCK_MIN_SNDBUF 2048 1294 #define SOCK_MIN_RCVBUF 256 1295 1296 static inline void sk_stream_moderate_sndbuf(struct sock *sk) 1297 { 1298 if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK)) { 1299 sk->sk_sndbuf = min(sk->sk_sndbuf, sk->sk_wmem_queued >> 1); 1300 sk->sk_sndbuf = max(sk->sk_sndbuf, SOCK_MIN_SNDBUF); 1301 } 1302 } 1303 1304 struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp); 1305 1306 static inline struct page *sk_stream_alloc_page(struct sock *sk) 1307 { 1308 struct page *page = NULL; 1309 1310 page = alloc_pages(sk->sk_allocation, 0); 1311 if (!page) { 1312 sk->sk_prot->enter_memory_pressure(sk); 1313 sk_stream_moderate_sndbuf(sk); 1314 } 1315 return page; 1316 } 1317 1318 /* 1319 * Default write policy as shown to user space via poll/select/SIGIO 1320 */ 1321 static inline int sock_writeable(const struct sock *sk) 1322 { 1323 return atomic_read(&sk->sk_wmem_alloc) < (sk->sk_sndbuf >> 1); 1324 } 1325 1326 static inline gfp_t gfp_any(void) 1327 { 1328 return in_softirq() ? GFP_ATOMIC : GFP_KERNEL; 1329 } 1330 1331 static inline long sock_rcvtimeo(const struct sock *sk, int noblock) 1332 { 1333 return noblock ? 0 : sk->sk_rcvtimeo; 1334 } 1335 1336 static inline long sock_sndtimeo(const struct sock *sk, int noblock) 1337 { 1338 return noblock ? 

static inline int sock_rcvlowat(const struct sock *sk, int waitall, int len)
{
	return (waitall ? len : min_t(int, sk->sk_rcvlowat, len)) ? : 1;
}

/* Alas, with timeout socket operations are not restartable.
 * Compare this to poll().
 */
static inline int sock_intr_errno(long timeo)
{
	return timeo == MAX_SCHEDULE_TIMEOUT ? -ERESTARTSYS : -EINTR;
}

extern void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
				  struct sk_buff *skb);

static __inline__ void
sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
{
	ktime_t kt = skb->tstamp;
	struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb);

	/*
	 * generate control messages if
	 * - receive time stamping in software requested (SOCK_RCVTSTAMP
	 *   or SOCK_TIMESTAMPING_RX_SOFTWARE)
	 * - software time stamp available and wanted
	 *   (SOCK_TIMESTAMPING_SOFTWARE)
	 * - hardware time stamps available and wanted
	 *   (SOCK_TIMESTAMPING_SYS_HARDWARE or
	 *   SOCK_TIMESTAMPING_RAW_HARDWARE)
	 */
	if (sock_flag(sk, SOCK_RCVTSTAMP) ||
	    sock_flag(sk, SOCK_TIMESTAMPING_RX_SOFTWARE) ||
	    (kt.tv64 && sock_flag(sk, SOCK_TIMESTAMPING_SOFTWARE)) ||
	    (hwtstamps->hwtstamp.tv64 &&
	     sock_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE)) ||
	    (hwtstamps->syststamp.tv64 &&
	     sock_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE)))
		__sock_recv_timestamp(msg, sk, skb);
	else
		sk->sk_stamp = kt;
}

/**
 * sock_tx_timestamp - checks whether the outgoing packet is to be time stamped
 * @msg:	outgoing packet
 * @sk:		socket sending this packet
 * @shtx:	filled with instructions for time stamping
 *
 * Currently only depends on the %SOCK_TIMESTAMPING* flags. Returns an error
 * code if parameters are invalid.
 */
extern int sock_tx_timestamp(struct msghdr *msg,
			     struct sock *sk,
			     union skb_shared_tx *shtx);


/**
 * sk_eat_skb - Release a skb if it is no longer needed
 * @sk: socket to eat this skb from
 * @skb: socket buffer to eat
 * @copied_early: flag indicating whether DMA operations copied this data early
 *
 * This routine must be called with interrupts disabled or with the socket
 * locked so that the sk_buff queue operation is ok.
 */
#ifdef CONFIG_NET_DMA
static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, int copied_early)
{
	__skb_unlink(skb, &sk->sk_receive_queue);
	if (!copied_early)
		__kfree_skb(skb);
	else
		__skb_queue_tail(&sk->sk_async_wait_queue, skb);
}
#else
static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, int copied_early)
{
	__skb_unlink(skb, &sk->sk_receive_queue);
	__kfree_skb(skb);
}
#endif

static inline
struct net *sock_net(const struct sock *sk)
{
#ifdef CONFIG_NET_NS
	return sk->sk_net;
#else
	return &init_net;
#endif
}

static inline
void sock_net_set(struct sock *sk, struct net *net)
{
#ifdef CONFIG_NET_NS
	sk->sk_net = net;
#endif
}
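
/*
 * Illustrative sketch (assumed usage): sock_net() works whether or not
 * CONFIG_NET_NS is set, so namespace-aware code can be written
 * uniformly. A hypothetical check restricting a feature to the initial
 * namespace:
 *
 *	if (!net_eq(sock_net(sk), &init_net))
 *		return -EAFNOSUPPORT;
 */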

/*
 * Kernel sockets, e.g. rtnl or icmp_socket, are a part of a namespace.
 * They should not hold a reference to a namespace, so that the
 * namespace can be stopped. Sockets moved with sk_change_net should be
 * released using sk_release_kernel.
 */
static inline void sk_change_net(struct sock *sk, struct net *net)
{
	put_net(sock_net(sk));
	sock_net_set(sk, hold_net(net));
}

static inline struct sock *skb_steal_sock(struct sk_buff *skb)
{
	if (unlikely(skb->sk)) {
		struct sock *sk = skb->sk;

		skb->destructor = NULL;
		skb->sk = NULL;
		return sk;
	}
	return NULL;
}

extern void sock_enable_timestamp(struct sock *sk, int flag);
extern int sock_get_timestamp(struct sock *, struct timeval __user *);
extern int sock_get_timestampns(struct sock *, struct timespec __user *);

/*
 *	Enable debug/info messages
 */
extern int net_msg_warn;
#define NETDEBUG(fmt, args...) \
	do { if (net_msg_warn) printk(fmt, ##args); } while (0)

#define LIMIT_NETDEBUG(fmt, args...) \
	do { if (net_msg_warn && net_ratelimit()) printk(fmt, ##args); } while (0)

extern __u32 sysctl_wmem_max;
extern __u32 sysctl_rmem_max;

extern void sk_init(void);

extern int sysctl_optmem_max;

extern __u32 sysctl_wmem_default;
extern __u32 sysctl_rmem_default;

#endif	/* _SOCK_H */