/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the AF_INET socket handler.
 *
 * Version:	@(#)sock.h	1.0.4	05/13/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche <flla@stud.uni-sb.de>
 *
 * Fixes:
 *		Alan Cox	:	Volatiles in skbuff pointers. See
 *					skbuff comments. May be overdone,
 *					better to prove they can be removed
 *					than the reverse.
 *		Alan Cox	:	Added a zapped field for tcp to note
 *					a socket is reset and must stay shut up
 *		Alan Cox	:	New fields for options
 *		Pauline Middelink :	identd support
 *		Alan Cox	:	Eliminate low level recv/recvfrom
 *		David S. Miller	:	New socket lookup architecture.
 *		Steve Whitehouse:	Default routines for sock_ops
 *		Arnaldo C. Melo :	removed net_pinfo, tp_pinfo and made
 *					protinfo be just a void pointer, as the
 *					protocol specific parts were moved to
 *					respective headers and ipv4/v6, etc now
 *					use private slabcaches for its socks
 *		Pedro Hortas	:	New flags field for socket options
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#ifndef _SOCK_H
#define _SOCK_H

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/list_nulls.h>
#include <linux/timer.h>
#include <linux/cache.h>
#include <linux/module.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>	/* struct sk_buff */
#include <linux/mm.h>
#include <linux/security.h>
#include <linux/slab.h>

#include <linux/filter.h>
#include <linux/rculist_nulls.h>
#include <linux/poll.h>

#include <asm/atomic.h>
#include <net/dst.h>
#include <net/checksum.h>

/*
 * This structure really needs to be cleaned up.
 * Most of it is for TCP, and not used by any of
 * the other protocols.
 */

/* Define this to get the SOCK_DBG debugging facility. */
#define SOCK_DEBUGGING
#ifdef SOCK_DEBUGGING
#define SOCK_DEBUG(sk, msg...) do { if ((sk) && sock_flag((sk), SOCK_DBG)) \
					printk(KERN_DEBUG msg); } while (0)
#else
/* Validate arguments and do nothing */
static inline void __attribute__ ((format (printf, 2, 3)))
SOCK_DEBUG(struct sock *sk, const char *msg, ...)
{
}
#endif

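/*
 * Illustrative usage (not part of the original header): with SO_DEBUG set
 * on the socket (which sets the SOCK_DBG flag), SOCK_DEBUG() behaves like
 * a conditional printk, e.g.:
 *
 *	SOCK_DEBUG(sk, "%s: rcvbuf now %d\n", __func__, sk->sk_rcvbuf);
 *
 * With SOCK_DEBUGGING undefined it degrades to a format-checked no-op.
 */
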
/* This is the per-socket lock.  The spinlock provides a synchronization
 * between user contexts and software interrupt processing, whereas the
 * mini-semaphore synchronizes multiple users amongst themselves.
 */
typedef struct {
	spinlock_t		slock;
	int			owned;
	wait_queue_head_t	wq;
	/*
	 * We express the mutex-alike socket_lock semantics
	 * to the lock validator by explicitly managing
	 * the slock as a lock variant (in addition to
	 * the slock itself):
	 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
} socket_lock_t;

struct sock;
struct proto;
struct net;

/**
 *	struct sock_common - minimal network layer representation of sockets
 *	@skc_node: main hash linkage for various protocol lookup tables
 *	@skc_nulls_node: main hash linkage for TCP/UDP/UDP-Lite protocol
 *	@skc_refcnt: reference count
 *	@skc_tx_queue_mapping: tx queue number for this connection
 *	@skc_hash: hash value used with various protocol lookup tables
 *	@skc_u16hashes: two u16 hash values used by UDP lookup tables
 *	@skc_family: network address family
 *	@skc_state: Connection state
 *	@skc_reuse: %SO_REUSEADDR setting
 *	@skc_bound_dev_if: bound device index if != 0
 *	@skc_bind_node: bind hash linkage for various protocol lookup tables
 *	@skc_portaddr_node: second hash linkage for UDP/UDP-Lite protocol
 *	@skc_prot: protocol handlers inside a network family
 *	@skc_net: reference to the network namespace of this socket
 *
 *	This is the minimal network layer representation of sockets, the header
 *	for struct sock and struct inet_timewait_sock.
 */
struct sock_common {
	/*
	 * first fields are not copied in sock_copy()
	 */
	union {
		struct hlist_node	skc_node;
		struct hlist_nulls_node	skc_nulls_node;
	};
	atomic_t		skc_refcnt;
	int			skc_tx_queue_mapping;

	union {
		unsigned int	skc_hash;
		__u16		skc_u16hashes[2];
	};
	unsigned short		skc_family;
	volatile unsigned char	skc_state;
	unsigned char		skc_reuse;
	int			skc_bound_dev_if;
	union {
		struct hlist_node	skc_bind_node;
		struct hlist_nulls_node	skc_portaddr_node;
	};
	struct proto		*skc_prot;
#ifdef CONFIG_NET_NS
	struct net		*skc_net;
#endif
};

/**
 *	struct sock - network layer representation of sockets
 *	@__sk_common: shared layout with inet_timewait_sock
 *	@sk_shutdown: mask of %SEND_SHUTDOWN and/or %RCV_SHUTDOWN
 *	@sk_userlocks: %SO_SNDBUF and %SO_RCVBUF settings
 *	@sk_lock: synchronizer
 *	@sk_rcvbuf: size of receive buffer in bytes
 *	@sk_wq: sock wait queue and async head
 *	@sk_dst_cache: destination cache
 *	@sk_dst_lock: destination cache lock
 *	@sk_policy: flow policy
 *	@sk_rmem_alloc: receive queue bytes committed
 *	@sk_receive_queue: incoming packets
 *	@sk_wmem_alloc: transmit queue bytes committed
 *	@sk_write_queue: Packet sending queue
 *	@sk_async_wait_queue: DMA copied packets
 *	@sk_omem_alloc: "o" is "option" or "other"
 *	@sk_wmem_queued: persistent queue size
 *	@sk_forward_alloc: space allocated forward
 *	@sk_allocation: allocation mode
 *	@sk_sndbuf: size of send buffer in bytes
 *	@sk_flags: %SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE,
 *		   %SO_OOBINLINE settings, %SO_TIMESTAMPING settings
 *	@sk_no_check: %SO_NO_CHECK setting, whether or not to checksum packets
 *	@sk_route_caps: route capabilities (e.g. %NETIF_F_TSO)
 *	@sk_route_nocaps: forbidden route capabilities (e.g. NETIF_F_GSO_MASK)
 *	@sk_gso_type: GSO type (e.g. %SKB_GSO_TCPV4)
 *	@sk_gso_max_size: Maximum GSO segment size to build
 *	@sk_lingertime: %SO_LINGER l_linger setting
 *	@sk_backlog: always used with the per-socket spinlock held
 *	@sk_callback_lock: used with the callbacks in the end of this struct
 *	@sk_error_queue: rarely used
 *	@sk_prot_creator: sk_prot of original sock creator (see ipv6_setsockopt,
 *			  IPV6_ADDRFORM for instance)
 *	@sk_err: last error
 *	@sk_err_soft: errors that don't cause failure but are the cause of a
 *		      persistent failure not just 'timed out'
 *	@sk_drops: raw/udp drops counter
 *	@sk_ack_backlog: current listen backlog
 *	@sk_max_ack_backlog: listen backlog set in listen()
 *	@sk_priority: %SO_PRIORITY setting
 *	@sk_type: socket type (%SOCK_STREAM, etc)
 *	@sk_protocol: which protocol this socket belongs in this network family
 *	@sk_peer_pid: &struct pid for this socket's peer
 *	@sk_peer_cred: %SO_PEERCRED setting
 *	@sk_rcvlowat: %SO_RCVLOWAT setting
 *	@sk_rcvtimeo: %SO_RCVTIMEO setting
 *	@sk_sndtimeo: %SO_SNDTIMEO setting
 *	@sk_rxhash: flow hash received from netif layer
 *	@sk_filter: socket filtering instructions
 *	@sk_protinfo: private area, net family specific, when not using slab
 *	@sk_timer: sock cleanup timer
 *	@sk_stamp: time stamp of last packet received
 *	@sk_socket: Identd and reporting IO signals
 *	@sk_user_data: RPC layer private data
 *	@sk_sndmsg_page: cached page for sendmsg
 *	@sk_sndmsg_off: cached offset for sendmsg
 *	@sk_send_head: front of stuff to transmit
 *	@sk_security: used by security modules
 *	@sk_mark: generic packet mark
 *	@sk_classid: this socket's cgroup classid
 *	@sk_write_pending: a write to stream socket waits to start
 *	@sk_state_change: callback to indicate change in the state of the sock
 *	@sk_data_ready: callback to indicate there is data to be processed
 *	@sk_write_space: callback to indicate there is buffer sending space available
 *	@sk_error_report: callback to indicate errors (e.g. %MSG_ERRQUEUE)
 *	@sk_backlog_rcv: callback to process the backlog
 *	@sk_destruct: called at sock freeing time, i.e. when refcnt == 0
 */
struct sock {
	/*
	 * Now struct inet_timewait_sock also uses sock_common, so please just
	 * don't add anything before this first member (__sk_common) --acme
	 */
	struct sock_common	__sk_common;
#define sk_node			__sk_common.skc_node
#define sk_nulls_node		__sk_common.skc_nulls_node
#define sk_refcnt		__sk_common.skc_refcnt
#define sk_tx_queue_mapping	__sk_common.skc_tx_queue_mapping

#define sk_copy_start		__sk_common.skc_hash
#define sk_hash			__sk_common.skc_hash
#define sk_family		__sk_common.skc_family
#define sk_state		__sk_common.skc_state
#define sk_reuse		__sk_common.skc_reuse
#define sk_bound_dev_if		__sk_common.skc_bound_dev_if
#define sk_bind_node		__sk_common.skc_bind_node
#define sk_prot			__sk_common.skc_prot
#define sk_net			__sk_common.skc_net
	kmemcheck_bitfield_begin(flags);
	unsigned int		sk_shutdown  : 2,
				sk_no_check  : 2,
				sk_userlocks : 4,
				sk_protocol  : 8,
				sk_type      : 16;
	kmemcheck_bitfield_end(flags);
	int			sk_rcvbuf;
	socket_lock_t		sk_lock;
	/*
	 * The backlog queue is special, it is always used with
	 * the per-socket spinlock held and requires low latency
	 * access. Therefore we special case its implementation.
	 */
	struct {
		struct sk_buff *head;
		struct sk_buff *tail;
		int len;
	} sk_backlog;
	struct socket_wq	*sk_wq;
	struct dst_entry	*sk_dst_cache;
#ifdef CONFIG_XFRM
	struct xfrm_policy	*sk_policy[2];
#endif
	spinlock_t		sk_dst_lock;
	atomic_t		sk_rmem_alloc;
	atomic_t		sk_wmem_alloc;
	atomic_t		sk_omem_alloc;
	int			sk_sndbuf;
	struct sk_buff_head	sk_receive_queue;
	struct sk_buff_head	sk_write_queue;
#ifdef CONFIG_NET_DMA
	struct sk_buff_head	sk_async_wait_queue;
#endif
	int			sk_wmem_queued;
	int			sk_forward_alloc;
	gfp_t			sk_allocation;
	int			sk_route_caps;
	int			sk_route_nocaps;
	int			sk_gso_type;
	unsigned int		sk_gso_max_size;
	int			sk_rcvlowat;
#ifdef CONFIG_RPS
	__u32			sk_rxhash;
#endif
	unsigned long		sk_flags;
	unsigned long		sk_lingertime;
	struct sk_buff_head	sk_error_queue;
	struct proto		*sk_prot_creator;
	rwlock_t		sk_callback_lock;
	int			sk_err,
				sk_err_soft;
	atomic_t		sk_drops;
	unsigned short		sk_ack_backlog;
	unsigned short		sk_max_ack_backlog;
	__u32			sk_priority;
	struct pid		*sk_peer_pid;
	const struct cred	*sk_peer_cred;
	long			sk_rcvtimeo;
	long			sk_sndtimeo;
	struct sk_filter	*sk_filter;
	void			*sk_protinfo;
	struct timer_list	sk_timer;
	ktime_t			sk_stamp;
	struct socket		*sk_socket;
	void			*sk_user_data;
	struct page		*sk_sndmsg_page;
	struct sk_buff		*sk_send_head;
	__u32			sk_sndmsg_off;
	int			sk_write_pending;
#ifdef CONFIG_SECURITY
	void			*sk_security;
#endif
	__u32			sk_mark;
	u32			sk_classid;
	void			(*sk_state_change)(struct sock *sk);
	void			(*sk_data_ready)(struct sock *sk, int bytes);
	void			(*sk_write_space)(struct sock *sk);
	void			(*sk_error_report)(struct sock *sk);
	int			(*sk_backlog_rcv)(struct sock *sk,
						  struct sk_buff *skb);
	void			(*sk_destruct)(struct sock *sk);
};

/*
 * Hashed lists helper routines
 */
static inline struct sock *sk_entry(const struct hlist_node *node)
{
	return hlist_entry(node, struct sock, sk_node);
}

static inline struct sock *__sk_head(const struct hlist_head *head)
{
	return hlist_entry(head->first, struct sock, sk_node);
}

static inline struct sock *sk_head(const struct hlist_head *head)
{
	return hlist_empty(head) ? NULL : __sk_head(head);
}

static inline struct sock *__sk_nulls_head(const struct hlist_nulls_head *head)
{
	return hlist_nulls_entry(head->first, struct sock, sk_nulls_node);
}

static inline struct sock *sk_nulls_head(const struct hlist_nulls_head *head)
{
	return hlist_nulls_empty(head) ? NULL : __sk_nulls_head(head);
}

static inline struct sock *sk_next(const struct sock *sk)
{
	return sk->sk_node.next ?
		hlist_entry(sk->sk_node.next, struct sock, sk_node) : NULL;
}

static inline struct sock *sk_nulls_next(const struct sock *sk)
{
	return (!is_a_nulls(sk->sk_nulls_node.next)) ?
		hlist_nulls_entry(sk->sk_nulls_node.next,
				  struct sock, sk_nulls_node) :
		NULL;
}

static inline int sk_unhashed(const struct sock *sk)
{
	return hlist_unhashed(&sk->sk_node);
}

static inline int sk_hashed(const struct sock *sk)
{
	return !sk_unhashed(sk);
}

static __inline__ void sk_node_init(struct hlist_node *node)
{
	node->pprev = NULL;
}

static __inline__ void sk_nulls_node_init(struct hlist_nulls_node *node)
{
	node->pprev = NULL;
}

static __inline__ void __sk_del_node(struct sock *sk)
{
	__hlist_del(&sk->sk_node);
}

/* NB: equivalent to hlist_del_init_rcu */
static __inline__ int __sk_del_node_init(struct sock *sk)
{
	if (sk_hashed(sk)) {
		__sk_del_node(sk);
		sk_node_init(&sk->sk_node);
		return 1;
	}
	return 0;
}

/* Grab socket reference count. This operation is valid only
   when sk is ALREADY grabbed, e.g. it has been found in a hash table
   or a list and the lookup is made under a lock preventing hash table
   modifications.
 */

static inline void sock_hold(struct sock *sk)
{
	atomic_inc(&sk->sk_refcnt);
}

/* Ungrab socket in the context, which assumes that socket refcnt
   cannot hit zero, e.g. it is true in the context of any socketcall.
 */
static inline void __sock_put(struct sock *sk)
{
	atomic_dec(&sk->sk_refcnt);
}

static __inline__ int sk_del_node_init(struct sock *sk)
{
	int rc = __sk_del_node_init(sk);

	if (rc) {
		/* paranoid for a while -acme */
		WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
		__sock_put(sk);
	}
	return rc;
}
#define sk_del_node_init_rcu(sk)	sk_del_node_init(sk)

static __inline__ int __sk_nulls_del_node_init_rcu(struct sock *sk)
{
	if (sk_hashed(sk)) {
		hlist_nulls_del_init_rcu(&sk->sk_nulls_node);
		return 1;
	}
	return 0;
}

static __inline__ int sk_nulls_del_node_init_rcu(struct sock *sk)
{
	int rc = __sk_nulls_del_node_init_rcu(sk);

	if (rc) {
		/* paranoid for a while -acme */
		WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
		__sock_put(sk);
	}
	return rc;
}

static __inline__ void __sk_add_node(struct sock *sk, struct hlist_head *list)
{
	hlist_add_head(&sk->sk_node, list);
}

static __inline__ void sk_add_node(struct sock *sk, struct hlist_head *list)
{
	sock_hold(sk);
	__sk_add_node(sk, list);
}

static __inline__ void sk_add_node_rcu(struct sock *sk, struct hlist_head *list)
{
	sock_hold(sk);
	hlist_add_head_rcu(&sk->sk_node, list);
}

static __inline__ void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
{
	hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
}

static __inline__ void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
{
	sock_hold(sk);
	__sk_nulls_add_node_rcu(sk, list);
}

static __inline__ void __sk_del_bind_node(struct sock *sk)
{
	__hlist_del(&sk->sk_bind_node);
}

static __inline__ void sk_add_bind_node(struct sock *sk,
					struct hlist_head *list)
{
	hlist_add_head(&sk->sk_bind_node, list);
}

#define sk_for_each(__sk, node, list) \
	hlist_for_each_entry(__sk, node, list, sk_node)
#define sk_for_each_rcu(__sk, node, list) \
	hlist_for_each_entry_rcu(__sk, node, list, sk_node)
#define sk_nulls_for_each(__sk, node, list) \
	hlist_nulls_for_each_entry(__sk, node, list, sk_nulls_node)
#define sk_nulls_for_each_rcu(__sk, node, list) \
	hlist_nulls_for_each_entry_rcu(__sk, node, list, sk_nulls_node)
#define sk_for_each_from(__sk, node) \
	if (__sk && ({ node = &(__sk)->sk_node; 1; })) \
		hlist_for_each_entry_from(__sk, node, sk_node)
#define sk_nulls_for_each_from(__sk, node) \
	if (__sk && ({ node = &(__sk)->sk_nulls_node; 1; })) \
		hlist_nulls_for_each_entry_from(__sk, node, sk_nulls_node)
#define sk_for_each_continue(__sk, node) \
	if (__sk && ({ node = &(__sk)->sk_node; 1; })) \
		hlist_for_each_entry_continue(__sk, node, sk_node)
#define sk_for_each_safe(__sk, node, tmp, list) \
	hlist_for_each_entry_safe(__sk, node, tmp, list, sk_node)
#define sk_for_each_bound(__sk, node, list) \
	hlist_for_each_entry(__sk, node, list, sk_bind_node)

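/*
 * Illustrative sketch (not from the original header): a typical locked
 * lookup takes the table lock, walks a chain with sk_for_each(), and pins
 * the result with sock_hold() before dropping the lock, which is exactly
 * the "ALREADY grabbed" precondition documented above.  The my_table_*
 * names are hypothetical:
 *
 *	struct sock *sk, *found = NULL;
 *	struct hlist_node *node;
 *
 *	spin_lock(&my_table_lock);
 *	sk_for_each(sk, node, &my_table_chain)
 *		if (sk->sk_hash == hash) {
 *			sock_hold(sk);	(legal while the chain is locked)
 *			found = sk;
 *			break;
 *		}
 *	spin_unlock(&my_table_lock);
 */
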
/* Sock flags */
enum sock_flags {
	SOCK_DEAD,
	SOCK_DONE,
	SOCK_URGINLINE,
	SOCK_KEEPOPEN,
	SOCK_LINGER,
	SOCK_DESTROY,
	SOCK_BROADCAST,
	SOCK_TIMESTAMP,
	SOCK_ZAPPED,
	SOCK_USE_WRITE_QUEUE, /* whether to call sk->sk_write_space in sock_wfree */
	SOCK_DBG, /* %SO_DEBUG setting */
	SOCK_RCVTSTAMP, /* %SO_TIMESTAMP setting */
	SOCK_RCVTSTAMPNS, /* %SO_TIMESTAMPNS setting */
	SOCK_LOCALROUTE, /* route locally only, %SO_DONTROUTE setting */
	SOCK_QUEUE_SHRUNK, /* write queue has been shrunk recently */
	SOCK_TIMESTAMPING_TX_HARDWARE,  /* %SOF_TIMESTAMPING_TX_HARDWARE */
	SOCK_TIMESTAMPING_TX_SOFTWARE,  /* %SOF_TIMESTAMPING_TX_SOFTWARE */
	SOCK_TIMESTAMPING_RX_HARDWARE,  /* %SOF_TIMESTAMPING_RX_HARDWARE */
	SOCK_TIMESTAMPING_RX_SOFTWARE,  /* %SOF_TIMESTAMPING_RX_SOFTWARE */
	SOCK_TIMESTAMPING_SOFTWARE,     /* %SOF_TIMESTAMPING_SOFTWARE */
	SOCK_TIMESTAMPING_RAW_HARDWARE, /* %SOF_TIMESTAMPING_RAW_HARDWARE */
	SOCK_TIMESTAMPING_SYS_HARDWARE, /* %SOF_TIMESTAMPING_SYS_HARDWARE */
	SOCK_FASYNC, /* fasync() active */
	SOCK_RXQ_OVFL,
};

static inline void sock_copy_flags(struct sock *nsk, struct sock *osk)
{
	nsk->sk_flags = osk->sk_flags;
}

static inline void sock_set_flag(struct sock *sk, enum sock_flags flag)
{
	__set_bit(flag, &sk->sk_flags);
}

static inline void sock_reset_flag(struct sock *sk, enum sock_flags flag)
{
	__clear_bit(flag, &sk->sk_flags);
}

static inline int sock_flag(struct sock *sk, enum sock_flags flag)
{
	return test_bit(flag, &sk->sk_flags);
}

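/*
 * Illustrative usage (not part of the original header): the set/reset
 * helpers use non-atomic bitops, so writers are expected to be serialized,
 * typically via the socket lock, e.g.:
 *
 *	lock_sock(sk);
 *	sock_set_flag(sk, SOCK_LINGER);
 *	release_sock(sk);
 *
 * Readers such as sock_flag(sk, SOCK_DEAD) use test_bit() and may
 * reasonably run without the lock.
 */
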
static inline void sk_acceptq_removed(struct sock *sk)
{
	sk->sk_ack_backlog--;
}

static inline void sk_acceptq_added(struct sock *sk)
{
	sk->sk_ack_backlog++;
}

static inline int sk_acceptq_is_full(struct sock *sk)
{
	return sk->sk_ack_backlog > sk->sk_max_ack_backlog;
}

/*
 * Compute minimal free write space needed to queue new packets.
 */
static inline int sk_stream_min_wspace(struct sock *sk)
{
	return sk->sk_wmem_queued >> 1;
}

static inline int sk_stream_wspace(struct sock *sk)
{
	return sk->sk_sndbuf - sk->sk_wmem_queued;
}

extern void sk_stream_write_space(struct sock *sk);

static inline int sk_stream_memory_free(struct sock *sk)
{
	return sk->sk_wmem_queued < sk->sk_sndbuf;
}

/* OOB backlog add */
static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
{
	/* The skb's dst must be refcounted, as we are about to leave the rcu lock */
	skb_dst_force(skb);

	if (!sk->sk_backlog.tail)
		sk->sk_backlog.head = skb;
	else
		sk->sk_backlog.tail->next = skb;

	sk->sk_backlog.tail = skb;
	skb->next = NULL;
}

/*
 * Take into account size of receive queue and backlog queue
 */
static inline bool sk_rcvqueues_full(const struct sock *sk, const struct sk_buff *skb)
{
	unsigned int qsize = sk->sk_backlog.len + atomic_read(&sk->sk_rmem_alloc);

	return qsize + skb->truesize > sk->sk_rcvbuf;
}

/* The per-socket spinlock must be held here. */
static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *skb)
{
	if (sk_rcvqueues_full(sk, skb))
		return -ENOBUFS;

	__sk_add_backlog(sk, skb);
	sk->sk_backlog.len += skb->truesize;
	return 0;
}

static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	return sk->sk_backlog_rcv(sk, skb);
}

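/*
 * Illustrative sketch (not from the original header): a protocol's packet
 * receive path typically processes directly when the socket is not owned
 * by a user context, and queues to the backlog otherwise:
 *
 *	bh_lock_sock(sk);
 *	if (!sock_owned_by_user(sk))
 *		rc = sk_backlog_rcv(sk, skb);
 *	else if (sk_add_backlog(sk, skb))
 *		rc = -ENOBUFS;		(receive queues full, drop)
 *	bh_unlock_sock(sk);
 *
 * release_sock() later replays the queued skbs through sk_backlog_rcv().
 */
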
static inline void sock_rps_record_flow(const struct sock *sk)
{
#ifdef CONFIG_RPS
	struct rps_sock_flow_table *sock_flow_table;

	rcu_read_lock();
	sock_flow_table = rcu_dereference(rps_sock_flow_table);
	rps_record_sock_flow(sock_flow_table, sk->sk_rxhash);
	rcu_read_unlock();
#endif
}

static inline void sock_rps_reset_flow(const struct sock *sk)
{
#ifdef CONFIG_RPS
	struct rps_sock_flow_table *sock_flow_table;

	rcu_read_lock();
	sock_flow_table = rcu_dereference(rps_sock_flow_table);
	rps_reset_sock_flow(sock_flow_table, sk->sk_rxhash);
	rcu_read_unlock();
#endif
}

static inline void sock_rps_save_rxhash(struct sock *sk, u32 rxhash)
{
#ifdef CONFIG_RPS
	if (unlikely(sk->sk_rxhash != rxhash)) {
		sock_rps_reset_flow(sk);
		sk->sk_rxhash = rxhash;
	}
#endif
}

#define sk_wait_event(__sk, __timeo, __condition)		\
	({	int __rc;					\
		release_sock(__sk);				\
		__rc = __condition;				\
		if (!__rc) {					\
			*(__timeo) = schedule_timeout(*(__timeo)); \
		}						\
		lock_sock(__sk);				\
		__rc = __condition;				\
		__rc;						\
	})

extern int sk_stream_wait_connect(struct sock *sk, long *timeo_p);
extern int sk_stream_wait_memory(struct sock *sk, long *timeo_p);
extern void sk_stream_wait_close(struct sock *sk, long timeo_p);
extern int sk_stream_error(struct sock *sk, int flags, int err);
extern void sk_stream_kill_queues(struct sock *sk);

extern int sk_wait_data(struct sock *sk, long *timeo);

struct request_sock_ops;
struct timewait_sock_ops;
struct inet_hashinfo;
struct raw_hashinfo;

/* Networking protocol blocks we attach to sockets.
 * socket layer -> transport layer interface
 * transport -> network interface is defined by struct inet_proto
 */
struct proto {
	void			(*close)(struct sock *sk,
					 long timeout);
	int			(*connect)(struct sock *sk,
					   struct sockaddr *uaddr,
					   int addr_len);
	int			(*disconnect)(struct sock *sk, int flags);

	struct sock *		(*accept)(struct sock *sk, int flags, int *err);

	int			(*ioctl)(struct sock *sk, int cmd,
					 unsigned long arg);
	int			(*init)(struct sock *sk);
	void			(*destroy)(struct sock *sk);
	void			(*shutdown)(struct sock *sk, int how);
	int			(*setsockopt)(struct sock *sk, int level,
					      int optname, char __user *optval,
					      unsigned int optlen);
	int			(*getsockopt)(struct sock *sk, int level,
					      int optname, char __user *optval,
					      int __user *option);
#ifdef CONFIG_COMPAT
	int			(*compat_setsockopt)(struct sock *sk,
					int level,
					int optname, char __user *optval,
					unsigned int optlen);
	int			(*compat_getsockopt)(struct sock *sk,
					int level,
					int optname, char __user *optval,
					int __user *option);
#endif
	int			(*sendmsg)(struct kiocb *iocb, struct sock *sk,
					   struct msghdr *msg, size_t len);
	int			(*recvmsg)(struct kiocb *iocb, struct sock *sk,
					   struct msghdr *msg,
					   size_t len, int noblock, int flags,
					   int *addr_len);
	int			(*sendpage)(struct sock *sk, struct page *page,
					    int offset, size_t size, int flags);
	int			(*bind)(struct sock *sk,
					struct sockaddr *uaddr, int addr_len);

	int			(*backlog_rcv)(struct sock *sk,
					       struct sk_buff *skb);

	/* Keeping track of sk's, looking them up, and port selection methods. */
	void			(*hash)(struct sock *sk);
	void			(*unhash)(struct sock *sk);
	void			(*rehash)(struct sock *sk);
	int			(*get_port)(struct sock *sk, unsigned short snum);

	/* Keeping track of sockets in use */
#ifdef CONFIG_PROC_FS
	unsigned int		inuse_idx;
#endif

	/* Memory pressure */
	void			(*enter_memory_pressure)(struct sock *sk);
	atomic_t		*memory_allocated;	/* Current allocated memory. */
	struct percpu_counter	*sockets_allocated;	/* Current number of sockets. */
	/*
	 * Pressure flag: try to collapse.
	 * Technical note: it is used by multiple contexts non atomically.
	 * All of __sk_mem_schedule() is of this nature: accounting
	 * is strict, actions are advisory and have some latency.
	 */
	int			*memory_pressure;
	int			*sysctl_mem;
	int			*sysctl_wmem;
	int			*sysctl_rmem;
	int			max_header;
	bool			no_autobind;

	struct kmem_cache	*slab;
	unsigned int		obj_size;
	int			slab_flags;

	struct percpu_counter	*orphan_count;

	struct request_sock_ops	*rsk_prot;
	struct timewait_sock_ops *twsk_prot;

	union {
		struct inet_hashinfo	*hashinfo;
		struct udp_table	*udp_table;
		struct raw_hashinfo	*raw_hash;
	} h;

	struct module		*owner;

	char			name[32];

	struct list_head	node;
#ifdef SOCK_REFCNT_DEBUG
	atomic_t		socks;
#endif
};

extern int proto_register(struct proto *prot, int alloc_slab);
extern void proto_unregister(struct proto *prot);

#ifdef SOCK_REFCNT_DEBUG
static inline void sk_refcnt_debug_inc(struct sock *sk)
{
	atomic_inc(&sk->sk_prot->socks);
}

static inline void sk_refcnt_debug_dec(struct sock *sk)
{
	atomic_dec(&sk->sk_prot->socks);
	printk(KERN_DEBUG "%s socket %p released, %d are still alive\n",
	       sk->sk_prot->name, sk, atomic_read(&sk->sk_prot->socks));
}

static inline void sk_refcnt_debug_release(const struct sock *sk)
{
	if (atomic_read(&sk->sk_refcnt) != 1)
		printk(KERN_DEBUG "Destruction of the %s socket %p delayed, refcnt=%d\n",
		       sk->sk_prot->name, sk, atomic_read(&sk->sk_refcnt));
}
#else /* SOCK_REFCNT_DEBUG */
#define sk_refcnt_debug_inc(sk) do { } while (0)
#define sk_refcnt_debug_dec(sk) do { } while (0)
#define sk_refcnt_debug_release(sk) do { } while (0)
#endif /* SOCK_REFCNT_DEBUG */


#ifdef CONFIG_PROC_FS
/* Called with local bh disabled */
extern void sock_prot_inuse_add(struct net *net, struct proto *prot, int inc);
extern int sock_prot_inuse_get(struct net *net, struct proto *proto);
#else
static inline void sock_prot_inuse_add(struct net *net, struct proto *prot,
		int inc)
{
}
#endif

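/*
 * Illustrative sketch (not from the original header): a minimal protocol
 * registers its struct proto once at module init; the my_* names are
 * hypothetical.  Passing alloc_slab == 1 asks proto_register() to create
 * a kmem cache of obj_size bytes for the protocol's socks:
 *
 *	static struct proto my_proto = {
 *		.name	  = "MYPROTO",
 *		.owner	  = THIS_MODULE,
 *		.obj_size = sizeof(struct my_sock),
 *	};
 *
 *	err = proto_register(&my_proto, 1);
 *	...
 *	proto_unregister(&my_proto);
 */
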
/* With per-bucket locks this operation is not atomic, so this
 * version is no worse.
 */
static inline void __sk_prot_rehash(struct sock *sk)
{
	sk->sk_prot->unhash(sk);
	sk->sk_prot->hash(sk);
}

/* About 10 seconds */
#define SOCK_DESTROY_TIME (10*HZ)

/* Ports 0-1023 can't be bound to unless you are superuser */
#define PROT_SOCK	1024

#define SHUTDOWN_MASK	3
#define RCV_SHUTDOWN	1
#define SEND_SHUTDOWN	2

#define SOCK_SNDBUF_LOCK	1
#define SOCK_RCVBUF_LOCK	2
#define SOCK_BINDADDR_LOCK	4
#define SOCK_BINDPORT_LOCK	8

/* sock_iocb: used to kick off async processing of socket ios */
struct sock_iocb {
	struct list_head	list;

	int			flags;
	int			size;
	struct socket		*sock;
	struct sock		*sk;
	struct scm_cookie	*scm;
	struct msghdr		*msg, async_msg;
	struct kiocb		*kiocb;
};

static inline struct sock_iocb *kiocb_to_siocb(struct kiocb *iocb)
{
	return (struct sock_iocb *)iocb->private;
}

static inline struct kiocb *siocb_to_kiocb(struct sock_iocb *si)
{
	return si->kiocb;
}

struct socket_alloc {
	struct socket socket;
	struct inode vfs_inode;
};

static inline struct socket *SOCKET_I(struct inode *inode)
{
	return &container_of(inode, struct socket_alloc, vfs_inode)->socket;
}

static inline struct inode *SOCK_INODE(struct socket *socket)
{
	return &container_of(socket, struct socket_alloc, socket)->vfs_inode;
}

/*
 * Functions for memory accounting
 */
extern int __sk_mem_schedule(struct sock *sk, int size, int kind);
extern void __sk_mem_reclaim(struct sock *sk);

#define SK_MEM_QUANTUM ((int)PAGE_SIZE)
#define SK_MEM_QUANTUM_SHIFT ilog2(SK_MEM_QUANTUM)
#define SK_MEM_SEND	0
#define SK_MEM_RECV	1

static inline int sk_mem_pages(int amt)
{
	return (amt + SK_MEM_QUANTUM - 1) >> SK_MEM_QUANTUM_SHIFT;
}

static inline int sk_has_account(struct sock *sk)
{
	/* return true if protocol supports memory accounting */
	return !!sk->sk_prot->memory_allocated;
}

static inline int sk_wmem_schedule(struct sock *sk, int size)
{
	if (!sk_has_account(sk))
		return 1;
	return size <= sk->sk_forward_alloc ||
		__sk_mem_schedule(sk, size, SK_MEM_SEND);
}

static inline int sk_rmem_schedule(struct sock *sk, int size)
{
	if (!sk_has_account(sk))
		return 1;
	return size <= sk->sk_forward_alloc ||
		__sk_mem_schedule(sk, size, SK_MEM_RECV);
}

static inline void sk_mem_reclaim(struct sock *sk)
{
	if (!sk_has_account(sk))
		return;
	if (sk->sk_forward_alloc >= SK_MEM_QUANTUM)
		__sk_mem_reclaim(sk);
}

static inline void sk_mem_reclaim_partial(struct sock *sk)
{
	if (!sk_has_account(sk))
		return;
	if (sk->sk_forward_alloc > SK_MEM_QUANTUM)
		__sk_mem_reclaim(sk);
}

static inline void sk_mem_charge(struct sock *sk, int size)
{
	if (!sk_has_account(sk))
		return;
	sk->sk_forward_alloc -= size;
}

static inline void sk_mem_uncharge(struct sock *sk, int size)
{
	if (!sk_has_account(sk))
		return;
	sk->sk_forward_alloc += size;
}

static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
{
	sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
	sk->sk_wmem_queued -= skb->truesize;
	sk_mem_uncharge(sk, skb->truesize);
	__kfree_skb(skb);
}

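/*
 * Illustrative sketch (not from the original header): a protocol with
 * memory accounting reserves forward-allocated space before queueing and
 * charges the skb against it, roughly:
 *
 *	if (!sk_rmem_schedule(sk, skb->truesize))
 *		goto drop;		(over the protocol's memory limits)
 *	skb_set_owner_r(skb, sk);	(charges sk_forward_alloc via
 *					 sk_mem_charge())
 *
 * Freeing queued data goes the other way: sk_mem_uncharge() and,
 * eventually, sk_mem_reclaim() to return whole quanta to the pool.
 */
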
/* Used by processes to "lock" a socket state, so that
 * interrupts and bottom half handlers won't change it
 * from under us. It essentially blocks any incoming
 * packets, so that we won't get any new data or any
 * packets that change the state of the socket.
 *
 * While locked, BH processing will add new packets to
 * the backlog queue.  This queue is processed by the
 * owner of the socket lock right before it is released.
 *
 * Since ~2.3.5 it is also exclusive sleep lock serializing
 * accesses from user process context.
 */
#define sock_owned_by_user(sk)	((sk)->sk_lock.owned)

/*
 * Macro so as to not evaluate some arguments when
 * lockdep is not enabled.
 *
 * Mark both the sk_lock and the sk_lock.slock as a
 * per-address-family lock class.
 */
#define sock_lock_init_class_and_name(sk, sname, skey, name, key)	\
do {									\
	sk->sk_lock.owned = 0;						\
	init_waitqueue_head(&sk->sk_lock.wq);				\
	spin_lock_init(&(sk)->sk_lock.slock);				\
	debug_check_no_locks_freed((void *)&(sk)->sk_lock,		\
			sizeof((sk)->sk_lock));				\
	lockdep_set_class_and_name(&(sk)->sk_lock.slock,		\
			(skey), (sname));				\
	lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0);	\
} while (0)

extern void lock_sock_nested(struct sock *sk, int subclass);

static inline void lock_sock(struct sock *sk)
{
	lock_sock_nested(sk, 0);
}

extern void release_sock(struct sock *sk);

/* BH context may only use the following locking interface. */
#define bh_lock_sock(__sk)	spin_lock(&((__sk)->sk_lock.slock))
#define bh_lock_sock_nested(__sk) \
				spin_lock_nested(&((__sk)->sk_lock.slock), \
				SINGLE_DEPTH_NESTING)
#define bh_unlock_sock(__sk)	spin_unlock(&((__sk)->sk_lock.slock))

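/*
 * Illustrative usage (not part of the original header): process-context
 * code brackets socket state manipulation with lock_sock()/release_sock(),
 * while softirq code may only use the bh variants:
 *
 *	lock_sock(sk);			(may sleep; owns the socket)
 *	... modify socket state ...
 *	release_sock(sk);		(also processes the backlog)
 *
 *	bh_lock_sock(sk);		(softirq: spinlock only, no sleeping)
 *	...
 *	bh_unlock_sock(sk);
 */
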
extern bool lock_sock_fast(struct sock *sk);
/**
 * unlock_sock_fast - complement of lock_sock_fast
 * @sk: socket
 * @slow: slow mode
 *
 * Fast unlock socket for user context.
 * If slow mode is on, we call regular release_sock().
 */
static inline void unlock_sock_fast(struct sock *sk, bool slow)
{
	if (slow)
		release_sock(sk);
	else
		spin_unlock_bh(&sk->sk_lock.slock);
}

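/*
 * Illustrative usage (not part of the original header): callers that can
 * often get away with just the spinlock use the fast variant and must
 * pass its return value back to unlock_sock_fast():
 *
 *	bool slow = lock_sock_fast(sk);
 *	... short non-sleeping critical section ...
 *	unlock_sock_fast(sk, slow);
 */
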
extern struct sock		*sk_alloc(struct net *net, int family,
					  gfp_t priority,
					  struct proto *prot);
extern void			sk_free(struct sock *sk);
extern void			sk_release_kernel(struct sock *sk);
extern struct sock		*sk_clone(const struct sock *sk,
					  const gfp_t priority);

extern struct sk_buff		*sock_wmalloc(struct sock *sk,
					      unsigned long size, int force,
					      gfp_t priority);
extern struct sk_buff		*sock_rmalloc(struct sock *sk,
					      unsigned long size, int force,
					      gfp_t priority);
extern void			sock_wfree(struct sk_buff *skb);
extern void			sock_rfree(struct sk_buff *skb);

extern int			sock_setsockopt(struct socket *sock, int level,
						int op, char __user *optval,
						unsigned int optlen);

extern int			sock_getsockopt(struct socket *sock, int level,
						int op, char __user *optval,
						int __user *optlen);
extern struct sk_buff		*sock_alloc_send_skb(struct sock *sk,
						     unsigned long size,
						     int noblock,
						     int *errcode);
extern struct sk_buff		*sock_alloc_send_pskb(struct sock *sk,
						      unsigned long header_len,
						      unsigned long data_len,
						      int noblock,
						      int *errcode);
extern void *sock_kmalloc(struct sock *sk, int size,
			  gfp_t priority);
extern void sock_kfree_s(struct sock *sk, void *mem, int size);
extern void sk_send_sigurg(struct sock *sk);

#ifdef CONFIG_CGROUPS
extern void sock_update_classid(struct sock *sk);
#else
static inline void sock_update_classid(struct sock *sk)
{
}
#endif

/*
 * Functions to fill in entries in struct proto_ops when a protocol
 * does not implement a particular function.
 */
extern int			sock_no_bind(struct socket *,
					     struct sockaddr *, int);
extern int			sock_no_connect(struct socket *,
						struct sockaddr *, int, int);
extern int			sock_no_socketpair(struct socket *,
						   struct socket *);
extern int			sock_no_accept(struct socket *,
					       struct socket *, int);
extern int			sock_no_getname(struct socket *,
						struct sockaddr *, int *, int);
extern unsigned int		sock_no_poll(struct file *, struct socket *,
					     struct poll_table_struct *);
extern int			sock_no_ioctl(struct socket *, unsigned int,
					      unsigned long);
extern int			sock_no_listen(struct socket *, int);
extern int			sock_no_shutdown(struct socket *, int);
extern int			sock_no_getsockopt(struct socket *, int, int,
						   char __user *, int __user *);
extern int			sock_no_setsockopt(struct socket *, int, int,
						   char __user *, unsigned int);
extern int			sock_no_sendmsg(struct kiocb *, struct socket *,
						struct msghdr *, size_t);
extern int			sock_no_recvmsg(struct kiocb *, struct socket *,
						struct msghdr *, size_t, int);
extern int			sock_no_mmap(struct file *file,
					     struct socket *sock,
					     struct vm_area_struct *vma);
extern ssize_t			sock_no_sendpage(struct socket *sock,
						 struct page *page,
						 int offset, size_t size,
						 int flags);

/*
 * Functions to fill in entries in struct proto_ops when a protocol
 * uses the inet style.
 */
extern int sock_common_getsockopt(struct socket *sock, int level, int optname,
				  char __user *optval, int __user *optlen);
extern int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
			       struct msghdr *msg, size_t size, int flags);
extern int sock_common_setsockopt(struct socket *sock, int level, int optname,
				  char __user *optval, unsigned int optlen);
extern int compat_sock_common_getsockopt(struct socket *sock, int level,
		int optname, char __user *optval, int __user *optlen);
extern int compat_sock_common_setsockopt(struct socket *sock, int level,
		int optname, char __user *optval, unsigned int optlen);

extern void sk_common_release(struct sock *sk);

/*
 *	Default socket callbacks and setup code
 */

/* Initialise core socket variables */
extern void sock_init_data(struct socket *sock, struct sock *sk);

/**
 *	sk_filter_release - release a socket filter
 *	@fp: filter to remove
 *
 *	Remove a filter from a socket and release its resources.
 */

static inline void sk_filter_release(struct sk_filter *fp)
{
	if (atomic_dec_and_test(&fp->refcnt))
		kfree(fp);
}

static inline void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
{
	unsigned int size = sk_filter_len(fp);

	atomic_sub(size, &sk->sk_omem_alloc);
	sk_filter_release(fp);
}

static inline void sk_filter_charge(struct sock *sk, struct sk_filter *fp)
{
	atomic_inc(&fp->refcnt);
	atomic_add(sk_filter_len(fp), &sk->sk_omem_alloc);
}

/*
 * Socket reference counting postulates.
 *
 * * Each user of socket SHOULD hold a reference count.
 * * Each access point to socket (a hash table bucket, reference from a list,
 *   running timer, skb in flight) MUST hold a reference count.
 * * When reference count hits 0, it means it will never increase back.
 * * When reference count hits 0, it means that no references from
 *   outside exist to this socket and current process on current CPU
 *   is last user and may/should destroy this socket.
 * * sk_free is called from any context: process, BH, IRQ. When
 *   it is called, socket has no references from outside -> sk_free
 *   may release descendant resources allocated by the socket, but
 *   to the time when it is called, socket is NOT referenced by any
 *   hash tables, lists etc.
 * * Packets, delivered from outside (from network or from another process)
 *   and enqueued on receive/error queues SHOULD NOT grab reference count,
 *   when they sit in queue. Otherwise, packets will leak when the socket
 *   is looked up by one cpu and unhashing is done by another CPU.
 *   This is true for udp/raw, netlink (leak to receive and error queues), tcp
 *   (leak to backlog). Packet socket does all the processing inside
 *   BR_NETPROTO_LOCK, so that it has not this race condition. UNIX sockets
 *   use a separate SMP lock, so they avoid it as well.
 */

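/*
 * Illustrative sketch (not from the original header): the postulates above
 * boil down to pairing every sock_hold() (or hashed-insert reference) with
 * exactly one sock_put(), e.g. after a locked lookup:
 *
 *	sk = my_lookup(...);		(hypothetical; returns a held sock)
 *	if (sk) {
 *		... use sk ...
 *		sock_put(sk);		(may free sk: do not touch it after)
 *	}
 */
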
/* Ungrab socket and destroy it, if it was the last reference. */
static inline void sock_put(struct sock *sk)
{
	if (atomic_dec_and_test(&sk->sk_refcnt))
		sk_free(sk);
}

extern int sk_receive_skb(struct sock *sk, struct sk_buff *skb,
			  const int nested);

static inline void sk_tx_queue_set(struct sock *sk, int tx_queue)
{
	sk->sk_tx_queue_mapping = tx_queue;
}

static inline void sk_tx_queue_clear(struct sock *sk)
{
	sk->sk_tx_queue_mapping = -1;
}

static inline int sk_tx_queue_get(const struct sock *sk)
{
	return sk ? sk->sk_tx_queue_mapping : -1;
}

static inline void sk_set_socket(struct sock *sk, struct socket *sock)
{
	sk_tx_queue_clear(sk);
	sk->sk_socket = sock;
}

static inline wait_queue_head_t *sk_sleep(struct sock *sk)
{
	return &sk->sk_wq->wait;
}
/* Detach socket from process context.
 * Announce socket dead, detach it from wait queue and inode.
 * Note that parent inode held reference count on this struct sock,
 * we do not release it in this function, because protocol
 * probably wants some additional cleanups or even continuing
 * to work with this socket (TCP).
 */
static inline void sock_orphan(struct sock *sk)
{
	write_lock_bh(&sk->sk_callback_lock);
	sock_set_flag(sk, SOCK_DEAD);
	sk_set_socket(sk, NULL);
	sk->sk_wq = NULL;
	write_unlock_bh(&sk->sk_callback_lock);
}

static inline void sock_graft(struct sock *sk, struct socket *parent)
{
	write_lock_bh(&sk->sk_callback_lock);
	rcu_assign_pointer(sk->sk_wq, parent->wq);
	parent->sk = sk;
	sk_set_socket(sk, parent);
	security_sock_graft(sk, parent);
	write_unlock_bh(&sk->sk_callback_lock);
}

extern int sock_i_uid(struct sock *sk);
extern unsigned long sock_i_ino(struct sock *sk);

static inline struct dst_entry *
__sk_dst_get(struct sock *sk)
{
	return rcu_dereference_check(sk->sk_dst_cache, rcu_read_lock_held() ||
						       sock_owned_by_user(sk) ||
						       lockdep_is_held(&sk->sk_lock.slock));
}

static inline struct dst_entry *
sk_dst_get(struct sock *sk)
{
	struct dst_entry *dst;

	rcu_read_lock();
	dst = rcu_dereference(sk->sk_dst_cache);
	if (dst)
		dst_hold(dst);
	rcu_read_unlock();
	return dst;
}

extern void sk_reset_txq(struct sock *sk);

static inline void dst_negative_advice(struct sock *sk)
{
	struct dst_entry *ndst, *dst = __sk_dst_get(sk);

	if (dst && dst->ops->negative_advice) {
		ndst = dst->ops->negative_advice(dst);

		if (ndst != dst) {
			rcu_assign_pointer(sk->sk_dst_cache, ndst);
			sk_reset_txq(sk);
		}
	}
}

static inline void
__sk_dst_set(struct sock *sk, struct dst_entry *dst)
{
	struct dst_entry *old_dst;

	sk_tx_queue_clear(sk);
	/*
	 * This can be called while sk is owned by the caller only,
	 * with no state that can be checked in a rcu_dereference_check() cond
	 */
	old_dst = rcu_dereference_raw(sk->sk_dst_cache);
	rcu_assign_pointer(sk->sk_dst_cache, dst);
	dst_release(old_dst);
}

static inline void
sk_dst_set(struct sock *sk, struct dst_entry *dst)
{
	spin_lock(&sk->sk_dst_lock);
	__sk_dst_set(sk, dst);
	spin_unlock(&sk->sk_dst_lock);
}

static inline void
__sk_dst_reset(struct sock *sk)
{
	__sk_dst_set(sk, NULL);
}

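/*
 * Illustrative usage (not part of the original header): sk_dst_get()
 * returns a held reference that the caller must drop, whereas
 * __sk_dst_get() is only valid while the socket is locked or
 * rcu_read_lock() is held, per its rcu_dereference_check() condition:
 *
 *	struct dst_entry *dst = sk_dst_get(sk);
 *	if (dst) {
 *		... use dst ...
 *		dst_release(dst);
 *	}
 */
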
static inline void
sk_dst_reset(struct sock *sk)
{
	spin_lock(&sk->sk_dst_lock);
	__sk_dst_reset(sk);
	spin_unlock(&sk->sk_dst_lock);
}

extern struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);

extern struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie);

static inline int sk_can_gso(const struct sock *sk)
{
	return net_gso_ok(sk->sk_route_caps, sk->sk_gso_type);
}

extern void sk_setup_caps(struct sock *sk, struct dst_entry *dst);

static inline void sk_nocaps_add(struct sock *sk, int flags)
{
	sk->sk_route_nocaps |= flags;
	sk->sk_route_caps &= ~flags;
}

static inline int skb_copy_to_page(struct sock *sk, char __user *from,
				   struct sk_buff *skb, struct page *page,
				   int off, int copy)
{
	if (skb->ip_summed == CHECKSUM_NONE) {
		int err = 0;
		__wsum csum = csum_and_copy_from_user(from,
						      page_address(page) + off,
						      copy, 0, &err);
		if (err)
			return err;
		skb->csum = csum_block_add(skb->csum, csum, skb->len);
	} else if (copy_from_user(page_address(page) + off, from, copy))
		return -EFAULT;

	skb->len	   += copy;
	skb->data_len	   += copy;
	skb->truesize	   += copy;
	sk->sk_wmem_queued += copy;
	sk_mem_charge(sk, copy);
	return 0;
}

/**
 * sk_wmem_alloc_get - returns write allocations
 * @sk: socket
 *
 * Returns sk_wmem_alloc minus initial offset of one
 */
static inline int sk_wmem_alloc_get(const struct sock *sk)
{
	return atomic_read(&sk->sk_wmem_alloc) - 1;
}

/**
 * sk_rmem_alloc_get - returns read allocations
 * @sk: socket
 *
 * Returns sk_rmem_alloc
 */
static inline int sk_rmem_alloc_get(const struct sock *sk)
{
	return atomic_read(&sk->sk_rmem_alloc);
}

/**
 * sk_has_allocations - check if allocations are outstanding
 * @sk: socket
 *
 * Returns true if socket has write or read allocations
 */
static inline int sk_has_allocations(const struct sock *sk)
{
	return sk_wmem_alloc_get(sk) || sk_rmem_alloc_get(sk);
}

/**
 * wq_has_sleeper - check if there are any waiting processes
 * @wq: struct socket_wq
 *
 * Returns true if socket_wq has waiting processes
 *
 * The purpose of the wq_has_sleeper and sock_poll_wait is to wrap the memory
 * barrier call. They were added due to the race found within the tcp code.
 *
 * Consider the following tcp code paths:
 *
 * CPU1                     CPU2
 *
 * sys_select               receive packet
 *   ...                    ...
 *   __add_wait_queue       update tp->rcv_nxt
 *   ...                    ...
 *   tp->rcv_nxt check      sock_def_readable
 *   ...                    {
 *   schedule                 rcu_read_lock();
 *                            wq = rcu_dereference(sk->sk_wq);
 *                            if (wq && waitqueue_active(&wq->wait))
 *                              wake_up_interruptible(&wq->wait)
 *                            ...
 *                          }
 *
 * The race for tcp fires when the __add_wait_queue changes done by CPU1 stay
 * in its cache, and so does the tp->rcv_nxt update on CPU2 side.  CPU1 could
 * then end up calling schedule and sleep forever if no more data arrives on
 * the socket.
 *
 */
static inline bool wq_has_sleeper(struct socket_wq *wq)
{

	/*
	 * We need to be sure we are in sync with the
	 * add_wait_queue modifications to the wait queue.
	 *
	 * This memory barrier is paired in the sock_poll_wait.
	 */
	smp_mb();
	return wq && waitqueue_active(&wq->wait);
}

/**
 * sock_poll_wait - place memory barrier behind the poll_wait call.
 * @filp:           file
 * @wait_address:   socket wait queue
 * @p:              poll_table
 *
 * See the comments in the wq_has_sleeper function.
 */
static inline void sock_poll_wait(struct file *filp,
		wait_queue_head_t *wait_address, poll_table *p)
{
	if (p && wait_address) {
		poll_wait(filp, wait_address, p);
		/*
		 * We need to be sure we are in sync with the
		 * socket flags modification.
		 *
		 * This memory barrier is paired in the wq_has_sleeper.
		 */
		smp_mb();
	}
}

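/*
 * Illustrative sketch (not from the original header), modelled on how a
 * datagram-style poll routine might use sock_poll_wait() so the barrier
 * pairing with wq_has_sleeper() holds:
 *
 *	unsigned int my_poll(struct file *file, struct socket *sock,
 *			     poll_table *wait)
 *	{
 *		struct sock *sk = sock->sk;
 *		unsigned int mask = 0;
 *
 *		sock_poll_wait(file, sk_sleep(sk), wait);
 *		if (!skb_queue_empty(&sk->sk_receive_queue))
 *			mask |= POLLIN | POLLRDNORM;
 *		if (sock_writeable(sk))
 *			mask |= POLLOUT | POLLWRNORM;
 *		return mask;
 *	}
 */
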
/*
 *	Queue a received datagram if it will fit. Stream and sequenced
 *	protocols can't normally use this as they need to fit buffers in
 *	and play with them.
 *
 *	Inlined as it's very short and called for pretty much every
 *	packet ever received.
 */

static inline void skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
{
	skb_orphan(skb);
	skb->sk = sk;
	skb->destructor = sock_wfree;
	/*
	 * We used to take a refcount on sk, but following operation
	 * is enough to guarantee sk_free() won't free this sock until
	 * all in-flight packets are completed
	 */
	atomic_add(skb->truesize, &sk->sk_wmem_alloc);
}

static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
{
	skb_orphan(skb);
	skb->sk = sk;
	skb->destructor = sock_rfree;
	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
	sk_mem_charge(sk, skb->truesize);
}

extern void sk_reset_timer(struct sock *sk, struct timer_list *timer,
			   unsigned long expires);

extern void sk_stop_timer(struct sock *sk, struct timer_list *timer);

extern int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);

extern int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb);

/*
 *	Recover an error report and clear atomically
 */

static inline int sock_error(struct sock *sk)
{
	int err;
	if (likely(!sk->sk_err))
		return 0;
	err = xchg(&sk->sk_err, 0);
	return -err;
}

static inline unsigned long sock_wspace(struct sock *sk)
{
	int amt = 0;

	if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
		amt = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc);
		if (amt < 0)
			amt = 0;
	}
	return amt;
}

static inline void sk_wake_async(struct sock *sk, int how, int band)
{
	if (sock_flag(sk, SOCK_FASYNC))
		sock_wake_async(sk->sk_socket, how, band);
}

#define SOCK_MIN_SNDBUF 2048
/*
 * Since sk_rmem_alloc sums skb->truesize, even a small frame might need
 * sizeof(sk_buff) + MTU + padding, unless the net driver performs copybreak
 */
#define SOCK_MIN_RCVBUF (2048 + sizeof(struct sk_buff))

static inline void sk_stream_moderate_sndbuf(struct sock *sk)
{
	if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK)) {
		sk->sk_sndbuf = min(sk->sk_sndbuf, sk->sk_wmem_queued >> 1);
		sk->sk_sndbuf = max(sk->sk_sndbuf, SOCK_MIN_SNDBUF);
	}
}

struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);

static inline struct page *sk_stream_alloc_page(struct sock *sk)
{
	struct page *page = NULL;

	page = alloc_pages(sk->sk_allocation, 0);
	if (!page) {
		sk->sk_prot->enter_memory_pressure(sk);
		sk_stream_moderate_sndbuf(sk);
	}
	return page;
}

/*
 *	Default write policy as shown to user space via poll/select/SIGIO
 */
static inline int sock_writeable(const struct sock *sk)
{
	return atomic_read(&sk->sk_wmem_alloc) < (sk->sk_sndbuf >> 1);
}

static inline gfp_t gfp_any(void)
{
	return in_softirq() ? GFP_ATOMIC : GFP_KERNEL;
}

static inline long sock_rcvtimeo(const struct sock *sk, int noblock)
{
	return noblock ? 0 : sk->sk_rcvtimeo;
}

static inline long sock_sndtimeo(const struct sock *sk, int noblock)
{
	return noblock ? 0 : sk->sk_sndtimeo;
}

static inline int sock_rcvlowat(const struct sock *sk, int waitall, int len)
{
	return (waitall ? len : min_t(int, sk->sk_rcvlowat, len)) ? : 1;
}

/* Alas, with timeout socket operations are not restartable.
 * Compare this to poll().
 */
static inline int sock_intr_errno(long timeo)
{
	return timeo == MAX_SCHEDULE_TIMEOUT ? -ERESTARTSYS : -EINTR;
}

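/*
 * Illustrative sketch (not from the original header): a blocking recvmsg
 * path typically combines these helpers like so:
 *
 *	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
 *	while (no data queued) {
 *		if (!timeo)
 *			return -EAGAIN;
 *		if (signal_pending(current))
 *			return sock_intr_errno(timeo);
 *		sk_wait_data(sk, &timeo);
 *	}
 */
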
extern void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
				  struct sk_buff *skb);

static __inline__ void
sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
{
	ktime_t kt = skb->tstamp;
	struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb);

	/*
	 * generate control messages if
	 * - receive time stamping in software requested (SOCK_RCVTSTAMP
	 *   or SOCK_TIMESTAMPING_RX_SOFTWARE)
	 * - software time stamp available and wanted
	 *   (SOCK_TIMESTAMPING_SOFTWARE)
	 * - hardware time stamps available and wanted
	 *   (SOCK_TIMESTAMPING_SYS_HARDWARE or
	 *   SOCK_TIMESTAMPING_RAW_HARDWARE)
	 */
	if (sock_flag(sk, SOCK_RCVTSTAMP) ||
	    sock_flag(sk, SOCK_TIMESTAMPING_RX_SOFTWARE) ||
	    (kt.tv64 && sock_flag(sk, SOCK_TIMESTAMPING_SOFTWARE)) ||
	    (hwtstamps->hwtstamp.tv64 &&
	     sock_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE)) ||
	    (hwtstamps->syststamp.tv64 &&
	     sock_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE)))
		__sock_recv_timestamp(msg, sk, skb);
	else
		sk->sk_stamp = kt;
}

extern void __sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
				     struct sk_buff *skb);

static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
					  struct sk_buff *skb)
{
#define FLAGS_TS_OR_DROPS ((1UL << SOCK_RXQ_OVFL)			| \
			   (1UL << SOCK_RCVTSTAMP)			| \
			   (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE)	| \
			   (1UL << SOCK_TIMESTAMPING_SOFTWARE)		| \
			   (1UL << SOCK_TIMESTAMPING_RAW_HARDWARE)	| \
			   (1UL << SOCK_TIMESTAMPING_SYS_HARDWARE))

	if (sk->sk_flags & FLAGS_TS_OR_DROPS)
		__sock_recv_ts_and_drops(msg, sk, skb);
	else
		sk->sk_stamp = skb->tstamp;
}

/**
 * sock_tx_timestamp - checks whether the outgoing packet is to be time stamped
 * @sk:		socket sending this packet
 * @tx_flags:	filled with instructions for time stamping
 *
 * Currently only depends on SOCK_TIMESTAMPING* flags. Returns error code if
 * parameters are invalid.
 */
extern int sock_tx_timestamp(struct sock *sk, __u8 *tx_flags);

/**
 * sk_eat_skb - Release a skb if it is no longer needed
 * @sk: socket to eat this skb from
 * @skb: socket buffer to eat
 * @copied_early: flag indicating whether DMA operations copied this data early
 *
 * This routine must be called with interrupts disabled or with the socket
 * locked so that the sk_buff queue operation is ok.
 */
#ifdef CONFIG_NET_DMA
static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, int copied_early)
{
	__skb_unlink(skb, &sk->sk_receive_queue);
	if (!copied_early)
		__kfree_skb(skb);
	else
		__skb_queue_tail(&sk->sk_async_wait_queue, skb);
}
#else
static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, int copied_early)
{
	__skb_unlink(skb, &sk->sk_receive_queue);
	__kfree_skb(skb);
}
#endif

static inline
struct net *sock_net(const struct sock *sk)
{
	return read_pnet(&sk->sk_net);
}

static inline
void sock_net_set(struct sock *sk, struct net *net)
{
	write_pnet(&sk->sk_net, net);
}

/*
 * Kernel sockets, e.g. rtnl or icmp_socket, are a part of a namespace.
 * They should not hold a reference to a namespace, so that the
 * namespace can be stopped.
 * Sockets after sk_change_net should be released using sk_release_kernel.
 */
static inline void sk_change_net(struct sock *sk, struct net *net)
{
	put_net(sock_net(sk));
	sock_net_set(sk, hold_net(net));
}

static inline struct sock *skb_steal_sock(struct sk_buff *skb)
{
	if (unlikely(skb->sk)) {
		struct sock *sk = skb->sk;

		skb->destructor = NULL;
		skb->sk = NULL;
		return sk;
	}
	return NULL;
}

extern void sock_enable_timestamp(struct sock *sk, int flag);
extern int sock_get_timestamp(struct sock *, struct timeval __user *);
extern int sock_get_timestampns(struct sock *, struct timespec __user *);

/*
 *	Enable debug/info messages
 */
extern int net_msg_warn;
#define NETDEBUG(fmt, args...) \
	do { if (net_msg_warn) printk(fmt, ##args); } while (0)

#define LIMIT_NETDEBUG(fmt, args...) \
	do { if (net_msg_warn && net_ratelimit()) printk(fmt, ##args); } while (0)

extern __u32 sysctl_wmem_max;
extern __u32 sysctl_rmem_max;

extern void sk_init(void);

extern int sysctl_optmem_max;

extern __u32 sysctl_wmem_default;
extern __u32 sysctl_rmem_default;

#endif	/* _SOCK_H */