/* SPDX-License-Identifier: GPL-2.0 */
/* IP Virtual Server
 * data structure and functionality definitions
 */

#ifndef _NET_IP_VS_H
#define _NET_IP_VS_H

#include <linux/ip_vs.h>		/* definitions shared with userland */

#include <asm/types.h>			/* for __uXX types */

#include <linux/list.h>			/* for struct list_head */
#include <linux/spinlock.h>		/* for spinlock_t */
#include <linux/atomic.h>		/* for atomic_t */
#include <linux/refcount.h>		/* for refcount_t */

#include <linux/compiler.h>
#include <linux/timer.h>
#include <linux/bug.h>

#include <net/checksum.h>
#include <linux/netfilter.h>		/* for union nf_inet_addr */
#include <linux/ip.h>
#include <linux/ipv6.h>			/* for struct ipv6hdr */
#include <net/ipv6.h>
#if IS_ENABLED(CONFIG_IP_VS_IPV6)
#include <linux/netfilter_ipv6/ip6_tables.h>
#endif
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
#include <net/netfilter/nf_conntrack.h>
#endif
#include <net/net_namespace.h>		/* Net namespace */

#define IP_VS_HDR_INVERSE	1
#define IP_VS_HDR_ICMP		2

/* Generic access of ipvs struct */
static inline struct netns_ipvs *net_ipvs(struct net *net)
{
	return net->ipvs;
}

/* Connections' size value needed by ip_vs_ctl.c */
extern int ip_vs_conn_tab_size;

struct ip_vs_iphdr {
	int hdr_flags;	/* ipvs flags */
	__u32 off;	/* Where the IPv4 or IPv6 header starts */
	__u32 len;	/* IPv4 simply where L4 starts
			 * IPv6 where L4 Transport Header starts */
	__u16 fragoffs; /* IPv6 fragment offset, 0 if first frag (or not frag) */
	__s16 protocol;
	__s32 flags;
	union nf_inet_addr saddr;
	union nf_inet_addr daddr;
};

static inline void *frag_safe_skb_hp(const struct sk_buff *skb, int offset,
				     int len, void *buffer)
{
	return skb_header_pointer(skb, offset, len, buffer);
}

/* This function handles filling *ip_vs_iphdr, both for IPv4 and IPv6.
 * IPv6 requires some extra work, because finding the proper header
 * position depends on the IPv6 extension headers.
 */
static inline int
ip_vs_fill_iph_skb_off(int af, const struct sk_buff *skb, int offset,
		       int hdr_flags, struct ip_vs_iphdr *iphdr)
{
	iphdr->hdr_flags = hdr_flags;
	iphdr->off = offset;

#ifdef CONFIG_IP_VS_IPV6
	if (af == AF_INET6) {
		struct ipv6hdr _iph;
		const struct ipv6hdr *iph = skb_header_pointer(
			skb, offset, sizeof(_iph), &_iph);
		if (!iph)
			return 0;

		iphdr->saddr.in6 = iph->saddr;
		iphdr->daddr.in6 = iph->daddr;
		/* ipv6_find_hdr() updates len, flags */
		iphdr->len = offset;
		iphdr->flags = 0;
		iphdr->protocol = ipv6_find_hdr(skb, &iphdr->len, -1,
						&iphdr->fragoffs,
						&iphdr->flags);
		if (iphdr->protocol < 0)
			return 0;
	} else
#endif
	{
		struct iphdr _iph;
		const struct iphdr *iph = skb_header_pointer(
			skb, offset, sizeof(_iph), &_iph);
		if (!iph)
			return 0;

		iphdr->len = offset + iph->ihl * 4;
		iphdr->fragoffs = 0;
		iphdr->protocol = iph->protocol;
		iphdr->saddr.ip = iph->saddr;
		iphdr->daddr.ip = iph->daddr;
	}

	return 1;
}

static inline int
ip_vs_fill_iph_skb_icmp(int af, const struct sk_buff *skb, int offset,
			bool inverse, struct ip_vs_iphdr *iphdr)
{
	int hdr_flags = IP_VS_HDR_ICMP;

	if (inverse)
		hdr_flags |= IP_VS_HDR_INVERSE;

	return ip_vs_fill_iph_skb_off(af, skb, offset, hdr_flags, iphdr);
}

static inline int
ip_vs_fill_iph_skb(int af, const struct sk_buff *skb, bool inverse,
		   struct ip_vs_iphdr *iphdr)
{
	int hdr_flags = 0;

	if (inverse)
		hdr_flags |= IP_VS_HDR_INVERSE;

	return ip_vs_fill_iph_skb_off(af, skb, skb_network_offset(skb),
				      hdr_flags, iphdr);
}

static inline bool
ip_vs_iph_inverse(const struct ip_vs_iphdr *iph)
{
	return !!(iph->hdr_flags & IP_VS_HDR_INVERSE);
}

static inline bool
ip_vs_iph_icmp(const struct ip_vs_iphdr *iph)
{
	return !!(iph->hdr_flags & IP_VS_HDR_ICMP);
}

static inline void ip_vs_addr_copy(int af, union nf_inet_addr *dst,
				   const union nf_inet_addr *src)
{
#ifdef CONFIG_IP_VS_IPV6
	if (af == AF_INET6)
		dst->in6 = src->in6;
	else
#endif
	dst->ip = src->ip;
}

static inline void ip_vs_addr_set(int af, union nf_inet_addr *dst,
				  const union nf_inet_addr *src)
{
#ifdef CONFIG_IP_VS_IPV6
	if (af == AF_INET6) {
		dst->in6 = src->in6;
		return;
	}
#endif
	dst->ip = src->ip;
	dst->all[1] = 0;
	dst->all[2] = 0;
	dst->all[3] = 0;
}

static inline int ip_vs_addr_equal(int af, const union nf_inet_addr *a,
				   const union nf_inet_addr *b)
{
#ifdef CONFIG_IP_VS_IPV6
	if (af == AF_INET6)
		return ipv6_addr_equal(&a->in6, &b->in6);
#endif
	return a->ip == b->ip;
}
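
/* Illustrative sketch, not part of this header's API: how a netfilter hook
 * might fill an ip_vs_iphdr before doing any scheduling or lookup.  The
 * hook name and the early "enable" check are assumptions for the example;
 * only net_ipvs() and ip_vs_fill_iph_skb() come from this header.
 *
 *	static unsigned int example_hook(void *priv, struct sk_buff *skb,
 *					 const struct nf_hook_state *state)
 *	{
 *		struct netns_ipvs *ipvs = net_ipvs(state->net);
 *		struct ip_vs_iphdr iph;
 *
 *		if (!ipvs || !ipvs->enable)
 *			return NF_ACCEPT;
 *		if (!ip_vs_fill_iph_skb(state->pf, skb, false, &iph))
 *			return NF_ACCEPT;
 *		(iph.protocol, iph.saddr and iph.daddr are now valid)
 *		return NF_ACCEPT;
 *	}
 */
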
#ifdef CONFIG_IP_VS_DEBUG
#include <linux/net.h>

int ip_vs_get_debug_level(void);

static inline const char *ip_vs_dbg_addr(int af, char *buf, size_t buf_len,
					 const union nf_inet_addr *addr,
					 int *idx)
{
	int len;
#ifdef CONFIG_IP_VS_IPV6
	if (af == AF_INET6)
		len = snprintf(&buf[*idx], buf_len - *idx, "[%pI6c]",
			       &addr->in6) + 1;
	else
#endif
		len = snprintf(&buf[*idx], buf_len - *idx, "%pI4",
			       &addr->ip) + 1;

	*idx += len;
	BUG_ON(*idx > buf_len + 1);
	return &buf[*idx - len];
}

#define IP_VS_DBG_BUF(level, msg, ...)					\
	do {								\
		char ip_vs_dbg_buf[160];				\
		int ip_vs_dbg_idx = 0;					\
		if (level <= ip_vs_get_debug_level())			\
			printk(KERN_DEBUG pr_fmt(msg), ##__VA_ARGS__);	\
	} while (0)
#define IP_VS_ERR_BUF(msg...)						\
	do {								\
		char ip_vs_dbg_buf[160];				\
		int ip_vs_dbg_idx = 0;					\
		pr_err(msg);						\
	} while (0)

/* Only use from within IP_VS_DBG_BUF() or IP_VS_ERR_BUF macros */
#define IP_VS_DBG_ADDR(af, addr)					\
	ip_vs_dbg_addr(af, ip_vs_dbg_buf,				\
		       sizeof(ip_vs_dbg_buf), addr,			\
		       &ip_vs_dbg_idx)

#define IP_VS_DBG(level, msg, ...)					\
	do {								\
		if (level <= ip_vs_get_debug_level())			\
			printk(KERN_DEBUG pr_fmt(msg), ##__VA_ARGS__);	\
	} while (0)
#define IP_VS_DBG_RL(msg, ...)						\
	do {								\
		if (net_ratelimit())					\
			printk(KERN_DEBUG pr_fmt(msg), ##__VA_ARGS__);	\
	} while (0)
#define IP_VS_DBG_PKT(level, af, pp, skb, ofs, msg)			\
	do {								\
		if (level <= ip_vs_get_debug_level())			\
			pp->debug_packet(af, pp, skb, ofs, msg);	\
	} while (0)
#define IP_VS_DBG_RL_PKT(level, af, pp, skb, ofs, msg)			\
	do {								\
		if (level <= ip_vs_get_debug_level() &&			\
		    net_ratelimit())					\
			pp->debug_packet(af, pp, skb, ofs, msg);	\
	} while (0)
#else	/* NO DEBUGGING at ALL */
#define IP_VS_DBG_BUF(level, msg...)  do {} while (0)
#define IP_VS_ERR_BUF(msg...)  do {} while (0)
#define IP_VS_DBG(level, msg...)  do {} while (0)
#define IP_VS_DBG_RL(msg...)  do {} while (0)
#define IP_VS_DBG_PKT(level, af, pp, skb, ofs, msg)	do {} while (0)
#define IP_VS_DBG_RL_PKT(level, af, pp, skb, ofs, msg)	do {} while (0)
#endif

#define IP_VS_BUG() BUG()
#define IP_VS_ERR_RL(msg, ...)						\
	do {								\
		if (net_ratelimit())					\
			pr_err(msg, ##__VA_ARGS__);			\
	} while (0)

#ifdef CONFIG_IP_VS_DEBUG
#define EnterFunction(level)						\
	do {								\
		if (level <= ip_vs_get_debug_level())			\
			printk(KERN_DEBUG				\
			       pr_fmt("Enter: %s, %s line %i\n"),	\
			       __func__, __FILE__, __LINE__);		\
	} while (0)
#define LeaveFunction(level)						\
	do {								\
		if (level <= ip_vs_get_debug_level())			\
			printk(KERN_DEBUG				\
			       pr_fmt("Leave: %s, %s line %i\n"),	\
			       __func__, __FILE__, __LINE__);		\
	} while (0)
#else
#define EnterFunction(level)   do {} while (0)
#define LeaveFunction(level)   do {} while (0)
#endif
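
/* Illustrative sketch: IP_VS_DBG_ADDR() only works inside IP_VS_DBG_BUF()
 * or IP_VS_ERR_BUF(), because it expands into a reference to the local
 * ip_vs_dbg_buf/ip_vs_dbg_idx variables declared by those macros.  A
 * typical call (cp is assumed to be an ip_vs_conn in the caller):
 *
 *	IP_VS_DBG_BUF(7, "new connection %s:%d\n",
 *		      IP_VS_DBG_ADDR(cp->af, &cp->caddr), ntohs(cp->cport));
 */
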
/* The port number of FTP service (in network order). */
#define FTPPORT  cpu_to_be16(21)
#define FTPDATA  cpu_to_be16(20)

/* TCP State Values */
enum {
	IP_VS_TCP_S_NONE = 0,
	IP_VS_TCP_S_ESTABLISHED,
	IP_VS_TCP_S_SYN_SENT,
	IP_VS_TCP_S_SYN_RECV,
	IP_VS_TCP_S_FIN_WAIT,
	IP_VS_TCP_S_TIME_WAIT,
	IP_VS_TCP_S_CLOSE,
	IP_VS_TCP_S_CLOSE_WAIT,
	IP_VS_TCP_S_LAST_ACK,
	IP_VS_TCP_S_LISTEN,
	IP_VS_TCP_S_SYNACK,
	IP_VS_TCP_S_LAST
};

/* UDP State Values */
enum {
	IP_VS_UDP_S_NORMAL,
	IP_VS_UDP_S_LAST,
};

/* ICMP State Values */
enum {
	IP_VS_ICMP_S_NORMAL,
	IP_VS_ICMP_S_LAST,
};

/* SCTP State Values */
enum ip_vs_sctp_states {
	IP_VS_SCTP_S_NONE,
	IP_VS_SCTP_S_INIT1,
	IP_VS_SCTP_S_INIT,
	IP_VS_SCTP_S_COOKIE_SENT,
	IP_VS_SCTP_S_COOKIE_REPLIED,
	IP_VS_SCTP_S_COOKIE_WAIT,
	IP_VS_SCTP_S_COOKIE,
	IP_VS_SCTP_S_COOKIE_ECHOED,
	IP_VS_SCTP_S_ESTABLISHED,
	IP_VS_SCTP_S_SHUTDOWN_SENT,
	IP_VS_SCTP_S_SHUTDOWN_RECEIVED,
	IP_VS_SCTP_S_SHUTDOWN_ACK_SENT,
	IP_VS_SCTP_S_REJECTED,
	IP_VS_SCTP_S_CLOSED,
	IP_VS_SCTP_S_LAST
};

/* Delta sequence info structure
 * Each ip_vs_conn has 2 (output AND input seq. changes).
 * Only used in the VS/NAT.
 */
struct ip_vs_seq {
	__u32 init_seq;		/* Add delta from this seq */
	__u32 delta;		/* Delta in sequence numbers */
	__u32 previous_delta;	/* Delta in sequence numbers
				 * before last resized pkt */
};

/* counters per cpu */
struct ip_vs_counters {
	__u64 conns;		/* connections scheduled */
	__u64 inpkts;		/* incoming packets */
	__u64 outpkts;		/* outgoing packets */
	__u64 inbytes;		/* incoming bytes */
	__u64 outbytes;		/* outgoing bytes */
};
/* Stats per cpu */
struct ip_vs_cpu_stats {
	struct ip_vs_counters	cnt;
	struct u64_stats_sync	syncp;
};

/* IPVS statistics objects */
struct ip_vs_estimator {
	struct list_head	list;

	u64			last_inbytes;
	u64			last_outbytes;
	u64			last_conns;
	u64			last_inpkts;
	u64			last_outpkts;

	u64			cps;
	u64			inpps;
	u64			outpps;
	u64			inbps;
	u64			outbps;
};

/*
 * IPVS statistics object, 64-bit kernel version of struct ip_vs_stats_user
 */
struct ip_vs_kstats {
	u64			conns;		/* connections scheduled */
	u64			inpkts;		/* incoming packets */
	u64			outpkts;	/* outgoing packets */
	u64			inbytes;	/* incoming bytes */
	u64			outbytes;	/* outgoing bytes */

	u64			cps;		/* current connection rate */
	u64			inpps;		/* current in packet rate */
	u64			outpps;		/* current out packet rate */
	u64			inbps;		/* current in byte rate */
	u64			outbps;		/* current out byte rate */
};

struct ip_vs_stats {
	struct ip_vs_kstats	kstats;		/* kernel statistics */
	struct ip_vs_estimator	est;		/* estimator */
	struct ip_vs_cpu_stats __percpu *cpustats;	/* per cpu counters */
	spinlock_t		lock;		/* spin lock */
	struct ip_vs_kstats	kstats0;	/* reset values */
};
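
/* Illustrative sketch, not part of this header: the per-cpu counters are
 * read with the u64_stats_sync retry loop before they are summed into
 * ip_vs_kstats (ip_vs_ctl.c/ip_vs_est.c do this).  The exact fetch API
 * variant differs between kernel versions and is an assumption here.
 *
 *	u64 conns = 0;
 *	int cpu;
 *
 *	for_each_possible_cpu(cpu) {
 *		struct ip_vs_cpu_stats *s = per_cpu_ptr(stats->cpustats, cpu);
 *		unsigned int start;
 *		u64 v;
 *
 *		do {
 *			start = u64_stats_fetch_begin(&s->syncp);
 *			v = s->cnt.conns;
 *		} while (u64_stats_fetch_retry(&s->syncp, start));
 *		conns += v;
 *	}
 */
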
struct dst_entry;
struct iphdr;
struct ip_vs_conn;
struct ip_vs_app;
struct sk_buff;
struct ip_vs_proto_data;

struct ip_vs_protocol {
	struct ip_vs_protocol	*next;
	char			*name;
	u16			protocol;
	u16			num_states;
	int			dont_defrag;

	void (*init)(struct ip_vs_protocol *pp);

	void (*exit)(struct ip_vs_protocol *pp);

	int (*init_netns)(struct netns_ipvs *ipvs, struct ip_vs_proto_data *pd);

	void (*exit_netns)(struct netns_ipvs *ipvs, struct ip_vs_proto_data *pd);

	int (*conn_schedule)(struct netns_ipvs *ipvs,
			     int af, struct sk_buff *skb,
			     struct ip_vs_proto_data *pd,
			     int *verdict, struct ip_vs_conn **cpp,
			     struct ip_vs_iphdr *iph);

	struct ip_vs_conn *
	(*conn_in_get)(struct netns_ipvs *ipvs,
		       int af,
		       const struct sk_buff *skb,
		       const struct ip_vs_iphdr *iph);

	struct ip_vs_conn *
	(*conn_out_get)(struct netns_ipvs *ipvs,
			int af,
			const struct sk_buff *skb,
			const struct ip_vs_iphdr *iph);

	int (*snat_handler)(struct sk_buff *skb, struct ip_vs_protocol *pp,
			    struct ip_vs_conn *cp, struct ip_vs_iphdr *iph);

	int (*dnat_handler)(struct sk_buff *skb, struct ip_vs_protocol *pp,
			    struct ip_vs_conn *cp, struct ip_vs_iphdr *iph);

	int (*csum_check)(int af, struct sk_buff *skb,
			  struct ip_vs_protocol *pp);

	const char *(*state_name)(int state);

	void (*state_transition)(struct ip_vs_conn *cp, int direction,
				 const struct sk_buff *skb,
				 struct ip_vs_proto_data *pd);

	int (*register_app)(struct netns_ipvs *ipvs, struct ip_vs_app *inc);

	void (*unregister_app)(struct netns_ipvs *ipvs, struct ip_vs_app *inc);

	int (*app_conn_bind)(struct ip_vs_conn *cp);

	void (*debug_packet)(int af, struct ip_vs_protocol *pp,
			     const struct sk_buff *skb,
			     int offset,
			     const char *msg);

	void (*timeout_change)(struct ip_vs_proto_data *pd, int flags);
};

/* protocol data per netns */
struct ip_vs_proto_data {
	struct ip_vs_proto_data	*next;
	struct ip_vs_protocol	*pp;
	int			*timeout_table;	/* protocol timeout table */
	atomic_t		appcnt;		/* counter of proto app incs. */
	struct tcp_states_t	*tcp_state_table;
};

struct ip_vs_protocol   *ip_vs_proto_get(unsigned short proto);
struct ip_vs_proto_data *ip_vs_proto_data_get(struct netns_ipvs *ipvs,
					      unsigned short proto);

struct ip_vs_conn_param {
	struct netns_ipvs		*ipvs;
	const union nf_inet_addr	*caddr;
	const union nf_inet_addr	*vaddr;
	__be16				cport;
	__be16				vport;
	__u16				protocol;
	u16				af;

	const struct ip_vs_pe		*pe;
	char				*pe_data;
	__u8				pe_data_len;
};
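
/* Illustrative sketch, not part of this header: per-netns protocol state
 * is reached through ip_vs_proto_data_get(), and the shared handlers
 * through its ->pp member.  ipvs, iph, cp and skb are assumed to exist
 * in the caller; IP_VS_DIR_INPUT is defined further down in this file.
 *
 *	struct ip_vs_proto_data *pd;
 *
 *	pd = ip_vs_proto_data_get(ipvs, iph.protocol);
 *	if (pd && pd->pp->state_transition)
 *		pd->pp->state_transition(cp, IP_VS_DIR_INPUT, skb, pd);
 */
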
/* IP_VS structure allocated for each dynamically scheduled connection */
struct ip_vs_conn {
	struct hlist_node	c_list;		/* hashed list heads */
	/* Protocol, addresses and port numbers */
	__be16			cport;
	__be16			dport;
	__be16			vport;
	u16			af;		/* address family */
	union nf_inet_addr	caddr;		/* client address */
	union nf_inet_addr	vaddr;		/* virtual address */
	union nf_inet_addr	daddr;		/* destination address */
	volatile __u32		flags;		/* status flags */
	__u16			protocol;	/* Which protocol (TCP/UDP) */
	__u16			daf;		/* Address family of the dest */
	struct netns_ipvs	*ipvs;

	/* counter and timer */
	refcount_t		refcnt;		/* reference count */
	struct timer_list	timer;		/* Expiration timer */
	volatile unsigned long	timeout;	/* timeout */

	/* Flags and state transition */
	spinlock_t		lock;		/* lock for state transition */
	volatile __u16		state;		/* state info */
	volatile __u16		old_state;	/* old state, to be used for
						 * state transition triggered
						 * synchronization
						 */
	__u32			fwmark;		/* Firewall mark from skb */
	unsigned long		sync_endtime;	/* jiffies + sent_retries */

	/* Control members */
	struct ip_vs_conn	*control;	/* Master control connection */
	atomic_t		n_control;	/* Number of controlled ones */
	struct ip_vs_dest	*dest;		/* real server */
	atomic_t		in_pkts;	/* incoming packet counter */

	/* Packet transmitter for different forwarding methods.  If it
	 * mangles the packet, it must return NF_DROP or better NF_STOLEN,
	 * otherwise this must be changed to a sk_buff **.
	 * NF_ACCEPT can be returned when destination is local.
	 */
	int (*packet_xmit)(struct sk_buff *skb, struct ip_vs_conn *cp,
			   struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);

	/* Note: we can group the following members into a structure,
	 * in order to save more space, and the following members are
	 * only used in VS/NAT anyway
	 */
	struct ip_vs_app	*app;		/* bound ip_vs_app object */
	void			*app_data;	/* Application private data */
	struct ip_vs_seq	in_seq;		/* incoming seq. struct */
	struct ip_vs_seq	out_seq;	/* outgoing seq. struct */

	const struct ip_vs_pe	*pe;
	char			*pe_data;
	__u8			pe_data_len;

	struct rcu_head		rcu_head;
};

/* Extended internal versions of struct ip_vs_service_user and ip_vs_dest_user
 * for IPv6 support.
 *
 * We need these to conveniently pass around service and destination
 * options, but unfortunately, we also need to keep the old definitions to
 * maintain userspace backwards compatibility for the setsockopt interface.
 */
struct ip_vs_service_user_kern {
	/* virtual service addresses */
	u16			af;
	u16			protocol;
	union nf_inet_addr	addr;		/* virtual ip address */
	__be16			port;
	u32			fwmark;		/* firewall mark of service */

	/* virtual service options */
	char			*sched_name;
	char			*pe_name;
	unsigned int		flags;		/* virtual service flags */
	unsigned int		timeout;	/* persistent timeout in sec */
	__be32			netmask;	/* persistent netmask or plen */
};


struct ip_vs_dest_user_kern {
	/* destination server address */
	union nf_inet_addr	addr;
	__be16			port;

	/* real server options */
	unsigned int		conn_flags;	/* connection flags */
	int			weight;		/* destination weight */

	/* thresholds for active connections */
	u32			u_threshold;	/* upper threshold */
	u32			l_threshold;	/* lower threshold */

	/* Address family of addr */
	u16			af;
};
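
/* Illustrative sketch, not part of this header: ip_vs_ctl.c fills these
 * kernel-side specs from the sockopt/netlink interfaces.  A minimal TCP
 * virtual service with one NAT destination could be described roughly as
 * below; vip, rip and the specific values are assumptions for the example.
 *
 *	struct ip_vs_service_user_kern usvc = {
 *		.af		= AF_INET,
 *		.protocol	= IPPROTO_TCP,
 *		.addr.ip	= vip,
 *		.port		= htons(80),
 *		.sched_name	= "rr",
 *		.timeout	= 300,
 *	};
 *	struct ip_vs_dest_user_kern udest = {
 *		.addr.ip	= rip,
 *		.port		= htons(8080),
 *		.conn_flags	= IP_VS_CONN_F_MASQ,
 *		.weight		= 1,
 *		.af		= AF_INET,
 *	};
 */
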
/* The information about the virtual service offered to the net and the
 * forwarding entries.
 */
struct ip_vs_service {
	struct hlist_node	s_list;		/* for normal service table */
	struct hlist_node	f_list;		/* for fwmark-based service table */
	atomic_t		refcnt;		/* reference counter */

	u16			af;		/* address family */
	__u16			protocol;	/* which protocol (TCP/UDP) */
	union nf_inet_addr	addr;		/* IP address for virtual service */
	__be16			port;		/* port number for the service */
	__u32			fwmark;		/* firewall mark of the service */
	unsigned int		flags;		/* service status flags */
	unsigned int		timeout;	/* persistent timeout in ticks */
	__be32			netmask;	/* grouping granularity, mask/plen */
	struct netns_ipvs	*ipvs;

	struct list_head	destinations;	/* real server d-linked list */
	__u32			num_dests;	/* number of servers */
	struct ip_vs_stats	stats;		/* statistics for the service */

	/* for scheduling */
	struct ip_vs_scheduler __rcu *scheduler; /* bound scheduler object */
	spinlock_t		sched_lock;	/* lock sched_data */
	void			*sched_data;	/* scheduler application data */

	/* alternate persistence engine */
	struct ip_vs_pe __rcu	*pe;
	int			conntrack_afmask;

	struct rcu_head		rcu_head;
};

/* Information for cached dst */
struct ip_vs_dest_dst {
	struct dst_entry	*dst_cache;	/* destination cache entry */
	u32			dst_cookie;
	union nf_inet_addr	dst_saddr;
	struct rcu_head		rcu_head;
};

/* The real server destination forwarding entry with ip address, port number,
 * and so on.
 */
struct ip_vs_dest {
	struct list_head	n_list;		/* for the dests in the service */
	struct hlist_node	d_list;		/* for table with all the dests */

	u16			af;		/* address family */
	__be16			port;		/* port number of the server */
	union nf_inet_addr	addr;		/* IP address of the server */
	volatile unsigned int	flags;		/* dest status flags */
	atomic_t		conn_flags;	/* flags to copy to conn */
	atomic_t		weight;		/* server weight */
	atomic_t		last_weight;	/* server latest weight */

	refcount_t		refcnt;		/* reference counter */
	struct ip_vs_stats	stats;		/* statistics */
	unsigned long		idle_start;	/* start time, jiffies */

	/* connection counters and thresholds */
	atomic_t		activeconns;	/* active connections */
	atomic_t		inactconns;	/* inactive connections */
	atomic_t		persistconns;	/* persistent connections */
	__u32			u_threshold;	/* upper threshold */
	__u32			l_threshold;	/* lower threshold */

	/* for destination cache */
	spinlock_t		dst_lock;	/* lock of dst_cache */
	struct ip_vs_dest_dst __rcu *dest_dst;	/* cached dst info */

	/* for virtual service */
	struct ip_vs_service __rcu *svc;	/* service it belongs to */
	__u16			protocol;	/* which protocol (TCP/UDP) */
	__be16			vport;		/* virtual port number */
	union nf_inet_addr	vaddr;		/* virtual IP address */
	__u32			vfwmark;	/* firewall mark of service */

	struct list_head	t_list;		/* in dest_trash */
	unsigned int		in_rs_table:1;	/* we are in rs_table */
};

/* The scheduler object */
struct ip_vs_scheduler {
	struct list_head	n_list;		/* d-linked list head */
	char			*name;		/* scheduler name */
	atomic_t		refcnt;		/* reference counter */
	struct module		*module;	/* THIS_MODULE/NULL */

	/* scheduler initializing service */
	int (*init_service)(struct ip_vs_service *svc);
	/* scheduling service finish */
	void (*done_service)(struct ip_vs_service *svc);
	/* dest is linked */
	int (*add_dest)(struct ip_vs_service *svc, struct ip_vs_dest *dest);
	/* dest is unlinked */
	int (*del_dest)(struct ip_vs_service *svc, struct ip_vs_dest *dest);
	/* dest is updated */
	int (*upd_dest)(struct ip_vs_service *svc, struct ip_vs_dest *dest);

	/* selecting a server from the given service */
	struct ip_vs_dest *(*schedule)(struct ip_vs_service *svc,
				       const struct sk_buff *skb,
				       struct ip_vs_iphdr *iph);
};

/* The persistence engine object */
struct ip_vs_pe {
	struct list_head	n_list;		/* d-linked list head */
	char			*name;		/* engine name */
	atomic_t		refcnt;		/* reference counter */
	struct module		*module;	/* THIS_MODULE/NULL */

	/* get the connection template, if any */
	int (*fill_param)(struct ip_vs_conn_param *p, struct sk_buff *skb);
	bool (*ct_match)(const struct ip_vs_conn_param *p,
			 struct ip_vs_conn *ct);
	u32 (*hashkey_raw)(const struct ip_vs_conn_param *p, u32 initval,
			   bool inverse);
	int (*show_pe_data)(const struct ip_vs_conn *cp, char *buf);
	/* create connections for real-server outgoing packets */
	struct ip_vs_conn *(*conn_out)(struct ip_vs_service *svc,
				       struct ip_vs_dest *dest,
				       struct sk_buff *skb,
				       const struct ip_vs_iphdr *iph,
				       __be16 dport, __be16 cport);
};
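
/* Illustrative sketch, not part of this header: the skeleton of a
 * scheduler.  It walks svc->destinations under RCU and returns the first
 * usable, weighted destination; example_schedule/example_scheduler are
 * assumptions for the example.  The in-tree schedulers (ip_vs_rr.c,
 * ip_vs_wlc.c, ...) follow this shape.
 *
 *	static struct ip_vs_dest *
 *	example_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
 *			 struct ip_vs_iphdr *iph)
 *	{
 *		struct ip_vs_dest *dest;
 *
 *		list_for_each_entry_rcu(dest, &svc->destinations, n_list) {
 *			if (!(dest->flags & IP_VS_DEST_F_OVERLOAD) &&
 *			    atomic_read(&dest->weight) > 0)
 *				return dest;
 *		}
 *		ip_vs_scheduler_err(svc, "no destination available");
 *		return NULL;
 *	}
 *
 *	static struct ip_vs_scheduler example_scheduler = {
 *		.name		= "example",
 *		.refcnt		= ATOMIC_INIT(0),
 *		.module		= THIS_MODULE,
 *		.n_list		= LIST_HEAD_INIT(example_scheduler.n_list),
 *		.schedule	= example_schedule,
 *	};
 */
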
/* The application module object (a.k.a. app incarnation) */
struct ip_vs_app {
	struct list_head	a_list;		/* member in app list */
	int			type;		/* IP_VS_APP_TYPE_xxx */
	char			*name;		/* application module name */
	__u16			protocol;
	struct module		*module;	/* THIS_MODULE/NULL */
	struct list_head	incs_list;	/* list of incarnations */

	/* members for application incarnations */
	struct list_head	p_list;		/* member in proto app list */
	struct ip_vs_app	*app;		/* its real application */
	__be16			port;		/* port number in net order */
	atomic_t		usecnt;		/* usage counter */
	struct rcu_head		rcu_head;

	/* output hook: Process packet in inout direction, diff set for TCP.
	 * Return: 0=Error, 1=Payload Not Mangled/Mangled but checksum is ok,
	 *	   2=Mangled but checksum was not updated
	 */
	int (*pkt_out)(struct ip_vs_app *, struct ip_vs_conn *,
		       struct sk_buff *, int *diff, struct ip_vs_iphdr *ipvsh);

	/* input hook: Process packet in outin direction, diff set for TCP.
	 * Return: 0=Error, 1=Payload Not Mangled/Mangled but checksum is ok,
	 *	   2=Mangled but checksum was not updated
	 */
	int (*pkt_in)(struct ip_vs_app *, struct ip_vs_conn *,
		      struct sk_buff *, int *diff, struct ip_vs_iphdr *ipvsh);

	/* ip_vs_app initializer */
	int (*init_conn)(struct ip_vs_app *, struct ip_vs_conn *);

	/* ip_vs_app finish */
	int (*done_conn)(struct ip_vs_app *, struct ip_vs_conn *);


	/* not used now */
	int (*bind_conn)(struct ip_vs_app *, struct ip_vs_conn *,
			 struct ip_vs_protocol *);

	void (*unbind_conn)(struct ip_vs_app *, struct ip_vs_conn *);

	int			*timeout_table;
	int			*timeouts;
	int			timeouts_size;

	int (*conn_schedule)(struct sk_buff *skb, struct ip_vs_app *app,
			     int *verdict, struct ip_vs_conn **cpp);

	struct ip_vs_conn *
	(*conn_in_get)(const struct sk_buff *skb, struct ip_vs_app *app,
		       const struct iphdr *iph, int inverse);

	struct ip_vs_conn *
	(*conn_out_get)(const struct sk_buff *skb, struct ip_vs_app *app,
			const struct iphdr *iph, int inverse);

	int (*state_transition)(struct ip_vs_conn *cp, int direction,
				const struct sk_buff *skb,
				struct ip_vs_app *app);

	void (*timeout_change)(struct ip_vs_app *app, int flags);
};

struct ipvs_master_sync_state {
	struct list_head	sync_queue;
	struct ip_vs_sync_buff	*sync_buff;
	unsigned long		sync_queue_len;
	unsigned int		sync_queue_delay;
	struct task_struct	*master_thread;
	struct delayed_work	master_wakeup_work;
	struct netns_ipvs	*ipvs;
};

/* How much time to keep dests in trash */
#define IP_VS_DEST_TRASH_PERIOD		(120 * HZ)

struct ipvs_sync_daemon_cfg {
	union nf_inet_addr	mcast_group;
	int			syncid;
	u16			sync_maxlen;
	u16			mcast_port;
	u8			mcast_af;
	u8			mcast_ttl;
	/* multicast interface name */
	char			mcast_ifn[IP_VS_IFNAME_MAXLEN];
};

/* IPVS in network namespace */
struct netns_ipvs {
	int			gen;		/* Generation */
	int			enable;		/* enable like nf_hooks do */
	/* Hash table: for real service lookups */
	#define IP_VS_RTAB_BITS 4
	#define IP_VS_RTAB_SIZE (1 << IP_VS_RTAB_BITS)
	#define IP_VS_RTAB_MASK (IP_VS_RTAB_SIZE - 1)

	struct hlist_head	rs_table[IP_VS_RTAB_SIZE];
	/* ip_vs_app */
	struct list_head	app_list;
	/* ip_vs_proto */
	#define IP_VS_PROTO_TAB_SIZE	32	/* must be power of 2 */
	struct ip_vs_proto_data *proto_data_table[IP_VS_PROTO_TAB_SIZE];
	/* ip_vs_proto_tcp */
#ifdef CONFIG_IP_VS_PROTO_TCP
	#define	TCP_APP_TAB_BITS	4
	#define	TCP_APP_TAB_SIZE	(1 << TCP_APP_TAB_BITS)
	#define	TCP_APP_TAB_MASK	(TCP_APP_TAB_SIZE - 1)
	struct list_head	tcp_apps[TCP_APP_TAB_SIZE];
#endif
	/* ip_vs_proto_udp */
#ifdef CONFIG_IP_VS_PROTO_UDP
	#define	UDP_APP_TAB_BITS	4
	#define	UDP_APP_TAB_SIZE	(1 << UDP_APP_TAB_BITS)
	#define	UDP_APP_TAB_MASK	(UDP_APP_TAB_SIZE - 1)
	struct list_head	udp_apps[UDP_APP_TAB_SIZE];
#endif
	/* ip_vs_proto_sctp */
#ifdef CONFIG_IP_VS_PROTO_SCTP
	#define SCTP_APP_TAB_BITS	4
	#define SCTP_APP_TAB_SIZE	(1 << SCTP_APP_TAB_BITS)
	#define SCTP_APP_TAB_MASK	(SCTP_APP_TAB_SIZE - 1)
	/* Hash table for SCTP application incarnations	 */
	struct list_head	sctp_apps[SCTP_APP_TAB_SIZE];
#endif
	/* ip_vs_conn */
	atomic_t		conn_count;	/* connection counter */

	/* ip_vs_ctl */
	struct ip_vs_stats	tot_stats;	/* Statistics & est. */

	int			num_services;	/* no of virtual services */

	/* Trash for destinations */
	struct list_head	dest_trash;
	spinlock_t		dest_trash_lock;
	struct timer_list	dest_trash_timer; /* expiration timer */
	/* Service counters */
	atomic_t		ftpsvc_counter;
	atomic_t		nullsvc_counter;
	atomic_t		conn_out_counter;

#ifdef CONFIG_SYSCTL
	/* 1/rate drop and drop-entry variables */
	struct delayed_work	defense_work;	/* Work handler */
	int			drop_rate;
	int			drop_counter;
	atomic_t		dropentry;
	/* locks in ctl.c */
	spinlock_t		dropentry_lock;	/* drop entry handling */
	spinlock_t		droppacket_lock; /* drop packet handling */
	spinlock_t		securetcp_lock;	/* state and timeout tables */

	/* sys-ctl struct */
	struct ctl_table_header	*sysctl_hdr;
	struct ctl_table	*sysctl_tbl;
#endif

	/* sysctl variables */
	int			sysctl_amemthresh;
	int			sysctl_am_droprate;
	int			sysctl_drop_entry;
	int			sysctl_drop_packet;
	int			sysctl_secure_tcp;
#ifdef CONFIG_IP_VS_NFCT
	int			sysctl_conntrack;
#endif
	int			sysctl_snat_reroute;
	int			sysctl_sync_ver;
	int			sysctl_sync_ports;
	int			sysctl_sync_persist_mode;
	unsigned long		sysctl_sync_qlen_max;
	int			sysctl_sync_sock_size;
	int			sysctl_cache_bypass;
	int			sysctl_expire_nodest_conn;
	int			sysctl_sloppy_tcp;
	int			sysctl_sloppy_sctp;
	int			sysctl_expire_quiescent_template;
	int			sysctl_sync_threshold[2];
	unsigned int		sysctl_sync_refresh_period;
	int			sysctl_sync_retries;
	int			sysctl_nat_icmp_send;
	int			sysctl_pmtu_disc;
	int			sysctl_backup_only;
	int			sysctl_conn_reuse_mode;
	int			sysctl_schedule_icmp;
	int			sysctl_ignore_tunneled;

	/* ip_vs_lblc */
	int			sysctl_lblc_expiration;
	struct ctl_table_header	*lblc_ctl_header;
	struct ctl_table	*lblc_ctl_table;
	/* ip_vs_lblcr */
	int			sysctl_lblcr_expiration;
	struct ctl_table_header	*lblcr_ctl_header;
	struct ctl_table	*lblcr_ctl_table;
	/* ip_vs_est */
	struct list_head	est_list;	/* estimator list */
	spinlock_t		est_lock;
	struct timer_list	est_timer;	/* Estimation timer */
	/* ip_vs_sync */
	spinlock_t		sync_lock;
	struct ipvs_master_sync_state *ms;
	spinlock_t		sync_buff_lock;
	struct task_struct	**backup_threads;
	int			threads_mask;
	volatile int		sync_state;
	struct mutex		sync_mutex;
	struct ipvs_sync_daemon_cfg	mcfg;	/* Master Configuration */
	struct ipvs_sync_daemon_cfg	bcfg;	/* Backup Configuration */
	/* net name space ptr */
	struct net		*net;		/* Needed by timer routines */
	/* Number of heterogeneous destinations, needed because heterogeneous
	 * ones are not supported when synchronization is enabled.
	 */
	unsigned int		mixed_address_family_dests;
};

#define DEFAULT_SYNC_THRESHOLD	3
#define DEFAULT_SYNC_PERIOD	50
#define DEFAULT_SYNC_VER	1
#define DEFAULT_SLOPPY_TCP	0
#define DEFAULT_SLOPPY_SCTP	0
#define DEFAULT_SYNC_REFRESH_PERIOD	(0U * HZ)
#define DEFAULT_SYNC_RETRIES		0
#define IPVS_SYNC_WAKEUP_RATE	8
#define IPVS_SYNC_QLEN_MAX	(IPVS_SYNC_WAKEUP_RATE * 4)
#define IPVS_SYNC_SEND_DELAY	(HZ / 50)
#define IPVS_SYNC_CHECK_PERIOD	HZ
#define IPVS_SYNC_FLUSH_TIME	(HZ * 2)
#define IPVS_SYNC_PORTS_MAX	(1 << 6)

#ifdef CONFIG_SYSCTL

static inline int sysctl_sync_threshold(struct netns_ipvs *ipvs)
{
	return ipvs->sysctl_sync_threshold[0];
}

static inline int sysctl_sync_period(struct netns_ipvs *ipvs)
{
	return READ_ONCE(ipvs->sysctl_sync_threshold[1]);
}

static inline unsigned int sysctl_sync_refresh_period(struct netns_ipvs *ipvs)
{
	return READ_ONCE(ipvs->sysctl_sync_refresh_period);
}

static inline int sysctl_sync_retries(struct netns_ipvs *ipvs)
{
	return ipvs->sysctl_sync_retries;
}

static inline int sysctl_sync_ver(struct netns_ipvs *ipvs)
{
	return ipvs->sysctl_sync_ver;
}

static inline int sysctl_sloppy_tcp(struct netns_ipvs *ipvs)
{
	return ipvs->sysctl_sloppy_tcp;
}

static inline int sysctl_sloppy_sctp(struct netns_ipvs *ipvs)
{
	return ipvs->sysctl_sloppy_sctp;
}

static inline int sysctl_sync_ports(struct netns_ipvs *ipvs)
{
	return READ_ONCE(ipvs->sysctl_sync_ports);
}

static inline int sysctl_sync_persist_mode(struct netns_ipvs *ipvs)
{
	return ipvs->sysctl_sync_persist_mode;
}

static inline unsigned long sysctl_sync_qlen_max(struct netns_ipvs *ipvs)
{
	return ipvs->sysctl_sync_qlen_max;
}

static inline int sysctl_sync_sock_size(struct netns_ipvs *ipvs)
{
	return ipvs->sysctl_sync_sock_size;
}

static inline int sysctl_pmtu_disc(struct netns_ipvs *ipvs)
{
	return ipvs->sysctl_pmtu_disc;
}

static inline int sysctl_backup_only(struct netns_ipvs *ipvs)
{
	return ipvs->sync_state & IP_VS_STATE_BACKUP &&
	       ipvs->sysctl_backup_only;
}

static inline int sysctl_conn_reuse_mode(struct netns_ipvs *ipvs)
{
	return ipvs->sysctl_conn_reuse_mode;
}

static inline int sysctl_schedule_icmp(struct netns_ipvs *ipvs)
{
	return ipvs->sysctl_schedule_icmp;
}

static inline int sysctl_ignore_tunneled(struct netns_ipvs *ipvs)
{
	return ipvs->sysctl_ignore_tunneled;
}

static inline int sysctl_cache_bypass(struct netns_ipvs *ipvs)
{
	return ipvs->sysctl_cache_bypass;
}

#else

static inline int sysctl_sync_threshold(struct netns_ipvs *ipvs)
{
	return DEFAULT_SYNC_THRESHOLD;
}

static inline int sysctl_sync_period(struct netns_ipvs *ipvs)
{
	return DEFAULT_SYNC_PERIOD;
}

static inline unsigned int sysctl_sync_refresh_period(struct netns_ipvs *ipvs)
{
	return DEFAULT_SYNC_REFRESH_PERIOD;
}

static inline int sysctl_sync_retries(struct netns_ipvs *ipvs)
{
	return DEFAULT_SYNC_RETRIES & 3;
}

static inline int sysctl_sync_ver(struct netns_ipvs *ipvs)
{
	return DEFAULT_SYNC_VER;
}

static inline int sysctl_sloppy_tcp(struct netns_ipvs *ipvs)
{
	return DEFAULT_SLOPPY_TCP;
}

static inline int sysctl_sloppy_sctp(struct netns_ipvs *ipvs)
{
	return DEFAULT_SLOPPY_SCTP;
}

static inline int sysctl_sync_ports(struct netns_ipvs *ipvs)
{
	return 1;
}

static inline int sysctl_sync_persist_mode(struct netns_ipvs *ipvs)
{
	return 0;
}

static inline unsigned long sysctl_sync_qlen_max(struct netns_ipvs *ipvs)
{
	return IPVS_SYNC_QLEN_MAX;
}

static inline int sysctl_sync_sock_size(struct netns_ipvs *ipvs)
{
	return 0;
}

static inline int sysctl_pmtu_disc(struct netns_ipvs *ipvs)
{
	return 1;
}

static inline int sysctl_backup_only(struct netns_ipvs *ipvs)
{
	return 0;
}

static inline int sysctl_conn_reuse_mode(struct netns_ipvs *ipvs)
{
	return 1;
}

static inline int sysctl_schedule_icmp(struct netns_ipvs *ipvs)
{
	return 0;
}

static inline int sysctl_ignore_tunneled(struct netns_ipvs *ipvs)
{
	return 0;
}

static inline int sysctl_cache_bypass(struct netns_ipvs *ipvs)
{
	return 0;
}

#endif

/* IPVS core functions
 * (from ip_vs_core.c)
 */
const char *ip_vs_proto_name(unsigned int proto);
void ip_vs_init_hash_table(struct list_head *table, int rows);
struct ip_vs_conn *ip_vs_new_conn_out(struct ip_vs_service *svc,
				      struct ip_vs_dest *dest,
				      struct sk_buff *skb,
				      const struct ip_vs_iphdr *iph,
				      __be16 dport,
				      __be16 cport);
#define IP_VS_INIT_HASH_TABLE(t) ip_vs_init_hash_table((t), ARRAY_SIZE((t)))

#define IP_VS_APP_TYPE_FTP	1

/* ip_vs_conn handling functions
 * (from ip_vs_conn.c)
 */
enum {
	IP_VS_DIR_INPUT = 0,
	IP_VS_DIR_OUTPUT,
	IP_VS_DIR_INPUT_ONLY,
	IP_VS_DIR_LAST,
};

static inline void ip_vs_conn_fill_param(struct netns_ipvs *ipvs, int af, int protocol,
					 const union nf_inet_addr *caddr,
					 __be16 cport,
					 const union nf_inet_addr *vaddr,
					 __be16 vport,
					 struct ip_vs_conn_param *p)
{
	p->ipvs = ipvs;
	p->af = af;
	p->protocol = protocol;
	p->caddr = caddr;
	p->cport = cport;
	p->vaddr = vaddr;
	p->vport = vport;
	p->pe = NULL;
	p->pe_data = NULL;
}
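
/* Illustrative sketch, not part of this header: looking up a connection by
 * its client/virtual tuple.  ip_vs_conn_in_get() returns a referenced conn
 * (or NULL), so it must be balanced with __ip_vs_conn_put() or
 * ip_vs_conn_put().  ipvs and the addresses/ports are assumed to exist in
 * the caller.
 *
 *	struct ip_vs_conn_param param;
 *	struct ip_vs_conn *cp;
 *
 *	ip_vs_conn_fill_param(ipvs, AF_INET, IPPROTO_TCP,
 *			      &caddr, cport, &vaddr, vport, &param);
 *	cp = ip_vs_conn_in_get(&param);
 *	if (cp) {
 *		(use cp here)
 *		__ip_vs_conn_put(cp);
 *	}
 */
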
struct ip_vs_conn *ip_vs_conn_in_get(const struct ip_vs_conn_param *p);
struct ip_vs_conn *ip_vs_ct_in_get(const struct ip_vs_conn_param *p);

struct ip_vs_conn *ip_vs_conn_in_get_proto(struct netns_ipvs *ipvs, int af,
					   const struct sk_buff *skb,
					   const struct ip_vs_iphdr *iph);

struct ip_vs_conn *ip_vs_conn_out_get(const struct ip_vs_conn_param *p);

struct ip_vs_conn *ip_vs_conn_out_get_proto(struct netns_ipvs *ipvs, int af,
					    const struct sk_buff *skb,
					    const struct ip_vs_iphdr *iph);

/* Get reference to gain full access to conn.
 * By default, RCU read-side critical sections have access only to
 * conn fields and its PE data, see ip_vs_conn_rcu_free() for reference.
 */
static inline bool __ip_vs_conn_get(struct ip_vs_conn *cp)
{
	return refcount_inc_not_zero(&cp->refcnt);
}

/* put back the conn without restarting its timer */
static inline void __ip_vs_conn_put(struct ip_vs_conn *cp)
{
	smp_mb__before_atomic();
	refcount_dec(&cp->refcnt);
}
void ip_vs_conn_put(struct ip_vs_conn *cp);
void ip_vs_conn_fill_cport(struct ip_vs_conn *cp, __be16 cport);

struct ip_vs_conn *ip_vs_conn_new(const struct ip_vs_conn_param *p, int dest_af,
				  const union nf_inet_addr *daddr,
				  __be16 dport, unsigned int flags,
				  struct ip_vs_dest *dest, __u32 fwmark);
void ip_vs_conn_expire_now(struct ip_vs_conn *cp);

const char *ip_vs_state_name(__u16 proto, int state);

void ip_vs_tcp_conn_listen(struct ip_vs_conn *cp);
int ip_vs_check_template(struct ip_vs_conn *ct, struct ip_vs_dest *cdest);
void ip_vs_random_dropentry(struct netns_ipvs *ipvs);
int ip_vs_conn_init(void);
void ip_vs_conn_cleanup(void);

static inline void ip_vs_control_del(struct ip_vs_conn *cp)
{
	struct ip_vs_conn *ctl_cp = cp->control;
	if (!ctl_cp) {
		IP_VS_ERR_BUF("request control DEL for uncontrolled: "
			      "%s:%d to %s:%d\n",
			      IP_VS_DBG_ADDR(cp->af, &cp->caddr),
			      ntohs(cp->cport),
			      IP_VS_DBG_ADDR(cp->af, &cp->vaddr),
			      ntohs(cp->vport));

		return;
	}

	IP_VS_DBG_BUF(7, "DELeting control for: "
		      "cp.dst=%s:%d ctl_cp.dst=%s:%d\n",
		      IP_VS_DBG_ADDR(cp->af, &cp->caddr),
		      ntohs(cp->cport),
		      IP_VS_DBG_ADDR(cp->af, &ctl_cp->caddr),
		      ntohs(ctl_cp->cport));

	cp->control = NULL;
	if (atomic_read(&ctl_cp->n_control) == 0) {
		IP_VS_ERR_BUF("BUG control DEL with n=0 : "
			      "%s:%d to %s:%d\n",
			      IP_VS_DBG_ADDR(cp->af, &cp->caddr),
			      ntohs(cp->cport),
			      IP_VS_DBG_ADDR(cp->af, &cp->vaddr),
			      ntohs(cp->vport));

		return;
	}
	atomic_dec(&ctl_cp->n_control);
}

static inline void
ip_vs_control_add(struct ip_vs_conn *cp, struct ip_vs_conn *ctl_cp)
{
	if (cp->control) {
		IP_VS_ERR_BUF("request control ADD for already controlled: "
			      "%s:%d to %s:%d\n",
			      IP_VS_DBG_ADDR(cp->af, &cp->caddr),
			      ntohs(cp->cport),
			      IP_VS_DBG_ADDR(cp->af, &cp->vaddr),
			      ntohs(cp->vport));

		ip_vs_control_del(cp);
	}

	IP_VS_DBG_BUF(7, "ADDing control for: "
		      "cp.dst=%s:%d ctl_cp.dst=%s:%d\n",
		      IP_VS_DBG_ADDR(cp->af, &cp->caddr),
		      ntohs(cp->cport),
		      IP_VS_DBG_ADDR(cp->af, &ctl_cp->caddr),
		      ntohs(ctl_cp->cport));

	cp->control = ctl_cp;
	atomic_inc(&ctl_cp->n_control);
}
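
/* Illustrative sketch, not part of this header: persistence ties a data
 * connection (cp) to its controlling template (ct) with these helpers,
 * roughly as the persistent-scheduling path in ip_vs_core.c does:
 *
 *	ip_vs_control_add(cp, ct);	(cp->control = ct, ct->n_control++)
 *	...
 *	ip_vs_control_del(cp);		(drops the link again on expire)
 */
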
/* IPVS netns init & cleanup functions */
int ip_vs_estimator_net_init(struct netns_ipvs *ipvs);
int ip_vs_control_net_init(struct netns_ipvs *ipvs);
int ip_vs_protocol_net_init(struct netns_ipvs *ipvs);
int ip_vs_app_net_init(struct netns_ipvs *ipvs);
int ip_vs_conn_net_init(struct netns_ipvs *ipvs);
int ip_vs_sync_net_init(struct netns_ipvs *ipvs);
void ip_vs_conn_net_cleanup(struct netns_ipvs *ipvs);
void ip_vs_app_net_cleanup(struct netns_ipvs *ipvs);
void ip_vs_protocol_net_cleanup(struct netns_ipvs *ipvs);
void ip_vs_control_net_cleanup(struct netns_ipvs *ipvs);
void ip_vs_estimator_net_cleanup(struct netns_ipvs *ipvs);
void ip_vs_sync_net_cleanup(struct netns_ipvs *ipvs);
void ip_vs_service_net_cleanup(struct netns_ipvs *ipvs);

/* IPVS application functions
 * (from ip_vs_app.c)
 */
#define IP_VS_APP_MAX_PORTS  8
struct ip_vs_app *register_ip_vs_app(struct netns_ipvs *ipvs, struct ip_vs_app *app);
void unregister_ip_vs_app(struct netns_ipvs *ipvs, struct ip_vs_app *app);
int ip_vs_bind_app(struct ip_vs_conn *cp, struct ip_vs_protocol *pp);
void ip_vs_unbind_app(struct ip_vs_conn *cp);
int register_ip_vs_app_inc(struct netns_ipvs *ipvs, struct ip_vs_app *app, __u16 proto,
			   __u16 port);
int ip_vs_app_inc_get(struct ip_vs_app *inc);
void ip_vs_app_inc_put(struct ip_vs_app *inc);

int ip_vs_app_pkt_out(struct ip_vs_conn *, struct sk_buff *skb,
		      struct ip_vs_iphdr *ipvsh);
int ip_vs_app_pkt_in(struct ip_vs_conn *, struct sk_buff *skb,
		     struct ip_vs_iphdr *ipvsh);

int register_ip_vs_pe(struct ip_vs_pe *pe);
int unregister_ip_vs_pe(struct ip_vs_pe *pe);
struct ip_vs_pe *ip_vs_pe_getbyname(const char *name);
struct ip_vs_pe *__ip_vs_pe_getbyname(const char *pe_name);

/* Use a #define to avoid all of module.h just for these trivial ops */
#define ip_vs_pe_get(pe)			\
	if (pe && pe->module)			\
		__module_get(pe->module);

#define ip_vs_pe_put(pe)			\
	if (pe && pe->module)			\
		module_put(pe->module);

/* IPVS protocol functions (from ip_vs_proto.c) */
int ip_vs_protocol_init(void);
void ip_vs_protocol_cleanup(void);
void ip_vs_protocol_timeout_change(struct netns_ipvs *ipvs, int flags);
int *ip_vs_create_timeout_table(int *table, int size);
void ip_vs_tcpudp_debug_packet(int af, struct ip_vs_protocol *pp,
			       const struct sk_buff *skb, int offset,
			       const char *msg);

extern struct ip_vs_protocol ip_vs_protocol_tcp;
extern struct ip_vs_protocol ip_vs_protocol_udp;
extern struct ip_vs_protocol ip_vs_protocol_icmp;
extern struct ip_vs_protocol ip_vs_protocol_esp;
extern struct ip_vs_protocol ip_vs_protocol_ah;
extern struct ip_vs_protocol ip_vs_protocol_sctp;

/* Registering/unregistering scheduler functions
 * (from ip_vs_sched.c)
 */
int register_ip_vs_scheduler(struct ip_vs_scheduler *scheduler);
int unregister_ip_vs_scheduler(struct ip_vs_scheduler *scheduler);
int ip_vs_bind_scheduler(struct ip_vs_service *svc,
			 struct ip_vs_scheduler *scheduler);
void ip_vs_unbind_scheduler(struct ip_vs_service *svc,
			    struct ip_vs_scheduler *sched);
struct ip_vs_scheduler *ip_vs_scheduler_get(const char *sched_name);
void ip_vs_scheduler_put(struct ip_vs_scheduler *scheduler);
struct ip_vs_conn *
ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb,
	       struct ip_vs_proto_data *pd, int *ignored,
	       struct ip_vs_iphdr *iph);
int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
		struct ip_vs_proto_data *pd, struct ip_vs_iphdr *iph);

void ip_vs_scheduler_err(struct ip_vs_service *svc, const char *msg);
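
/* Illustrative sketch, not part of this header: a scheduler module pairs
 * register_ip_vs_scheduler()/unregister_ip_vs_scheduler() in its module
 * init/exit, as the in-tree schedulers do.  example_scheduler refers to
 * the sketch shown after struct ip_vs_pe above and is an assumption.
 *
 *	static int __init example_init(void)
 *	{
 *		return register_ip_vs_scheduler(&example_scheduler);
 *	}
 *
 *	static void __exit example_cleanup(void)
 *	{
 *		unregister_ip_vs_scheduler(&example_scheduler);
 *	}
 *
 *	module_init(example_init);
 *	module_exit(example_cleanup);
 */
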
/* IPVS control data and functions (from ip_vs_ctl.c) */
extern struct ip_vs_stats ip_vs_stats;
extern int sysctl_ip_vs_sync_ver;

struct ip_vs_service *
ip_vs_service_find(struct netns_ipvs *ipvs, int af, __u32 fwmark, __u16 protocol,
		   const union nf_inet_addr *vaddr, __be16 vport);

bool ip_vs_has_real_service(struct netns_ipvs *ipvs, int af, __u16 protocol,
			    const union nf_inet_addr *daddr, __be16 dport);

struct ip_vs_dest *
ip_vs_find_real_service(struct netns_ipvs *ipvs, int af, __u16 protocol,
			const union nf_inet_addr *daddr, __be16 dport);

int ip_vs_use_count_inc(void);
void ip_vs_use_count_dec(void);
int ip_vs_register_nl_ioctl(void);
void ip_vs_unregister_nl_ioctl(void);
int ip_vs_control_init(void);
void ip_vs_control_cleanup(void);
struct ip_vs_dest *
ip_vs_find_dest(struct netns_ipvs *ipvs, int svc_af, int dest_af,
		const union nf_inet_addr *daddr, __be16 dport,
		const union nf_inet_addr *vaddr, __be16 vport,
		__u16 protocol, __u32 fwmark, __u32 flags);
void ip_vs_try_bind_dest(struct ip_vs_conn *cp);

static inline void ip_vs_dest_hold(struct ip_vs_dest *dest)
{
	refcount_inc(&dest->refcnt);
}

static inline void ip_vs_dest_put(struct ip_vs_dest *dest)
{
	smp_mb__before_atomic();
	refcount_dec(&dest->refcnt);
}

static inline void ip_vs_dest_put_and_free(struct ip_vs_dest *dest)
{
	if (refcount_dec_and_test(&dest->refcnt))
		kfree(dest);
}

/* IPVS sync daemon data and function prototypes
 * (from ip_vs_sync.c)
 */
int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *cfg,
		      int state);
int stop_sync_thread(struct netns_ipvs *ipvs, int state);
void ip_vs_sync_conn(struct netns_ipvs *ipvs, struct ip_vs_conn *cp, int pkts);

/* IPVS rate estimator prototypes (from ip_vs_est.c) */
void ip_vs_start_estimator(struct netns_ipvs *ipvs, struct ip_vs_stats *stats);
void ip_vs_stop_estimator(struct netns_ipvs *ipvs, struct ip_vs_stats *stats);
void ip_vs_zero_estimator(struct ip_vs_stats *stats);
void ip_vs_read_estimator(struct ip_vs_kstats *dst, struct ip_vs_stats *stats);

/* Various IPVS packet transmitters (from ip_vs_xmit.c) */
int ip_vs_null_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
		    struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
int ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
		      struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
int ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
		   struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
int ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
		      struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
int ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
		  struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
int ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
		    struct ip_vs_protocol *pp, int offset,
		    unsigned int hooknum, struct ip_vs_iphdr *iph);
void ip_vs_dest_dst_rcu_free(struct rcu_head *head);

#ifdef CONFIG_IP_VS_IPV6
int ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
			 struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
int ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
		      struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
int ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
			 struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
int ip_vs_dr_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
		     struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
int ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
		       struct ip_vs_protocol *pp, int offset,
		       unsigned int hooknum, struct ip_vs_iphdr *iph);
#endif

#ifdef CONFIG_SYSCTL
/* This is a simple mechanism to ignore packets when the system is
 * overloaded.  Just set ip_vs_drop_rate to 'n' and we start to drop
 * 1/rate of the packets.
 */
static inline int ip_vs_todrop(struct netns_ipvs *ipvs)
{
	if (!ipvs->drop_rate)
		return 0;
	if (--ipvs->drop_counter > 0)
		return 0;
	ipvs->drop_counter = ipvs->drop_rate;
	return 1;
}
#else
static inline int ip_vs_todrop(struct netns_ipvs *ipvs) { return 0; }
#endif

/* ip_vs_fwd_tag returns the forwarding tag of the connection */
#define IP_VS_FWD_METHOD(cp)  (cp->flags & IP_VS_CONN_F_FWD_MASK)

static inline char ip_vs_fwd_tag(struct ip_vs_conn *cp)
{
	char fwd;

	switch (IP_VS_FWD_METHOD(cp)) {
	case IP_VS_CONN_F_MASQ:
		fwd = 'M'; break;
	case IP_VS_CONN_F_LOCALNODE:
		fwd = 'L'; break;
	case IP_VS_CONN_F_TUNNEL:
		fwd = 'T'; break;
	case IP_VS_CONN_F_DROUTE:
		fwd = 'R'; break;
	case IP_VS_CONN_F_BYPASS:
		fwd = 'B'; break;
	default:
		fwd = '?'; break;
	}
	return fwd;
}

void ip_vs_nat_icmp(struct sk_buff *skb, struct ip_vs_protocol *pp,
		    struct ip_vs_conn *cp, int dir);

#ifdef CONFIG_IP_VS_IPV6
void ip_vs_nat_icmp_v6(struct sk_buff *skb, struct ip_vs_protocol *pp,
		       struct ip_vs_conn *cp, int dir);
#endif

__sum16 ip_vs_checksum_complete(struct sk_buff *skb, int offset);

static inline __wsum ip_vs_check_diff4(__be32 old, __be32 new, __wsum oldsum)
{
	__be32 diff[2] = { ~old, new };

	return csum_partial(diff, sizeof(diff), oldsum);
}

#ifdef CONFIG_IP_VS_IPV6
static inline __wsum ip_vs_check_diff16(const __be32 *old, const __be32 *new,
					__wsum oldsum)
{
	__be32 diff[8] = { ~old[3], ~old[2], ~old[1], ~old[0],
			   new[3], new[2], new[1], new[0] };

	return csum_partial(diff, sizeof(diff), oldsum);
}
#endif

static inline __wsum ip_vs_check_diff2(__be16 old, __be16 new, __wsum oldsum)
{
	__be16 diff[2] = { ~old, new };

	return csum_partial(diff, sizeof(diff), oldsum);
}
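
/* Illustrative sketch, not part of this header: the protocol handlers use
 * these helpers for incremental checksum updates when an address and a
 * port are rewritten, along the lines of the TCP/UDP NAT handlers in
 * ip_vs_proto_tcp.c/ip_vs_proto_udp.c.  oldip/newip/oldport/newport stand
 * for the values being replaced and are assumptions for the example.
 *
 *	tcph->check =
 *		csum_fold(ip_vs_check_diff4(oldip, newip,
 *				ip_vs_check_diff2(oldport, newport,
 *					~csum_unfold(tcph->check))));
 */
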
/* Forget current conntrack (unconfirmed) and attach notrack entry */
static inline void ip_vs_notrack(struct sk_buff *skb)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct = nf_ct_get(skb, &ctinfo);

	if (ct) {
		nf_conntrack_put(&ct->ct_general);
		nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
	}
#endif
}

#ifdef CONFIG_IP_VS_NFCT
/* Netfilter connection tracking
 * (from ip_vs_nfct.c)
 */
static inline int ip_vs_conntrack_enabled(struct netns_ipvs *ipvs)
{
#ifdef CONFIG_SYSCTL
	return ipvs->sysctl_conntrack;
#else
	return 0;
#endif
}

void ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp,
			    int outin);
int ip_vs_confirm_conntrack(struct sk_buff *skb);
void ip_vs_nfct_expect_related(struct sk_buff *skb, struct nf_conn *ct,
			       struct ip_vs_conn *cp, u_int8_t proto,
			       const __be16 port, int from_rs);
void ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp);

#else

static inline int ip_vs_conntrack_enabled(struct netns_ipvs *ipvs)
{
	return 0;
}

static inline void ip_vs_update_conntrack(struct sk_buff *skb,
					  struct ip_vs_conn *cp, int outin)
{
}

static inline int ip_vs_confirm_conntrack(struct sk_buff *skb)
{
	return NF_ACCEPT;
}

static inline void ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp)
{
}
#endif /* CONFIG_IP_VS_NFCT */

/* Really using conntrack? */
static inline bool ip_vs_conn_uses_conntrack(struct ip_vs_conn *cp,
					     struct sk_buff *skb)
{
#ifdef CONFIG_IP_VS_NFCT
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct;

	if (!(cp->flags & IP_VS_CONN_F_NFCT))
		return false;
	ct = nf_ct_get(skb, &ctinfo);
	if (ct)
		return true;
#endif
	return false;
}

static inline int ip_vs_register_conntrack(struct ip_vs_service *svc)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	int afmask = (svc->af == AF_INET6) ? 2 : 1;
	int ret = 0;

	if (!(svc->conntrack_afmask & afmask)) {
		ret = nf_ct_netns_get(svc->ipvs->net, svc->af);
		if (ret >= 0)
			svc->conntrack_afmask |= afmask;
	}
	return ret;
#else
	return 0;
#endif
}

static inline void ip_vs_unregister_conntrack(struct ip_vs_service *svc)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	int afmask = (svc->af == AF_INET6) ? 2 : 1;

	if (svc->conntrack_afmask & afmask) {
		nf_ct_netns_put(svc->ipvs->net, svc->af);
		svc->conntrack_afmask &= ~afmask;
	}
#endif
}

static inline int
ip_vs_dest_conn_overhead(struct ip_vs_dest *dest)
{
	/* We think the overhead of processing active connections is 256
	 * times higher than that of inactive connections on average. (This
	 * 256 times might not be accurate, we will change it later.) We
	 * use the following formula to estimate the overhead now:
	 *		  dest->activeconns*256 + dest->inactconns
	 */
	return (atomic_read(&dest->activeconns) << 8) +
		atomic_read(&dest->inactconns);
}

#endif	/* _NET_IP_VS_H */