/*-
 * Copyright (c) 2002 Andre Oppermann, Internet Business Solutions AG
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * The tcp_hostcache moves the tcp-specific cached metrics from the routing
 * table to a dedicated structure indexed by the remote IP address.  It keeps
 * information on the measured TCP parameters of past TCP sessions to allow
 * better initial start values to be used with later connections to/from the
 * same source.  Depending on the network parameters (delay, bandwidth, max
 * MTU, congestion window) between local and remote sites, this can lead to
 * significant speed-ups for new TCP connections after the first one.
 *
 * Due to the tcp_hostcache, all TCP-specific metrics information in the
 * routing table has been removed.  The inpcb no longer keeps a pointer to
 * the routing entry, and protocol-initiated route cloning has been removed
 * as well.  With these changes, the routing table has gone back to being
 * more lightweight and only carries information related to packet forwarding.
 *
 * tcp_hostcache is designed for multiple concurrent access in SMP
 * environments and high contention.  All bucket rows have their own lock and
 * thus multiple lookups and modifies can be done at the same time as long as
 * they are in different bucket rows.  If a request for insertion of a new
 * record can't be satisfied, it simply returns an empty structure.  Nobody
 * and nothing outside of tcp_hostcache.c will ever point directly to any
 * entry in the tcp_hostcache.  All communication is done in an
 * object-oriented way and only functions of tcp_hostcache will manipulate
 * hostcache entries.  Otherwise, we are unable to achieve good behaviour in
 * concurrent access situations.  Since tcp_hostcache is only caching
 * information, there are no fatal consequences if we either can't satisfy
 * any particular request or have to drop/overwrite an existing entry because
 * of bucket limit memory constraints.
 */
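
/*
 * Illustrative sketch only (not part of this file): how a hypothetical
 * caller in the TCP code might use the external hostcache interface
 * defined below.  The "inp" variable and the exact call sites are
 * assumptions made for the example; tcp_hc_get(), tcp_hc_update() and
 * struct hc_metrics_lite are the real interface.
 *
 *	struct hc_metrics_lite metrics;
 *
 *	// On connection setup: seed initial values from past sessions.
 *	tcp_hc_get(&inp->inp_inc, &metrics);
 *	if (metrics.rmx_rtt != 0)
 *		;	// prefer the cached RTT estimate over the default
 *
 *	// On connection teardown: feed the measured values back; this
 *	// creates or refreshes the entry for the remote address.
 *	tcp_hc_update(&inp->inp_inc, &metrics);
 */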

/*
 * Many thanks to jlemon for the basic structure of tcp_syncache, which is
 * being followed here.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/route.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/in_var.h>
#include <netinet/in_pcb.h>
#include <netinet/ip_var.h>
#ifdef INET6
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#endif
#include <netinet/tcp.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_hostcache.h>
#ifdef INET6
#include <netinet6/tcp6_var.h>
#endif

#include <vm/uma.h>

/* Arbitrary values */
#define TCP_HOSTCACHE_HASHSIZE		512
#define TCP_HOSTCACHE_BUCKETLIMIT	30
#define TCP_HOSTCACHE_EXPIRE		60*60	/* one hour */
#define TCP_HOSTCACHE_PRUNE		5*60	/* every 5 minutes */

static VNET_DEFINE(struct tcp_hostcache, tcp_hostcache);
#define	V_tcp_hostcache		VNET(tcp_hostcache)

static VNET_DEFINE(struct callout, tcp_hc_callout);
#define	V_tcp_hc_callout	VNET(tcp_hc_callout)

static struct hc_metrics *tcp_hc_lookup(struct in_conninfo *);
static struct hc_metrics *tcp_hc_insert(struct in_conninfo *);
static int sysctl_tcp_hc_list(SYSCTL_HANDLER_ARGS);
static void tcp_hc_purge_internal(int);
static void tcp_hc_purge(void *);

static SYSCTL_NODE(_net_inet_tcp, OID_AUTO, hostcache, CTLFLAG_RW, 0,
    "TCP Host cache");

SYSCTL_VNET_UINT(_net_inet_tcp_hostcache, OID_AUTO, cachelimit, CTLFLAG_RDTUN,
    &VNET_NAME(tcp_hostcache.cache_limit), 0,
    "Overall entry limit for hostcache");

SYSCTL_VNET_UINT(_net_inet_tcp_hostcache, OID_AUTO, hashsize, CTLFLAG_RDTUN,
    &VNET_NAME(tcp_hostcache.hashsize), 0,
    "Size of TCP hostcache hashtable");

SYSCTL_VNET_UINT(_net_inet_tcp_hostcache, OID_AUTO, bucketlimit,
    CTLFLAG_RDTUN, &VNET_NAME(tcp_hostcache.bucket_limit), 0,
    "Per-bucket hash limit for hostcache");

SYSCTL_VNET_UINT(_net_inet_tcp_hostcache, OID_AUTO, count, CTLFLAG_RD,
    &VNET_NAME(tcp_hostcache.cache_count), 0,
    "Current number of entries in hostcache");

SYSCTL_VNET_INT(_net_inet_tcp_hostcache, OID_AUTO, expire, CTLFLAG_RW,
    &VNET_NAME(tcp_hostcache.expire), 0,
    "Expire time of TCP hostcache entries");

SYSCTL_VNET_INT(_net_inet_tcp_hostcache, OID_AUTO, prune, CTLFLAG_RW,
    &VNET_NAME(tcp_hostcache.prune), 0,
    "Time between purge runs");

SYSCTL_VNET_INT(_net_inet_tcp_hostcache, OID_AUTO, purge, CTLFLAG_RW,
    &VNET_NAME(tcp_hostcache.purgeall), 0,
    "Expire all entries on next purge run");

SYSCTL_PROC(_net_inet_tcp_hostcache, OID_AUTO, list,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_SKIP, 0, 0,
    sysctl_tcp_hc_list, "A", "List of all hostcache entries");


static MALLOC_DEFINE(M_HOSTCACHE, "hostcache", "TCP hostcache");

#define HOSTCACHE_HASH(ip) \
	(((ip)->s_addr ^ ((ip)->s_addr >> 7) ^ ((ip)->s_addr >> 17)) & \
	 V_tcp_hostcache.hashmask)

/* XXX: What is the recommended hash to get good entropy for IPv6 addresses? */
#define HOSTCACHE_HASH6(ip6)			\
	(((ip6)->s6_addr32[0] ^			\
	  (ip6)->s6_addr32[1] ^			\
	  (ip6)->s6_addr32[2] ^			\
	  (ip6)->s6_addr32[3]) &		\
	 V_tcp_hostcache.hashmask)

#define THC_LOCK(lp)		mtx_lock(lp)
#define THC_UNLOCK(lp)		mtx_unlock(lp)

void
tcp_hc_init(void)
{
	u_int cache_limit;
	int i;

	/*
	 * Initialize hostcache structures.
	 */
	V_tcp_hostcache.cache_count = 0;
	V_tcp_hostcache.hashsize = TCP_HOSTCACHE_HASHSIZE;
	V_tcp_hostcache.bucket_limit = TCP_HOSTCACHE_BUCKETLIMIT;
	V_tcp_hostcache.expire = TCP_HOSTCACHE_EXPIRE;
	V_tcp_hostcache.prune = TCP_HOSTCACHE_PRUNE;

	TUNABLE_INT_FETCH("net.inet.tcp.hostcache.hashsize",
	    &V_tcp_hostcache.hashsize);
	if (!powerof2(V_tcp_hostcache.hashsize)) {
		printf("WARNING: hostcache hash size is not a power of 2.\n");
		V_tcp_hostcache.hashsize = TCP_HOSTCACHE_HASHSIZE; /* default */
	}
	V_tcp_hostcache.hashmask = V_tcp_hostcache.hashsize - 1;

	TUNABLE_INT_FETCH("net.inet.tcp.hostcache.bucketlimit",
	    &V_tcp_hostcache.bucket_limit);

	cache_limit = V_tcp_hostcache.hashsize * V_tcp_hostcache.bucket_limit;
	V_tcp_hostcache.cache_limit = cache_limit;
	TUNABLE_INT_FETCH("net.inet.tcp.hostcache.cachelimit",
	    &V_tcp_hostcache.cache_limit);
	if (V_tcp_hostcache.cache_limit > cache_limit)
		V_tcp_hostcache.cache_limit = cache_limit;

	/*
	 * Allocate the hash table.
	 */
	V_tcp_hostcache.hashbase = (struct hc_head *)
	    malloc(V_tcp_hostcache.hashsize * sizeof(struct hc_head),
	    M_HOSTCACHE, M_WAITOK | M_ZERO);

	/*
	 * Initialize the hash buckets.
	 */
	for (i = 0; i < V_tcp_hostcache.hashsize; i++) {
		TAILQ_INIT(&V_tcp_hostcache.hashbase[i].hch_bucket);
		V_tcp_hostcache.hashbase[i].hch_length = 0;
		mtx_init(&V_tcp_hostcache.hashbase[i].hch_mtx, "tcp_hc_entry",
		    NULL, MTX_DEF);
	}

	/*
	 * Allocate the hostcache entries.
	 */
	V_tcp_hostcache.zone =
	    uma_zcreate("hostcache", sizeof(struct hc_metrics),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	uma_zone_set_max(V_tcp_hostcache.zone, V_tcp_hostcache.cache_limit);

	/*
	 * Set up periodic cache cleanup.
	 */
	callout_init(&V_tcp_hc_callout, CALLOUT_MPSAFE);
	callout_reset(&V_tcp_hc_callout, V_tcp_hostcache.prune * hz,
	    tcp_hc_purge, curvnet);
}

#ifdef VIMAGE
void
tcp_hc_destroy(void)
{
	int i;

	callout_drain(&V_tcp_hc_callout);

	/* Purge all hc entries. */
	tcp_hc_purge_internal(1);

	/* Free the uma zone and the allocated hash table. */
	uma_zdestroy(V_tcp_hostcache.zone);

	for (i = 0; i < V_tcp_hostcache.hashsize; i++)
		mtx_destroy(&V_tcp_hostcache.hashbase[i].hch_mtx);
	free(V_tcp_hostcache.hashbase, M_HOSTCACHE);
}
#endif

/*
 * Internal function: look up an entry in the hostcache or return NULL.
 *
 * If an entry has been returned, the caller becomes responsible for
 * unlocking the bucket row after he is done reading/modifying the entry.
 */
static struct hc_metrics *
tcp_hc_lookup(struct in_conninfo *inc)
{
	int hash;
	struct hc_head *hc_head;
	struct hc_metrics *hc_entry;

	KASSERT(inc != NULL, ("tcp_hc_lookup with NULL in_conninfo pointer"));

	/*
	 * Hash the foreign ip address.
	 */
	if (inc->inc_flags & INC_ISIPV6)
		hash = HOSTCACHE_HASH6(&inc->inc6_faddr);
	else
		hash = HOSTCACHE_HASH(&inc->inc_faddr);

	hc_head = &V_tcp_hostcache.hashbase[hash];

	/*
	 * Acquire lock for this bucket row; we release the lock if we don't
	 * find an entry, otherwise the caller has to unlock after he is
	 * done.
	 */
	THC_LOCK(&hc_head->hch_mtx);

	/*
	 * Iterate through entries in bucket row looking for a match.
	 */
	TAILQ_FOREACH(hc_entry, &hc_head->hch_bucket, rmx_q) {
		if (inc->inc_flags & INC_ISIPV6) {
			if (memcmp(&inc->inc6_faddr, &hc_entry->ip6,
			    sizeof(inc->inc6_faddr)) == 0)
				return hc_entry;
		} else {
			if (memcmp(&inc->inc_faddr, &hc_entry->ip4,
			    sizeof(inc->inc_faddr)) == 0)
				return hc_entry;
		}
	}

	/*
	 * We were unsuccessful and didn't find anything.
	 */
	THC_UNLOCK(&hc_head->hch_mtx);
	return NULL;
}

/*
 * Internal function: insert an entry into the hostcache or return NULL if
 * unable to allocate a new one.
 *
 * If an entry has been returned, the caller becomes responsible for
 * unlocking the bucket row after he is done reading/modifying the entry.
 */
static struct hc_metrics *
tcp_hc_insert(struct in_conninfo *inc)
{
	int hash;
	struct hc_head *hc_head;
	struct hc_metrics *hc_entry;

	KASSERT(inc != NULL, ("tcp_hc_insert with NULL in_conninfo pointer"));

	/*
	 * Hash the foreign ip address.
	 */
	if (inc->inc_flags & INC_ISIPV6)
		hash = HOSTCACHE_HASH6(&inc->inc6_faddr);
	else
		hash = HOSTCACHE_HASH(&inc->inc_faddr);

	hc_head = &V_tcp_hostcache.hashbase[hash];

	/*
	 * Acquire lock for this bucket row; we release the lock if we don't
	 * find an entry, otherwise the caller has to unlock after he is
	 * done.
	 */
	THC_LOCK(&hc_head->hch_mtx);

	/*
	 * If the bucket limit is reached, reuse the least-used element.
	 */
	if (hc_head->hch_length >= V_tcp_hostcache.bucket_limit ||
	    V_tcp_hostcache.cache_count >= V_tcp_hostcache.cache_limit) {
		hc_entry = TAILQ_LAST(&hc_head->hch_bucket, hc_qhead);
		/*
		 * At first we were dropping the last element, just to
		 * reacquire it in the next two lines again, which isn't very
		 * efficient.  Instead just reuse the least used element.
		 * We may drop something that is still "in-use" but we can be
		 * "lossy".
		 * Just give up if this bucket row is empty and we don't have
		 * anything to replace.
		 */
		if (hc_entry == NULL) {
			THC_UNLOCK(&hc_head->hch_mtx);
			return NULL;
		}
		TAILQ_REMOVE(&hc_head->hch_bucket, hc_entry, rmx_q);
		V_tcp_hostcache.hashbase[hash].hch_length--;
		V_tcp_hostcache.cache_count--;
		TCPSTAT_INC(tcps_hc_bucketoverflow);
#if 0
		uma_zfree(V_tcp_hostcache.zone, hc_entry);
#endif
	} else {
		/*
		 * Allocate a new entry, or balk if not possible.
		 */
		hc_entry = uma_zalloc(V_tcp_hostcache.zone, M_NOWAIT);
		if (hc_entry == NULL) {
			THC_UNLOCK(&hc_head->hch_mtx);
			return NULL;
		}
	}

	/*
	 * Initialize basic information of hostcache entry.
	 */
	bzero(hc_entry, sizeof(*hc_entry));
	if (inc->inc_flags & INC_ISIPV6)
		bcopy(&inc->inc6_faddr, &hc_entry->ip6, sizeof(hc_entry->ip6));
	else
		hc_entry->ip4 = inc->inc_faddr;
	hc_entry->rmx_head = hc_head;
	hc_entry->rmx_expire = V_tcp_hostcache.expire;

	/*
	 * Put it upfront.
	 */
	TAILQ_INSERT_HEAD(&hc_head->hch_bucket, hc_entry, rmx_q);
	V_tcp_hostcache.hashbase[hash].hch_length++;
	V_tcp_hostcache.cache_count++;
	TCPSTAT_INC(tcps_hc_added);

	return hc_entry;
}

/*
 * External function: look up an entry in the hostcache and fill out the
 * supplied TCP metrics structure.  Fills the structure with zeroes when no
 * entry was found; individual fields remain zero when a value is not set.
 */
void
tcp_hc_get(struct in_conninfo *inc, struct hc_metrics_lite *hc_metrics_lite)
{
	struct hc_metrics *hc_entry;

	/*
	 * Find the right bucket.
	 */
	hc_entry = tcp_hc_lookup(inc);

	/*
	 * If we don't have an existing object.
	 */
	if (hc_entry == NULL) {
		bzero(hc_metrics_lite, sizeof(*hc_metrics_lite));
		return;
	}
	hc_entry->rmx_hits++;
	hc_entry->rmx_expire = V_tcp_hostcache.expire; /* start over again */

	hc_metrics_lite->rmx_mtu = hc_entry->rmx_mtu;
	hc_metrics_lite->rmx_ssthresh = hc_entry->rmx_ssthresh;
	hc_metrics_lite->rmx_rtt = hc_entry->rmx_rtt;
	hc_metrics_lite->rmx_rttvar = hc_entry->rmx_rttvar;
	hc_metrics_lite->rmx_bandwidth = hc_entry->rmx_bandwidth;
	hc_metrics_lite->rmx_cwnd = hc_entry->rmx_cwnd;
	hc_metrics_lite->rmx_sendpipe = hc_entry->rmx_sendpipe;
	hc_metrics_lite->rmx_recvpipe = hc_entry->rmx_recvpipe;

	/*
	 * Unlock bucket row.
	 */
	THC_UNLOCK(&hc_entry->rmx_head->hch_mtx);
}

/*
 * External function: look up an entry in the hostcache and return the
 * discovered path MTU.  Returns 0 if no entry is found or the value is not
 * set.
 */
u_long
tcp_hc_getmtu(struct in_conninfo *inc)
{
	struct hc_metrics *hc_entry;
	u_long mtu;

	hc_entry = tcp_hc_lookup(inc);
	if (hc_entry == NULL) {
		return 0;
	}
	hc_entry->rmx_hits++;
	hc_entry->rmx_expire = V_tcp_hostcache.expire; /* start over again */

	mtu = hc_entry->rmx_mtu;
	THC_UNLOCK(&hc_entry->rmx_head->hch_mtx);
	return mtu;
}

/*
 * External function: update the MTU value of an entry in the hostcache.
 * Creates a new entry if none was found.
 */
void
tcp_hc_updatemtu(struct in_conninfo *inc, u_long mtu)
{
	struct hc_metrics *hc_entry;

	/*
	 * Find the right bucket.
	 */
	hc_entry = tcp_hc_lookup(inc);

	/*
	 * If we don't have an existing object, try to insert a new one.
	 */
	if (hc_entry == NULL) {
		hc_entry = tcp_hc_insert(inc);
		if (hc_entry == NULL)
			return;
	}
	hc_entry->rmx_updates++;
	hc_entry->rmx_expire = V_tcp_hostcache.expire; /* start over again */

	hc_entry->rmx_mtu = mtu;

	/*
	 * Put it upfront so we find it faster next time.
	 */
	TAILQ_REMOVE(&hc_entry->rmx_head->hch_bucket, hc_entry, rmx_q);
	TAILQ_INSERT_HEAD(&hc_entry->rmx_head->hch_bucket, hc_entry, rmx_q);

	/*
	 * Unlock bucket row.
	 */
	THC_UNLOCK(&hc_entry->rmx_head->hch_mtx);
}

/*
 * External function: update the TCP metrics of an entry in the hostcache.
 * Creates a new entry if none was found.
 */
void
tcp_hc_update(struct in_conninfo *inc, struct hc_metrics_lite *hcml)
{
	struct hc_metrics *hc_entry;

	hc_entry = tcp_hc_lookup(inc);
	if (hc_entry == NULL) {
		hc_entry = tcp_hc_insert(inc);
		if (hc_entry == NULL)
			return;
	}
	hc_entry->rmx_updates++;
	hc_entry->rmx_expire = V_tcp_hostcache.expire; /* start over again */

	if (hcml->rmx_rtt != 0) {
		if (hc_entry->rmx_rtt == 0)
			hc_entry->rmx_rtt = hcml->rmx_rtt;
		else
			hc_entry->rmx_rtt =
			    (hc_entry->rmx_rtt + hcml->rmx_rtt) / 2;
		TCPSTAT_INC(tcps_cachedrtt);
	}
	if (hcml->rmx_rttvar != 0) {
		if (hc_entry->rmx_rttvar == 0)
			hc_entry->rmx_rttvar = hcml->rmx_rttvar;
		else
			hc_entry->rmx_rttvar =
			    (hc_entry->rmx_rttvar + hcml->rmx_rttvar) / 2;
		TCPSTAT_INC(tcps_cachedrttvar);
	}
	if (hcml->rmx_ssthresh != 0) {
		if (hc_entry->rmx_ssthresh == 0)
			hc_entry->rmx_ssthresh = hcml->rmx_ssthresh;
		else
			hc_entry->rmx_ssthresh =
			    (hc_entry->rmx_ssthresh + hcml->rmx_ssthresh) / 2;
		TCPSTAT_INC(tcps_cachedssthresh);
	}
	if (hcml->rmx_bandwidth != 0) {
		if (hc_entry->rmx_bandwidth == 0)
			hc_entry->rmx_bandwidth = hcml->rmx_bandwidth;
		else
			hc_entry->rmx_bandwidth =
			    (hc_entry->rmx_bandwidth + hcml->rmx_bandwidth) / 2;
		/* TCPSTAT_INC(tcps_cachedbandwidth); */
	}
	if (hcml->rmx_cwnd != 0) {
		if (hc_entry->rmx_cwnd == 0)
			hc_entry->rmx_cwnd = hcml->rmx_cwnd;
		else
			hc_entry->rmx_cwnd =
			    (hc_entry->rmx_cwnd + hcml->rmx_cwnd) / 2;
		/* TCPSTAT_INC(tcps_cachedcwnd); */
	}
	if (hcml->rmx_sendpipe != 0) {
		if (hc_entry->rmx_sendpipe == 0)
			hc_entry->rmx_sendpipe = hcml->rmx_sendpipe;
		else
			hc_entry->rmx_sendpipe =
			    (hc_entry->rmx_sendpipe + hcml->rmx_sendpipe) / 2;
		/* TCPSTAT_INC(tcps_cachedsendpipe); */
	}
	if (hcml->rmx_recvpipe != 0) {
		if (hc_entry->rmx_recvpipe == 0)
			hc_entry->rmx_recvpipe = hcml->rmx_recvpipe;
		else
			hc_entry->rmx_recvpipe =
			    (hc_entry->rmx_recvpipe + hcml->rmx_recvpipe) / 2;
		/* TCPSTAT_INC(tcps_cachedrecvpipe); */
	}

	TAILQ_REMOVE(&hc_entry->rmx_head->hch_bucket, hc_entry, rmx_q);
	TAILQ_INSERT_HEAD(&hc_entry->rmx_head->hch_bucket, hc_entry, rmx_q);
	THC_UNLOCK(&hc_entry->rmx_head->hch_mtx);
}

/*
 * Sysctl function: prints the list and values of all hostcache entries in
 * unsorted order.
 */
static int
sysctl_tcp_hc_list(SYSCTL_HANDLER_ARGS)
{
	int bufsize;
	int linesize = 128;
	char *p, *buf;
	int len, i, error;
	struct hc_metrics *hc_entry;
#ifdef INET6
	char ip6buf[INET6_ADDRSTRLEN];
#endif

	bufsize = linesize * (V_tcp_hostcache.cache_count + 1);

	p = buf = (char *)malloc(bufsize, M_TEMP, M_WAITOK|M_ZERO);

	len = snprintf(p, linesize,
	    "\nIP address        MTU SSTHRESH      RTT   RTTVAR BANDWIDTH "
	    "    CWND SENDPIPE RECVPIPE HITS UPD EXP\n");
	p += len;

#define msec(u) (((u) + 500) / 1000)
	for (i = 0; i < V_tcp_hostcache.hashsize; i++) {
		THC_LOCK(&V_tcp_hostcache.hashbase[i].hch_mtx);
		TAILQ_FOREACH(hc_entry, &V_tcp_hostcache.hashbase[i].hch_bucket,
		    rmx_q) {
			len = snprintf(p, linesize,
			    "%-15s %5lu %8lu %6lums %6lums %9lu %8lu %8lu %8lu "
			    "%4lu %4lu %4i\n",
			    hc_entry->ip4.s_addr ? inet_ntoa(hc_entry->ip4) :
#ifdef INET6
			    ip6_sprintf(ip6buf, &hc_entry->ip6),
#else
			    "IPv6?",
#endif
			    hc_entry->rmx_mtu,
			    hc_entry->rmx_ssthresh,
			    msec(hc_entry->rmx_rtt *
				(RTM_RTTUNIT / (hz * TCP_RTT_SCALE))),
			    msec(hc_entry->rmx_rttvar *
				(RTM_RTTUNIT / (hz * TCP_RTTVAR_SCALE))),
			    hc_entry->rmx_bandwidth * 8,
			    hc_entry->rmx_cwnd,
			    hc_entry->rmx_sendpipe,
			    hc_entry->rmx_recvpipe,
			    hc_entry->rmx_hits,
			    hc_entry->rmx_updates,
			    hc_entry->rmx_expire);
			p += len;
		}
		THC_UNLOCK(&V_tcp_hostcache.hashbase[i].hch_mtx);
	}
#undef msec
	error = SYSCTL_OUT(req, buf, p - buf);
	free(buf, M_TEMP);
	return (error);
}

/*
 * Caller has to make sure the curvnet is set properly.
 */
static void
tcp_hc_purge_internal(int all)
{
	struct hc_metrics *hc_entry, *hc_next;
	int i;

	for (i = 0; i < V_tcp_hostcache.hashsize; i++) {
		THC_LOCK(&V_tcp_hostcache.hashbase[i].hch_mtx);
		TAILQ_FOREACH_SAFE(hc_entry,
		    &V_tcp_hostcache.hashbase[i].hch_bucket, rmx_q, hc_next) {
			if (all || hc_entry->rmx_expire <= 0) {
				TAILQ_REMOVE(&V_tcp_hostcache.hashbase[i].hch_bucket,
				    hc_entry, rmx_q);
				uma_zfree(V_tcp_hostcache.zone, hc_entry);
				V_tcp_hostcache.hashbase[i].hch_length--;
				V_tcp_hostcache.cache_count--;
			} else
				hc_entry->rmx_expire -= V_tcp_hostcache.prune;
		}
		THC_UNLOCK(&V_tcp_hostcache.hashbase[i].hch_mtx);
	}
}

/*
 * Expire and purge (old|all) entries in the tcp_hostcache.  Runs
 * periodically from the callout.
 */
static void
tcp_hc_purge(void *arg)
{
	CURVNET_SET((struct vnet *) arg);
	int all = 0;

	if (V_tcp_hostcache.purgeall) {
		all = 1;
		V_tcp_hostcache.purgeall = 0;
	}

	tcp_hc_purge_internal(all);

	callout_reset(&V_tcp_hc_callout, V_tcp_hostcache.prune * hz,
	    tcp_hc_purge, arg);
	CURVNET_RESTORE();
}
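
/*
 * Illustrative administration notes (run from userland, not part of this
 * file), based on the sysctl knobs defined above:
 *
 *	sysctl net.inet.tcp.hostcache.list	 dump all cached entries
 *	sysctl net.inet.tcp.hostcache.purge=1	 expire every entry on the
 *						 next prune run
 *
 * hashsize, bucketlimit and cachelimit are CTLFLAG_RDTUN and can only be
 * set as loader tunables, e.g. in loader.conf (hashsize must be a power
 * of 2):
 *
 *	net.inet.tcp.hostcache.hashsize=1024
 */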