/*-
 * Copyright (c) 2002 Andre Oppermann, Internet Business Solutions AG
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * The tcp_hostcache moves the tcp-specific cached metrics from the routing
 * table to a dedicated structure indexed by the remote IP address.  It keeps
 * information on the measured TCP parameters of past TCP sessions to allow
 * better initial start values to be used with later connections to/from the
 * same source.  Depending on the network parameters (delay, bandwidth, max
 * MTU, congestion window) between local and remote sites, this can lead to
 * significant speed-ups for new TCP connections after the first one.
 *
 * Due to the tcp_hostcache, all TCP-specific metrics information in the
 * routing table has been removed.  The inpcb no longer keeps a pointer to
 * the routing entry, and protocol-initiated route cloning has been removed
 * as well.  With these changes, the routing table has gone back to being
 * more lightweight and only carries information related to packet forwarding.
 *
 * tcp_hostcache is designed for multiple concurrent access in SMP
 * environments and high contention.  All bucket rows have their own lock and
 * thus multiple lookups and modifications can be done at the same time as
 * long as they are in different bucket rows.  If a request for insertion of
 * a new record can't be satisfied, it simply returns an empty structure.
 * Nobody and nothing outside of tcp_hostcache.c will ever point directly to
 * any entry in the tcp_hostcache.  All communication is done in an
 * object-oriented way and only functions of tcp_hostcache will manipulate
 * hostcache entries.  Otherwise, we are unable to achieve good behaviour in
 * concurrent access situations.  Since tcp_hostcache is only caching
 * information, there are no fatal consequences if we either can't satisfy
 * any particular request or have to drop/overwrite an existing entry because
 * of bucket limit or memory constraints.
 */
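
/*
 * Illustrative sketch (not part of this file's logic): the rest of the TCP
 * stack is expected to consume the cache roughly as follows, e.g. from the
 * MSS computation and connection teardown paths:
 *
 *        struct hc_metrics_lite metrics;
 *
 *        tcp_hc_get(&inp->inp_inc, &metrics);     (at connection setup)
 *        ...
 *        tcp_hc_update(&inp->inp_inc, &metrics);  (at connection teardown)
 *
 * A zeroed field in hc_metrics_lite means "no cached value" and the caller
 * falls back to its usual defaults.
 */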

/*
 * Many thanks to jlemon for the basic structure of tcp_syncache, which is
 * being followed here.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/vimage.h>

#include <net/if.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/in_var.h>
#include <netinet/in_pcb.h>
#include <netinet/ip_var.h>
#ifdef INET6
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#endif
#include <netinet/tcp.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_hostcache.h>
#include <netinet/vinet.h>
#ifdef INET6
#include <netinet6/tcp6_var.h>
#endif

#include <vm/uma.h>

/* Arbitrary values */
#define TCP_HOSTCACHE_HASHSIZE          512
#define TCP_HOSTCACHE_BUCKETLIMIT       30
#define TCP_HOSTCACHE_EXPIRE            60*60   /* one hour */
#define TCP_HOSTCACHE_PRUNE             5*60    /* every 5 minutes */

#ifdef VIMAGE_GLOBALS
static struct tcp_hostcache tcp_hostcache;
static struct callout tcp_hc_callout;
#endif

static struct hc_metrics *tcp_hc_lookup(struct in_conninfo *);
static struct hc_metrics *tcp_hc_insert(struct in_conninfo *);
static int sysctl_tcp_hc_list(SYSCTL_HANDLER_ARGS);
static void tcp_hc_purge(void *);

SYSCTL_NODE(_net_inet_tcp, OID_AUTO, hostcache, CTLFLAG_RW, 0,
    "TCP Host cache");

SYSCTL_V_INT(V_NET, vnet_inet, _net_inet_tcp_hostcache, OID_AUTO, cachelimit,
    CTLFLAG_RDTUN, tcp_hostcache.cache_limit, 0,
    "Overall entry limit for hostcache");

SYSCTL_V_INT(V_NET, vnet_inet, _net_inet_tcp_hostcache, OID_AUTO, hashsize,
    CTLFLAG_RDTUN, tcp_hostcache.hashsize, 0,
    "Size of TCP hostcache hashtable");

SYSCTL_V_INT(V_NET, vnet_inet, _net_inet_tcp_hostcache, OID_AUTO, bucketlimit,
    CTLFLAG_RDTUN, tcp_hostcache.bucket_limit, 0,
    "Per-bucket hash limit for hostcache");

SYSCTL_V_INT(V_NET, vnet_inet, _net_inet_tcp_hostcache, OID_AUTO, count,
    CTLFLAG_RD, tcp_hostcache.cache_count, 0,
    "Current number of entries in hostcache");

SYSCTL_V_INT(V_NET, vnet_inet, _net_inet_tcp_hostcache, OID_AUTO, expire,
    CTLFLAG_RW, tcp_hostcache.expire, 0,
    "Expire time of TCP hostcache entries");

SYSCTL_V_INT(V_NET, vnet_inet, _net_inet_tcp_hostcache, OID_AUTO, prune,
    CTLFLAG_RW, tcp_hostcache.prune, 0, "Time between purge runs");

SYSCTL_V_INT(V_NET, vnet_inet, _net_inet_tcp_hostcache, OID_AUTO, purge,
    CTLFLAG_RW, tcp_hostcache.purgeall, 0,
    "Expire all entries on next purge run");

SYSCTL_PROC(_net_inet_tcp_hostcache, OID_AUTO, list,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_SKIP, 0, 0,
    sysctl_tcp_hc_list, "A", "List of all hostcache entries");

static MALLOC_DEFINE(M_HOSTCACHE, "hostcache", "TCP hostcache");

#define HOSTCACHE_HASH(ip) \
        (((ip)->s_addr ^ ((ip)->s_addr >> 7) ^ ((ip)->s_addr >> 17)) & \
         V_tcp_hostcache.hashmask)
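
/*
 * Note on the IPv4 hash above (descriptive only): the 32-bit address is
 * folded onto itself with two shifted XORs and masked with hashmask.
 * Because hashmask is hashsize - 1 and tcp_hc_init() forces hashsize to a
 * power of two, the result is always a valid bucket index; with the default
 * hashsize of 512 it is simply the low 9 bits of the folded address.
 */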

/* XXX: What is the recommended hash to get good entropy for IPv6 addresses? */
#define HOSTCACHE_HASH6(ip6)                    \
        (((ip6)->s6_addr32[0] ^                 \
          (ip6)->s6_addr32[1] ^                 \
          (ip6)->s6_addr32[2] ^                 \
          (ip6)->s6_addr32[3]) &                \
         V_tcp_hostcache.hashmask)

#define THC_LOCK(lp)            mtx_lock(lp)
#define THC_UNLOCK(lp)          mtx_unlock(lp)

void
tcp_hc_init(void)
{
        INIT_VNET_INET(curvnet);
        int i;

        /*
         * Initialize hostcache structures.
         */
        V_tcp_hostcache.cache_count = 0;
        V_tcp_hostcache.hashsize = TCP_HOSTCACHE_HASHSIZE;
        V_tcp_hostcache.bucket_limit = TCP_HOSTCACHE_BUCKETLIMIT;
        V_tcp_hostcache.cache_limit =
            V_tcp_hostcache.hashsize * V_tcp_hostcache.bucket_limit;
        V_tcp_hostcache.expire = TCP_HOSTCACHE_EXPIRE;
        V_tcp_hostcache.prune = TCP_HOSTCACHE_PRUNE;

        TUNABLE_INT_FETCH("net.inet.tcp.hostcache.hashsize",
            &V_tcp_hostcache.hashsize);
        TUNABLE_INT_FETCH("net.inet.tcp.hostcache.cachelimit",
            &V_tcp_hostcache.cache_limit);
        TUNABLE_INT_FETCH("net.inet.tcp.hostcache.bucketlimit",
            &V_tcp_hostcache.bucket_limit);
        if (!powerof2(V_tcp_hostcache.hashsize)) {
                printf("WARNING: hostcache hash size is not a power of 2.\n");
                V_tcp_hostcache.hashsize = TCP_HOSTCACHE_HASHSIZE; /* default */
        }
        V_tcp_hostcache.hashmask = V_tcp_hostcache.hashsize - 1;

        /*
         * Allocate the hash table.
         */
        V_tcp_hostcache.hashbase = (struct hc_head *)
            malloc(V_tcp_hostcache.hashsize * sizeof(struct hc_head),
            M_HOSTCACHE, M_WAITOK | M_ZERO);

        /*
         * Initialize the hash buckets.
         */
        for (i = 0; i < V_tcp_hostcache.hashsize; i++) {
                TAILQ_INIT(&V_tcp_hostcache.hashbase[i].hch_bucket);
                V_tcp_hostcache.hashbase[i].hch_length = 0;
                mtx_init(&V_tcp_hostcache.hashbase[i].hch_mtx, "tcp_hc_entry",
                    NULL, MTX_DEF);
        }

        /*
         * Allocate the hostcache entries.
         */
        V_tcp_hostcache.zone =
            uma_zcreate("hostcache", sizeof(struct hc_metrics),
            NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
        uma_zone_set_max(V_tcp_hostcache.zone, V_tcp_hostcache.cache_limit);

        /*
         * Set up periodic cache cleanup.
         */
        callout_init(&V_tcp_hc_callout, CALLOUT_MPSAFE);
        callout_reset(&V_tcp_hc_callout, V_tcp_hostcache.prune * hz,
            tcp_hc_purge, 0);
}
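
/*
 * Tuning example (illustrative values): hashsize, cachelimit and bucketlimit
 * are read-only tunables (CTLFLAG_RDTUN) and therefore can only be set from
 * the loader, e.g. in /boot/loader.conf:
 *
 *        net.inet.tcp.hostcache.hashsize="1024"
 *        net.inet.tcp.hostcache.bucketlimit="30"
 *
 * A hash size that is not a power of two is rejected with a console warning
 * and falls back to the default of 512.
 */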

/*
 * Internal function: look up an entry in the hostcache or return NULL.
 *
 * If an entry has been returned, the caller becomes responsible for
 * unlocking the bucket row when done reading or modifying the entry.
 */
static struct hc_metrics *
tcp_hc_lookup(struct in_conninfo *inc)
{
        INIT_VNET_INET(curvnet);
        int hash;
        struct hc_head *hc_head;
        struct hc_metrics *hc_entry;

        KASSERT(inc != NULL, ("tcp_hc_lookup with NULL in_conninfo pointer"));

        /*
         * Hash the foreign ip address.
         */
        if (inc->inc_flags & INC_ISIPV6)
                hash = HOSTCACHE_HASH6(&inc->inc6_faddr);
        else
                hash = HOSTCACHE_HASH(&inc->inc_faddr);

        hc_head = &V_tcp_hostcache.hashbase[hash];

        /*
         * Acquire lock for this bucket row; we release the lock if we don't
         * find an entry, otherwise the caller has to unlock when done.
         */
        THC_LOCK(&hc_head->hch_mtx);

        /*
         * Iterate through entries in bucket row looking for a match.
         */
        TAILQ_FOREACH(hc_entry, &hc_head->hch_bucket, rmx_q) {
                if (inc->inc_flags & INC_ISIPV6) {
                        if (memcmp(&inc->inc6_faddr, &hc_entry->ip6,
                            sizeof(inc->inc6_faddr)) == 0)
                                return hc_entry;
                } else {
                        if (memcmp(&inc->inc_faddr, &hc_entry->ip4,
                            sizeof(inc->inc_faddr)) == 0)
                                return hc_entry;
                }
        }

        /*
         * We were unsuccessful and didn't find anything.
         */
        THC_UNLOCK(&hc_head->hch_mtx);
        return NULL;
}

/*
 * Internal function: insert an entry into the hostcache or return NULL if
 * unable to allocate a new one.
 *
 * If an entry has been returned, the caller becomes responsible for
 * unlocking the bucket row when done reading or modifying the entry.
 */
static struct hc_metrics *
tcp_hc_insert(struct in_conninfo *inc)
{
        INIT_VNET_INET(curvnet);
        int hash;
        struct hc_head *hc_head;
        struct hc_metrics *hc_entry;

        KASSERT(inc != NULL, ("tcp_hc_insert with NULL in_conninfo pointer"));

        /*
         * Hash the foreign ip address.
         */
        if (inc->inc_flags & INC_ISIPV6)
                hash = HOSTCACHE_HASH6(&inc->inc6_faddr);
        else
                hash = HOSTCACHE_HASH(&inc->inc_faddr);

        hc_head = &V_tcp_hostcache.hashbase[hash];

        /*
         * Acquire lock for this bucket row; we release the lock if we don't
         * find an entry, otherwise the caller has to unlock when done.
         */
        THC_LOCK(&hc_head->hch_mtx);

        /*
         * If the bucket limit is reached, reuse the least-used element.
         */
        if (hc_head->hch_length >= V_tcp_hostcache.bucket_limit ||
            V_tcp_hostcache.cache_count >= V_tcp_hostcache.cache_limit) {
                hc_entry = TAILQ_LAST(&hc_head->hch_bucket, hc_qhead);
                /*
                 * At first we were dropping the last element, just to
                 * reacquire it in the next two lines again, which isn't very
                 * efficient.  Instead just reuse the least used element.
                 * We may drop something that is still "in-use" but we can be
                 * "lossy".
                 * Just give up if this bucket row is empty and we don't have
                 * anything to replace.
                 */
                if (hc_entry == NULL) {
                        THC_UNLOCK(&hc_head->hch_mtx);
                        return NULL;
                }
                TAILQ_REMOVE(&hc_head->hch_bucket, hc_entry, rmx_q);
                V_tcp_hostcache.hashbase[hash].hch_length--;
                V_tcp_hostcache.cache_count--;
                V_tcpstat.tcps_hc_bucketoverflow++;
#if 0
                uma_zfree(V_tcp_hostcache.zone, hc_entry);
#endif
        } else {
                /*
                 * Allocate a new entry, or balk if not possible.
                 */
                hc_entry = uma_zalloc(V_tcp_hostcache.zone, M_NOWAIT);
                if (hc_entry == NULL) {
                        THC_UNLOCK(&hc_head->hch_mtx);
                        return NULL;
                }
        }

        /*
         * Initialize basic information of hostcache entry.
         */
        bzero(hc_entry, sizeof(*hc_entry));
        if (inc->inc_flags & INC_ISIPV6)
                bcopy(&inc->inc6_faddr, &hc_entry->ip6, sizeof(hc_entry->ip6));
        else
                hc_entry->ip4 = inc->inc_faddr;
        hc_entry->rmx_head = hc_head;
        hc_entry->rmx_expire = V_tcp_hostcache.expire;

        /*
         * Put it upfront.
         */
        TAILQ_INSERT_HEAD(&hc_head->hch_bucket, hc_entry, rmx_q);
        V_tcp_hostcache.hashbase[hash].hch_length++;
        V_tcp_hostcache.cache_count++;
        V_tcpstat.tcps_hc_added++;

        return hc_entry;
}
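
/*
 * Locking contract shared by tcp_hc_lookup() and tcp_hc_insert(): on
 * success the bucket row is returned locked and the caller must unlock it.
 * The external functions below all follow the same pattern, sketched here:
 *
 *        hc_entry = tcp_hc_lookup(inc);
 *        if (hc_entry == NULL)
 *                return;                 (or try tcp_hc_insert() instead)
 *        ... read or update hc_entry fields ...
 *        THC_UNLOCK(&hc_entry->rmx_head->hch_mtx);
 */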

/*
 * External function: look up an entry in the hostcache and fill out the
 * supplied TCP metrics structure.  Fills in zero when no entry was found or
 * a value is not set.
 */
void
tcp_hc_get(struct in_conninfo *inc, struct hc_metrics_lite *hc_metrics_lite)
{
        INIT_VNET_INET(curvnet);
        struct hc_metrics *hc_entry;

        /*
         * Find the right bucket.
         */
        hc_entry = tcp_hc_lookup(inc);

        /*
         * If we don't have an existing object.
         */
        if (hc_entry == NULL) {
                bzero(hc_metrics_lite, sizeof(*hc_metrics_lite));
                return;
        }
        hc_entry->rmx_hits++;
        hc_entry->rmx_expire = V_tcp_hostcache.expire; /* start over again */

        hc_metrics_lite->rmx_mtu = hc_entry->rmx_mtu;
        hc_metrics_lite->rmx_ssthresh = hc_entry->rmx_ssthresh;
        hc_metrics_lite->rmx_rtt = hc_entry->rmx_rtt;
        hc_metrics_lite->rmx_rttvar = hc_entry->rmx_rttvar;
        hc_metrics_lite->rmx_bandwidth = hc_entry->rmx_bandwidth;
        hc_metrics_lite->rmx_cwnd = hc_entry->rmx_cwnd;
        hc_metrics_lite->rmx_sendpipe = hc_entry->rmx_sendpipe;
        hc_metrics_lite->rmx_recvpipe = hc_entry->rmx_recvpipe;

        /*
         * Unlock bucket row.
         */
        THC_UNLOCK(&hc_entry->rmx_head->hch_mtx);
}

/*
 * External function: look up an entry in the hostcache and return the
 * discovered path MTU.  Returns zero if no entry is found or the value is
 * not set.
 */
u_long
tcp_hc_getmtu(struct in_conninfo *inc)
{
        INIT_VNET_INET(curvnet);
        struct hc_metrics *hc_entry;
        u_long mtu;

        hc_entry = tcp_hc_lookup(inc);
        if (hc_entry == NULL) {
                return 0;
        }
        hc_entry->rmx_hits++;
        hc_entry->rmx_expire = V_tcp_hostcache.expire; /* start over again */

        mtu = hc_entry->rmx_mtu;
        THC_UNLOCK(&hc_entry->rmx_head->hch_mtx);
        return mtu;
}

/*
 * External function: update the MTU value of an entry in the hostcache.
 * Creates a new entry if none was found.
 */
void
tcp_hc_updatemtu(struct in_conninfo *inc, u_long mtu)
{
        INIT_VNET_INET(curvnet);
        struct hc_metrics *hc_entry;

        /*
         * Find the right bucket.
         */
        hc_entry = tcp_hc_lookup(inc);

        /*
         * If we don't have an existing object, try to insert a new one.
         */
        if (hc_entry == NULL) {
                hc_entry = tcp_hc_insert(inc);
                if (hc_entry == NULL)
                        return;
        }
        hc_entry->rmx_updates++;
        hc_entry->rmx_expire = V_tcp_hostcache.expire; /* start over again */

        hc_entry->rmx_mtu = mtu;

        /*
         * Put it upfront so we find it faster next time.
         */
        TAILQ_REMOVE(&hc_entry->rmx_head->hch_bucket, hc_entry, rmx_q);
        TAILQ_INSERT_HEAD(&hc_entry->rmx_head->hch_bucket, hc_entry, rmx_q);

        /*
         * Unlock bucket row.
         */
        THC_UNLOCK(&hc_entry->rmx_head->hch_mtx);
}
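
/*
 * Usage note (illustrative, the actual callers live elsewhere in the TCP
 * code): tcp_hc_getmtu() and tcp_hc_updatemtu() back path MTU discovery.
 * A hypothetical caller reacting to a smaller discovered MTU might do:
 *
 *        mtu = tcp_hc_getmtu(&inp->inp_inc);
 *        if (mtu == 0 || newmtu < mtu)
 *                tcp_hc_updatemtu(&inp->inp_inc, newmtu);
 *
 * A return value of zero means "no cached MTU"; the caller is expected to
 * fall back to the route or interface MTU.
 */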

/*
 * External function: update the TCP metrics of an entry in the hostcache.
 * Creates a new entry if none was found.
 */
void
tcp_hc_update(struct in_conninfo *inc, struct hc_metrics_lite *hcml)
{
        INIT_VNET_INET(curvnet);
        struct hc_metrics *hc_entry;

        hc_entry = tcp_hc_lookup(inc);
        if (hc_entry == NULL) {
                hc_entry = tcp_hc_insert(inc);
                if (hc_entry == NULL)
                        return;
        }
        hc_entry->rmx_updates++;
        hc_entry->rmx_expire = V_tcp_hostcache.expire; /* start over again */

        if (hcml->rmx_rtt != 0) {
                if (hc_entry->rmx_rtt == 0)
                        hc_entry->rmx_rtt = hcml->rmx_rtt;
                else
                        hc_entry->rmx_rtt =
                            (hc_entry->rmx_rtt + hcml->rmx_rtt) / 2;
                V_tcpstat.tcps_cachedrtt++;
        }
        if (hcml->rmx_rttvar != 0) {
                if (hc_entry->rmx_rttvar == 0)
                        hc_entry->rmx_rttvar = hcml->rmx_rttvar;
                else
                        hc_entry->rmx_rttvar =
                            (hc_entry->rmx_rttvar + hcml->rmx_rttvar) / 2;
                V_tcpstat.tcps_cachedrttvar++;
        }
        if (hcml->rmx_ssthresh != 0) {
                if (hc_entry->rmx_ssthresh == 0)
                        hc_entry->rmx_ssthresh = hcml->rmx_ssthresh;
                else
                        hc_entry->rmx_ssthresh =
                            (hc_entry->rmx_ssthresh + hcml->rmx_ssthresh) / 2;
                V_tcpstat.tcps_cachedssthresh++;
        }
        if (hcml->rmx_bandwidth != 0) {
                if (hc_entry->rmx_bandwidth == 0)
                        hc_entry->rmx_bandwidth = hcml->rmx_bandwidth;
                else
                        hc_entry->rmx_bandwidth =
                            (hc_entry->rmx_bandwidth + hcml->rmx_bandwidth) / 2;
                /* V_tcpstat.tcps_cachedbandwidth++; */
        }
        if (hcml->rmx_cwnd != 0) {
                if (hc_entry->rmx_cwnd == 0)
                        hc_entry->rmx_cwnd = hcml->rmx_cwnd;
                else
                        hc_entry->rmx_cwnd =
                            (hc_entry->rmx_cwnd + hcml->rmx_cwnd) / 2;
                /* V_tcpstat.tcps_cachedcwnd++; */
        }
        if (hcml->rmx_sendpipe != 0) {
                if (hc_entry->rmx_sendpipe == 0)
                        hc_entry->rmx_sendpipe = hcml->rmx_sendpipe;
                else
                        hc_entry->rmx_sendpipe =
                            (hc_entry->rmx_sendpipe + hcml->rmx_sendpipe) / 2;
                /* V_tcpstat.tcps_cachedsendpipe++; */
        }
        if (hcml->rmx_recvpipe != 0) {
                if (hc_entry->rmx_recvpipe == 0)
                        hc_entry->rmx_recvpipe = hcml->rmx_recvpipe;
                else
                        hc_entry->rmx_recvpipe =
                            (hc_entry->rmx_recvpipe + hcml->rmx_recvpipe) / 2;
                /* V_tcpstat.tcps_cachedrecvpipe++; */
        }

        TAILQ_REMOVE(&hc_entry->rmx_head->hch_bucket, hc_entry, rmx_q);
        TAILQ_INSERT_HEAD(&hc_entry->rmx_head->hch_bucket, hc_entry, rmx_q);
        THC_UNLOCK(&hc_entry->rmx_head->hch_mtx);
}
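
/*
 * Worked example for the update rule above (illustrative numbers): each
 * metric is smoothed by averaging the cached value with the newest sample.
 * If the cached RTT value is 100 and a closing connection reports 60, the
 * new cached value becomes (100 + 60) / 2 = 80 (in the kernel's scaled RTT
 * units).  A sample of zero means "no data" and never overwrites an
 * existing value.
 */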

/*
 * Sysctl function: prints the list and values of all hostcache entries in
 * unsorted order.
 */
static int
sysctl_tcp_hc_list(SYSCTL_HANDLER_ARGS)
{
        INIT_VNET_INET(curvnet);
        int bufsize;
        int linesize = 128;
        char *p, *buf;
        int len, i, error;
        struct hc_metrics *hc_entry;
#ifdef INET6
        char ip6buf[INET6_ADDRSTRLEN];
#endif

        bufsize = linesize * (V_tcp_hostcache.cache_count + 1);

        p = buf = (char *)malloc(bufsize, M_TEMP, M_WAITOK|M_ZERO);

        len = snprintf(p, linesize,
            "\nIP address        MTU SSTHRESH      RTT   RTTVAR BANDWIDTH "
            "    CWND SENDPIPE RECVPIPE HITS UPD EXP\n");
        p += len;

#define msec(u) (((u) + 500) / 1000)
        for (i = 0; i < V_tcp_hostcache.hashsize; i++) {
                THC_LOCK(&V_tcp_hostcache.hashbase[i].hch_mtx);
                TAILQ_FOREACH(hc_entry, &V_tcp_hostcache.hashbase[i].hch_bucket,
                    rmx_q) {
                        len = snprintf(p, linesize,
                            "%-15s %5lu %8lu %6lums %6lums %9lu %8lu %8lu %8lu "
                            "%4lu %4lu %4i\n",
                            hc_entry->ip4.s_addr ? inet_ntoa(hc_entry->ip4) :
#ifdef INET6
                                ip6_sprintf(ip6buf, &hc_entry->ip6),
#else
                                "IPv6?",
#endif
                            hc_entry->rmx_mtu,
                            hc_entry->rmx_ssthresh,
                            msec(hc_entry->rmx_rtt *
                                (RTM_RTTUNIT / (hz * TCP_RTT_SCALE))),
                            msec(hc_entry->rmx_rttvar *
                                (RTM_RTTUNIT / (hz * TCP_RTT_SCALE))),
                            hc_entry->rmx_bandwidth * 8,
                            hc_entry->rmx_cwnd,
                            hc_entry->rmx_sendpipe,
                            hc_entry->rmx_recvpipe,
                            hc_entry->rmx_hits,
                            hc_entry->rmx_updates,
                            hc_entry->rmx_expire);
                        p += len;
                }
                THC_UNLOCK(&V_tcp_hostcache.hashbase[i].hch_mtx);
        }
#undef msec
        error = SYSCTL_OUT(req, buf, p - buf);
        free(buf, M_TEMP);
        return(error);
}

/*
 * Expire and purge (old|all) entries in the tcp_hostcache.  Runs
 * periodically from the callout.
 */
static void
tcp_hc_purge(void *arg)
{
        INIT_VNET_INET(curvnet);
        struct hc_metrics *hc_entry, *hc_next;
        int all = (intptr_t)arg;
        int i;

        if (V_tcp_hostcache.purgeall) {
                all = 1;
                V_tcp_hostcache.purgeall = 0;
        }

        for (i = 0; i < V_tcp_hostcache.hashsize; i++) {
                THC_LOCK(&V_tcp_hostcache.hashbase[i].hch_mtx);
                TAILQ_FOREACH_SAFE(hc_entry,
                    &V_tcp_hostcache.hashbase[i].hch_bucket, rmx_q, hc_next) {
                        if (all || hc_entry->rmx_expire <= 0) {
                                TAILQ_REMOVE(&V_tcp_hostcache.hashbase[i].hch_bucket,
                                    hc_entry, rmx_q);
                                uma_zfree(V_tcp_hostcache.zone, hc_entry);
                                V_tcp_hostcache.hashbase[i].hch_length--;
                                V_tcp_hostcache.cache_count--;
                        } else
                                hc_entry->rmx_expire -= V_tcp_hostcache.prune;
                }
                THC_UNLOCK(&V_tcp_hostcache.hashbase[i].hch_mtx);
        }

        callout_reset(&V_tcp_hc_callout, V_tcp_hostcache.prune * hz,
            tcp_hc_purge, arg);
}
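
/*
 * Administrative interface examples (illustrative): the cache contents can
 * be dumped with
 *
 *        sysctl net.inet.tcp.hostcache.list
 *
 * and all entries can be flagged for removal on the next purge run (at most
 * net.inet.tcp.hostcache.prune seconds away) with
 *
 *        sysctl net.inet.tcp.hostcache.purge=1
 */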