Lines Matching refs:hc_entry (FreeBSD sys/netinet/tcp_hostcache.c)

315 tcp_hc_cmp(struct hc_metrics *hc_entry, const struct in_conninfo *inc)  in tcp_hc_cmp()  argument
320 if (memcmp(&inc->inc6_faddr, &hc_entry->ip6, in tcp_hc_cmp()
324 if (memcmp(&inc->inc_faddr, &hc_entry->ip4, in tcp_hc_cmp()
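The two memcmp lines above are the body of the comparison helper: depending on the connection's address family, the cached entry's foreign address (ip6 or ip4) is compared against the one in the in_conninfo. A minimal user-space sketch of that idea follows; the structure and function names are hypothetical, not the kernel's.

#include <stdbool.h>
#include <string.h>
#include <netinet/in.h>

/* Hypothetical, simplified stand-ins for the kernel structures. */
struct hc_addr_sketch {
	struct in_addr  ip4;
	struct in6_addr ip6;
};

struct conninfo_sketch {
	bool            is_ipv6;
	struct in_addr  faddr4;
	struct in6_addr faddr6;
};

/* True when the cached entry's foreign address matches the connection's. */
static bool
hc_cmp_sketch(const struct hc_addr_sketch *e, const struct conninfo_sketch *inc)
{
	if (inc->is_ipv6)
		return (memcmp(&inc->faddr6, &e->ip6, sizeof(e->ip6)) == 0);
	return (memcmp(&inc->faddr4, &e->ip4, sizeof(e->ip4)) == 0);
}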
340 struct hc_metrics *hc_entry; in tcp_hc_lookup() local
350 CK_SLIST_FOREACH(hc_entry, &hc_head->hch_bucket, hc_q) in tcp_hc_lookup()
351 if (tcp_hc_cmp(hc_entry, inc)) in tcp_hc_lookup()
354 if (hc_entry != NULL) { in tcp_hc_lookup()
355 if (atomic_load_int(&hc_entry->hc_expire) != in tcp_hc_lookup()
357 atomic_store_int(&hc_entry->hc_expire, in tcp_hc_lookup()
360 hc_entry->hc_hits++; in tcp_hc_lookup()
365 return (hc_entry); in tcp_hc_lookup()
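tcp_hc_lookup() walks a single hash bucket and, on a hit, refreshes the entry's hc_expire back to the configured lifetime and bumps hc_hits. Below is a simplified, single-threaded sketch of that bucket walk; the kernel version iterates a CK_SLIST inside an SMR read section, and every name here is hypothetical.

#include <stddef.h>
#include <stdint.h>

struct hc_node_sketch {
	struct hc_node_sketch *next;
	uint32_t faddr;		/* foreign address, reduced to a plain key */
	int	 expire;	/* seconds of lifetime left */
	int	 hits;
};

static struct hc_node_sketch *
hc_lookup_sketch(struct hc_node_sketch *bucket_head, uint32_t faddr,
    int default_expire)
{
	struct hc_node_sketch *n;

	for (n = bucket_head; n != NULL; n = n->next)
		if (n->faddr == faddr)
			break;
	if (n != NULL) {
		if (n->expire != default_expire)
			n->expire = default_expire;	/* a hit refreshes the lifetime */
		n->hits++;
	}
	return (n);
}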
377 struct hc_metrics *hc_entry; in tcp_hc_get() local
387 hc_entry = tcp_hc_lookup(inc); in tcp_hc_get()
392 if (hc_entry == NULL) { in tcp_hc_get()
397 hc_metrics_lite->hc_mtu = atomic_load_32(&hc_entry->hc_mtu); in tcp_hc_get()
398 hc_metrics_lite->hc_ssthresh = atomic_load_32(&hc_entry->hc_ssthresh); in tcp_hc_get()
399 hc_metrics_lite->hc_rtt = atomic_load_32(&hc_entry->hc_rtt); in tcp_hc_get()
400 hc_metrics_lite->hc_rttvar = atomic_load_32(&hc_entry->hc_rttvar); in tcp_hc_get()
401 hc_metrics_lite->hc_cwnd = atomic_load_32(&hc_entry->hc_cwnd); in tcp_hc_get()
402 hc_metrics_lite->hc_sendpipe = atomic_load_32(&hc_entry->hc_sendpipe); in tcp_hc_get()
403 hc_metrics_lite->hc_recvpipe = atomic_load_32(&hc_entry->hc_recvpipe); in tcp_hc_get()
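tcp_hc_get() copies the cached metrics field by field into a caller-owned hc_metrics_lite structure using relaxed 32-bit loads, so the caller keeps no pointer into the cache once the lookup's read section ends. A rough C11-atomics sketch of that copy-out pattern (hypothetical names; only per-field atomicity is intended, not a consistent snapshot):

#include <stdatomic.h>
#include <stdint.h>

/* Shared cache entry: each metric is an independently atomic 32-bit value. */
struct hc_metrics_sketch {
	_Atomic uint32_t mtu, ssthresh, rtt, rttvar, cwnd, sendpipe, recvpipe;
};

/* Caller-owned snapshot with plain fields. */
struct hc_metrics_lite_sketch {
	uint32_t mtu, ssthresh, rtt, rttvar, cwnd, sendpipe, recvpipe;
};

static void
hc_get_sketch(const struct hc_metrics_sketch *e,
    struct hc_metrics_lite_sketch *out)
{
	/* Copy field by field; each 32-bit load is atomic on its own. */
	out->mtu      = atomic_load_explicit(&e->mtu, memory_order_relaxed);
	out->ssthresh = atomic_load_explicit(&e->ssthresh, memory_order_relaxed);
	out->rtt      = atomic_load_explicit(&e->rtt, memory_order_relaxed);
	out->rttvar   = atomic_load_explicit(&e->rttvar, memory_order_relaxed);
	out->cwnd     = atomic_load_explicit(&e->cwnd, memory_order_relaxed);
	out->sendpipe = atomic_load_explicit(&e->sendpipe, memory_order_relaxed);
	out->recvpipe = atomic_load_explicit(&e->recvpipe, memory_order_relaxed);
}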
416 struct hc_metrics *hc_entry; in tcp_hc_getmtu() local
422 hc_entry = tcp_hc_lookup(inc); in tcp_hc_getmtu()
423 if (hc_entry == NULL) { in tcp_hc_getmtu()
427 mtu = atomic_load_32(&hc_entry->hc_mtu); in tcp_hc_getmtu()
453 struct hc_metrics *hc_entry, *hc_prev; in tcp_hc_update() local
464 CK_SLIST_FOREACH(hc_entry, &hc_head->hch_bucket, hc_q) { in tcp_hc_update()
465 if (tcp_hc_cmp(hc_entry, inc)) in tcp_hc_update()
467 if (CK_SLIST_NEXT(hc_entry, hc_q) != NULL) in tcp_hc_update()
468 hc_prev = hc_entry; in tcp_hc_update()
471 if (hc_entry != NULL) { in tcp_hc_update()
472 if (atomic_load_int(&hc_entry->hc_expire) != in tcp_hc_update()
474 atomic_store_int(&hc_entry->hc_expire, in tcp_hc_update()
477 hc_entry->hc_updates++; in tcp_hc_update()
495 hc_entry = CK_SLIST_NEXT(hc_prev, hc_q); in tcp_hc_update()
496 KASSERT(CK_SLIST_NEXT(hc_entry, hc_q) == NULL, in tcp_hc_update()
500 } else if ((hc_entry = in tcp_hc_update()
502 KASSERT(CK_SLIST_NEXT(hc_entry, hc_q) == NULL, in tcp_hc_update()
504 __func__, hc_entry)); in tcp_hc_update()
518 uma_zfree_smr(V_tcp_hostcache.zone, hc_entry); in tcp_hc_update()
524 hc_entry = uma_zalloc_smr(V_tcp_hostcache.zone, M_NOWAIT); in tcp_hc_update()
525 if (hc_entry == NULL) { in tcp_hc_update()
533 bzero(hc_entry, sizeof(*hc_entry)); in tcp_hc_update()
535 hc_entry->ip6 = inc->inc6_faddr; in tcp_hc_update()
536 hc_entry->ip6_zoneid = inc->inc6_zoneid; in tcp_hc_update()
538 hc_entry->ip4 = inc->inc_faddr; in tcp_hc_update()
539 hc_entry->hc_expire = V_tcp_hostcache.expire; in tcp_hc_update()
548 atomic_store_32(&hc_entry->hc_mtu, hcml->hc_mtu); in tcp_hc_update()
551 if (hc_entry->hc_rtt == 0) in tcp_hc_update()
554 v = ((uint64_t)hc_entry->hc_rtt + in tcp_hc_update()
556 atomic_store_32(&hc_entry->hc_rtt, v); in tcp_hc_update()
560 if (hc_entry->hc_rttvar == 0) in tcp_hc_update()
563 v = ((uint64_t)hc_entry->hc_rttvar + in tcp_hc_update()
565 atomic_store_32(&hc_entry->hc_rttvar, v); in tcp_hc_update()
569 if (hc_entry->hc_ssthresh == 0) in tcp_hc_update()
572 v = (hc_entry->hc_ssthresh + hcml->hc_ssthresh) / 2; in tcp_hc_update()
573 atomic_store_32(&hc_entry->hc_ssthresh, v); in tcp_hc_update()
577 if (hc_entry->hc_cwnd == 0) in tcp_hc_update()
580 v = ((uint64_t)hc_entry->hc_cwnd + in tcp_hc_update()
582 atomic_store_32(&hc_entry->hc_cwnd, v); in tcp_hc_update()
586 if (hc_entry->hc_sendpipe == 0) in tcp_hc_update()
589 v = ((uint64_t)hc_entry->hc_sendpipe + in tcp_hc_update()
591 atomic_store_32(&hc_entry->hc_sendpipe, v); in tcp_hc_update()
595 if (hc_entry->hc_recvpipe == 0) in tcp_hc_update()
598 v = ((uint64_t)hc_entry->hc_recvpipe + in tcp_hc_update()
600 atomic_store_32(&hc_entry->hc_recvpipe, v); in tcp_hc_update()
608 CK_SLIST_INSERT_HEAD(&hc_head->hch_bucket, hc_entry, hc_q); in tcp_hc_update()
614 } else if (hc_entry != CK_SLIST_FIRST(&hc_head->hch_bucket)) { in tcp_hc_update()
615 KASSERT(CK_SLIST_NEXT(hc_prev, hc_q) == hc_entry, in tcp_hc_update()
616 ("%s: %p next is not %p", __func__, hc_prev, hc_entry)); in tcp_hc_update()
618 CK_SLIST_INSERT_HEAD(&hc_head->hch_bucket, hc_entry, hc_q); in tcp_hc_update()
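tcp_hc_update() either refreshes an existing entry or allocates and links a fresh one, then folds each new measurement into the cached metrics: a still-zero metric is seeded with the sample, otherwise the cached value and the sample are averaged, as the visible "(hc_entry->hc_ssthresh + hcml->hc_ssthresh) / 2" at line 572 shows; the other metrics appear to follow the same seed-or-average pattern, while hc_mtu (line 548) is stored directly. A standalone sketch of that merge rule, not the kernel function:

#include <stdint.h>

/*
 * Merge one new measurement into a cached metric: ignore an empty sample,
 * seed a still-unset metric, otherwise average the cached value with the
 * sample.  Simplified sketch of the rule visible in the listing above.
 */
static uint32_t
hc_merge_sketch(uint32_t cached, uint32_t sample)
{
	if (sample == 0)
		return (cached);	/* nothing new to fold in */
	if (cached == 0)
		return (sample);	/* first measurement seeds the metric */
	return ((uint32_t)(((uint64_t)cached + sample) / 2));
}

Applied in turn, that would look like new_rtt = hc_merge_sketch(old_rtt, sample_rtt), and likewise for rttvar, ssthresh, cwnd, sendpipe and recvpipe.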
633 struct hc_metrics *hc_entry; in sysctl_tcp_hc_list() local
670 CK_SLIST_FOREACH(hc_entry, in sysctl_tcp_hc_list()
678 hc_entry->ip4.s_addr ? in sysctl_tcp_hc_list()
679 inet_ntoa_r(hc_entry->ip4, ip4buf) : in sysctl_tcp_hc_list()
681 ip6_sprintf(ip6buf, &hc_entry->ip6), in sysctl_tcp_hc_list()
685 hc_entry->hc_mtu, in sysctl_tcp_hc_list()
686 hc_entry->hc_ssthresh, in sysctl_tcp_hc_list()
687 msec((u_long)hc_entry->hc_rtt * in sysctl_tcp_hc_list()
689 msec((u_long)hc_entry->hc_rttvar * in sysctl_tcp_hc_list()
691 hc_entry->hc_cwnd, in sysctl_tcp_hc_list()
692 hc_entry->hc_sendpipe, in sysctl_tcp_hc_list()
693 hc_entry->hc_recvpipe, in sysctl_tcp_hc_list()
695 hc_entry->hc_hits, in sysctl_tcp_hc_list()
696 hc_entry->hc_updates, in sysctl_tcp_hc_list()
698 hc_entry->hc_expire); in sysctl_tcp_hc_list()
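sysctl_tcp_hc_list() dumps one row per cached entry: the foreign address (inet_ntoa_r for IPv4, ip6_sprintf for IPv6), the metrics with hc_rtt and hc_rttvar converted to milliseconds, and the hit/update/expire counters. A user-space sketch of formatting one such row; the field names, column widths, and millisecond scaling here are illustrative only.

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

/* Illustrative IPv4-only entry; the real struct hc_metrics has more fields. */
struct hc_row_sketch {
	struct in_addr ip4;
	uint32_t mtu, ssthresh, rtt_ms, rttvar_ms, cwnd, sendpipe, recvpipe;
	uint32_t hits, updates;
	int expire;
};

static void
hc_print_row_sketch(const struct hc_row_sketch *e)
{
	char ip4buf[INET_ADDRSTRLEN];

	inet_ntop(AF_INET, &e->ip4, ip4buf, sizeof(ip4buf));
	printf("%-15s %5u %8u %6ums %6ums %8u %8u %8u %5u %5u %4i\n",
	    ip4buf, e->mtu, e->ssthresh, e->rtt_ms, e->rttvar_ms,
	    e->cwnd, e->sendpipe, e->recvpipe, e->hits, e->updates, e->expire);
}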
758 struct hc_metrics *hc_entry, *hc_next, *hc_prev; in tcp_hc_purge_internal() local
765 CK_SLIST_FOREACH_SAFE(hc_entry, &head->hch_bucket, hc_q, in tcp_hc_purge_internal()
772 atomic_load_int(&hc_entry->hc_expire) <= 0) { in tcp_hc_purge_internal()
774 KASSERT(hc_entry == in tcp_hc_purge_internal()
777 __func__, hc_entry, hc_prev)); in tcp_hc_purge_internal()
780 KASSERT(hc_entry == in tcp_hc_purge_internal()
783 __func__, hc_entry)); in tcp_hc_purge_internal()
787 uma_zfree_smr(V_tcp_hostcache.zone, hc_entry); in tcp_hc_purge_internal()
791 atomic_subtract_int(&hc_entry->hc_expire, in tcp_hc_purge_internal()
793 hc_prev = hc_entry; in tcp_hc_purge_internal()
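tcp_hc_purge_internal() walks every bucket, frees entries whose hc_expire has dropped to zero or below via uma_zfree_smr() (the kernel version can also be asked to flush everything), and otherwise ages the survivors by subtracting the purge interval. A simplified single-list sketch using a pointer-to-pointer unlink instead of the CK_SLIST/hc_prev bookkeeping in the listing; names are hypothetical.

#include <stdlib.h>
#include <stdbool.h>

struct purge_node_sketch {
	struct purge_node_sketch *next;
	int expire;			/* seconds of lifetime left */
};

/* Remove expired nodes from a singly linked list, aging the survivors.
 * 'all' forces removal regardless of remaining lifetime. */
static void
hc_purge_sketch(struct purge_node_sketch **headp, int interval, bool all)
{
	struct purge_node_sketch **link = headp, *n;

	while ((n = *link) != NULL) {
		if (all || n->expire <= 0) {
			*link = n->next;	/* unlink, then free the entry */
			free(n);
		} else {
			n->expire -= interval;	/* age the entry for the next pass */
			link = &n->next;
		}
	}
}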