/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2016-2018 Netflix, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include <sys/param.h>
#include <sys/arb.h>
#include <sys/hash.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/qmath.h>
#include <sys/queue.h>
#include <sys/refcount.h>
#include <sys/rwlock.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/tree.h>
#include <sys/stats.h> /* Must come after qmath.h and tree.h */
#include <sys/counter.h>
#include <dev/tcp_log/tcp_log_dev.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_log_buf.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_hpts.h>

/* Default expiry time */
#define	TCP_LOG_EXPIRE_TIME	((sbintime_t)60 * SBT_1S)

/* Max interval at which to run the expiry timer */
#define	TCP_LOG_EXPIRE_INTVL	((sbintime_t)5 * SBT_1S)

bool	tcp_log_verbose;
static uma_zone_t tcp_log_id_bucket_zone, tcp_log_id_node_zone, tcp_log_zone;
static int	tcp_log_session_limit = TCP_LOG_BUF_DEFAULT_SESSION_LIMIT;
static uint32_t	tcp_log_version = TCP_LOG_BUF_VER;
RB_HEAD(tcp_log_id_tree, tcp_log_id_bucket);
static struct tcp_log_id_tree tcp_log_id_head;
static STAILQ_HEAD(, tcp_log_id_node) tcp_log_expireq_head =
    STAILQ_HEAD_INITIALIZER(tcp_log_expireq_head);
static struct mtx tcp_log_expireq_mtx;
static struct callout tcp_log_expireq_callout;
static u_long tcp_log_auto_ratio = 0;
static volatile u_long tcp_log_auto_ratio_cur = 0;
static uint32_t tcp_log_auto_mode = TCP_LOG_STATE_TAIL;
static bool tcp_log_auto_all = false;
static uint32_t tcp_disable_all_bb_logs = 0;

RB_PROTOTYPE_STATIC(tcp_log_id_tree, tcp_log_id_bucket, tlb_rb, tcp_log_id_cmp)

SYSCTL_NODE(_net_inet_tcp, OID_AUTO, bb, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "TCP Black Box controls");

SYSCTL_NODE(_net_inet_tcp_bb, OID_AUTO, tp, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "TCP Black Box Trace Point controls");

SYSCTL_BOOL(_net_inet_tcp_bb, OID_AUTO, log_verbose, CTLFLAG_RW, &tcp_log_verbose,
    0, "Force verbose logging for TCP traces");

SYSCTL_INT(_net_inet_tcp_bb, OID_AUTO, log_session_limit,
    CTLFLAG_RW, &tcp_log_session_limit, 0,
    "Maximum number of events maintained for each TCP session");
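
/*
 * Illustrative note (not from the original file): the limits above are
 * ordinary sysctl knobs, so the per-connection event limit can be raised
 * at runtime with something like:
 *
 *	sysctl net.inet.tcp.bb.log_session_limit=10000
 *
 * The OID names follow directly from the SYSCTL_* declarations in this file.
 */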

uint32_t tcp_trace_point_config = 0;
SYSCTL_U32(_net_inet_tcp_bb_tp, OID_AUTO, number, CTLFLAG_RW,
    &tcp_trace_point_config, TCP_LOG_STATE_HEAD_AUTO,
    "What is the trace point number to activate (0=none, 0xffffffff = all)?");

uint32_t tcp_trace_point_bb_mode = TCP_LOG_STATE_CONTINUAL;
SYSCTL_U32(_net_inet_tcp_bb_tp, OID_AUTO, bbmode, CTLFLAG_RW,
    &tcp_trace_point_bb_mode, TCP_LOG_STATE_HEAD_AUTO,
    "What is the BB logging mode that is activated?");

int32_t tcp_trace_point_count = 0;
SYSCTL_U32(_net_inet_tcp_bb_tp, OID_AUTO, count, CTLFLAG_RW,
    &tcp_trace_point_count, TCP_LOG_STATE_HEAD_AUTO,
    "How many connections that hit the trace point will have BB logging turned on?");

SYSCTL_UMA_MAX(_net_inet_tcp_bb, OID_AUTO, log_global_limit, CTLFLAG_RW,
    &tcp_log_zone, "Maximum number of events maintained for all TCP sessions");

SYSCTL_UMA_CUR(_net_inet_tcp_bb, OID_AUTO, log_global_entries, CTLFLAG_RD,
    &tcp_log_zone, "Current number of events maintained for all TCP sessions");

SYSCTL_UMA_MAX(_net_inet_tcp_bb, OID_AUTO, log_id_limit, CTLFLAG_RW,
    &tcp_log_id_bucket_zone, "Maximum number of log IDs");

SYSCTL_UMA_CUR(_net_inet_tcp_bb, OID_AUTO, log_id_entries, CTLFLAG_RD,
    &tcp_log_id_bucket_zone, "Current number of log IDs");

SYSCTL_UMA_MAX(_net_inet_tcp_bb, OID_AUTO, log_id_tcpcb_limit, CTLFLAG_RW,
    &tcp_log_id_node_zone, "Maximum number of tcpcbs with log IDs");

SYSCTL_UMA_CUR(_net_inet_tcp_bb, OID_AUTO, log_id_tcpcb_entries, CTLFLAG_RD,
    &tcp_log_id_node_zone, "Current number of tcpcbs with log IDs");

SYSCTL_U32(_net_inet_tcp_bb, OID_AUTO, log_version, CTLFLAG_RD, &tcp_log_version,
    0, "Version of log formats exported");

SYSCTL_U32(_net_inet_tcp_bb, OID_AUTO, disable_all, CTLFLAG_RW,
    &tcp_disable_all_bb_logs, 0,
    "Disable all BB logging for all connections");

SYSCTL_ULONG(_net_inet_tcp_bb, OID_AUTO, log_auto_ratio, CTLFLAG_RW,
    &tcp_log_auto_ratio, 0, "Do auto capturing for 1 out of N sessions");

SYSCTL_U32(_net_inet_tcp_bb, OID_AUTO, log_auto_mode, CTLFLAG_RW,
    &tcp_log_auto_mode, 0,
    "Logging mode for auto-selected sessions (default is TCP_LOG_STATE_TAIL)");
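
/*
 * Auto-capture knobs: when log_auto_ratio is non-zero, tcp_log_selectauto()
 * (below) selects roughly one of every log_auto_ratio new sessions, and
 * log_auto_mode supplies the initial logging state; log_auto_all (declared
 * next) widens the selection to sessions without a log ID.  An illustrative
 * configuration, not taken from the original file:
 *
 *	sysctl net.inet.tcp.bb.log_auto_ratio=100
 *	sysctl net.inet.tcp.bb.log_auto_all=1
 */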

SYSCTL_BOOL(_net_inet_tcp_bb, OID_AUTO, log_auto_all, CTLFLAG_RW,
    &tcp_log_auto_all, 0,
    "Auto-select from all sessions (rather than just those with IDs)");

#ifdef TCPLOG_DEBUG_COUNTERS
counter_u64_t tcp_log_queued;
counter_u64_t tcp_log_que_fail1;
counter_u64_t tcp_log_que_fail2;
counter_u64_t tcp_log_que_fail3;
counter_u64_t tcp_log_que_fail4;
counter_u64_t tcp_log_que_fail5;
counter_u64_t tcp_log_que_copyout;
counter_u64_t tcp_log_que_read;
counter_u64_t tcp_log_que_freed;

SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, queued, CTLFLAG_RD,
    &tcp_log_queued, "Number of entries queued");
SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, fail1, CTLFLAG_RD,
    &tcp_log_que_fail1, "Number of entries queued but fail 1");
SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, fail2, CTLFLAG_RD,
    &tcp_log_que_fail2, "Number of entries queued but fail 2");
SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, fail3, CTLFLAG_RD,
    &tcp_log_que_fail3, "Number of entries queued but fail 3");
SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, fail4, CTLFLAG_RD,
    &tcp_log_que_fail4, "Number of entries queued but fail 4");
SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, fail5, CTLFLAG_RD,
    &tcp_log_que_fail5, "Number of entries queued but fail 5");
SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, copyout, CTLFLAG_RD,
    &tcp_log_que_copyout, "Number of entries copied out");
SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, read, CTLFLAG_RD,
    &tcp_log_que_read, "Number of entries read from the queue");
SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, freed, CTLFLAG_RD,
    &tcp_log_que_freed, "Number of entries freed after reading");
#endif

#ifdef INVARIANTS
#define	TCPLOG_DEBUG_RINGBUF
#endif
/* Number of requests to consider a PCB ID "active". */
#define	ACTIVE_REQUEST_COUNT 10

/* Statistic tracking for "active" PCB IDs. */
static counter_u64_t tcp_log_pcb_ids_cur;
static counter_u64_t tcp_log_pcb_ids_tot;

SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, pcb_ids_cur, CTLFLAG_RD,
    &tcp_log_pcb_ids_cur, "Number of pcb IDs allocated in the system");
SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, pcb_ids_tot, CTLFLAG_RD,
    &tcp_log_pcb_ids_tot, "Total number of pcb IDs that have been allocated");

struct tcp_log_mem
{
	STAILQ_ENTRY(tcp_log_mem) tlm_queue;
	struct tcp_log_buffer	tlm_buf;
	struct tcp_log_verbose	tlm_v;
#ifdef TCPLOG_DEBUG_RINGBUF
	volatile int		tlm_refcnt;
#endif
};

/* 60 bytes for the header, + 16 bytes for padding */
static uint8_t	zerobuf[76];

/*
 * Lock order:
 * 1. TCPID_TREE
 * 2. TCPID_BUCKET
 * 3. INP
 *
 * Rules:
 * A. You need a lock on the Tree to add/remove buckets.
 * B. You need a lock on the bucket to add/remove nodes from the bucket.
 * C. To change information in a node, you need the INP lock if the tln_closed
 *    field is false. Otherwise, you need the bucket lock. (Note that the
 *    tln_closed field can change at any point, so you need to recheck the
 *    entry after acquiring the INP lock.)
 * D. To remove a node from the bucket, you must have that entry locked,
 *    according to the criteria of Rule C. Also, the node must not be on
 *    the expiry queue.
 * E. The exception to C is the expiry queue fields, which are locked by
 *    the TCPLOG_EXPIREQ lock.
 *
 * Buckets have a reference count. Each node is a reference. Further,
 * other callers may add reference counts to keep a bucket from disappearing.
 * You can add a reference as long as you own a lock sufficient to keep the
 * bucket from disappearing. For example, a common use is:
 * a. Have a locked INP, but need to lock the TCPID_BUCKET.
 * b. Add a refcount on the bucket. (Safe because the INP lock prevents
 *    the TCPID_BUCKET from going away.)
 * c. Drop the INP lock.
 * d. Acquire a lock on the TCPID_BUCKET.
 * e. Acquire a lock on the INP.
 * f. Drop the refcount on the bucket.
 *    (At this point, the bucket may disappear.)
 *
 * Expire queue lock:
 * You can acquire this with either the bucket or INP lock. Don't reverse it.
 * When the expire code has committed to freeing a node, it resets the expiry
 * time to SBT_MAX. That is the signal to everyone else that they should
 * leave that node alone.
 */
static struct rwlock tcp_id_tree_lock;
#define	TCPID_TREE_WLOCK()		rw_wlock(&tcp_id_tree_lock)
#define	TCPID_TREE_RLOCK()		rw_rlock(&tcp_id_tree_lock)
#define	TCPID_TREE_UPGRADE()		rw_try_upgrade(&tcp_id_tree_lock)
#define	TCPID_TREE_WUNLOCK()		rw_wunlock(&tcp_id_tree_lock)
#define	TCPID_TREE_RUNLOCK()		rw_runlock(&tcp_id_tree_lock)
#define	TCPID_TREE_WLOCK_ASSERT()	rw_assert(&tcp_id_tree_lock, RA_WLOCKED)
#define	TCPID_TREE_RLOCK_ASSERT()	rw_assert(&tcp_id_tree_lock, RA_RLOCKED)
#define	TCPID_TREE_UNLOCK_ASSERT()	rw_assert(&tcp_id_tree_lock, RA_UNLOCKED)

#define	TCPID_BUCKET_LOCK_INIT(tlb)	mtx_init(&((tlb)->tlb_mtx), "tcp log id bucket", NULL, MTX_DEF)
#define	TCPID_BUCKET_LOCK_DESTROY(tlb)	mtx_destroy(&((tlb)->tlb_mtx))
#define	TCPID_BUCKET_LOCK(tlb)		mtx_lock(&((tlb)->tlb_mtx))
#define	TCPID_BUCKET_UNLOCK(tlb)	mtx_unlock(&((tlb)->tlb_mtx))
#define	TCPID_BUCKET_LOCK_ASSERT(tlb)	mtx_assert(&((tlb)->tlb_mtx), MA_OWNED)
#define	TCPID_BUCKET_UNLOCK_ASSERT(tlb)	mtx_assert(&((tlb)->tlb_mtx), MA_NOTOWNED)

#define	TCPID_BUCKET_REF(tlb)		refcount_acquire(&((tlb)->tlb_refcnt))
#define	TCPID_BUCKET_UNREF(tlb)		refcount_release(&((tlb)->tlb_refcnt))

#define	TCPLOG_EXPIREQ_LOCK()		mtx_lock(&tcp_log_expireq_mtx)
#define	TCPLOG_EXPIREQ_UNLOCK()		mtx_unlock(&tcp_log_expireq_mtx)

SLIST_HEAD(tcp_log_id_head, tcp_log_id_node);

struct tcp_log_id_bucket
{
	/*
	 * tlb_id must be first. This lets us use strcmp on
	 * (struct tcp_log_id_bucket *) and (char *) interchangeably.
	 */
	char	tlb_id[TCP_LOG_ID_LEN];
	char	tlb_tag[TCP_LOG_TAG_LEN];
	RB_ENTRY(tcp_log_id_bucket) tlb_rb;
	struct tcp_log_id_head tlb_head;
	struct mtx tlb_mtx;
	volatile u_int tlb_refcnt;
	volatile u_int tlb_reqcnt;
	uint32_t tlb_loglimit;
	int8_t	tlb_logstate;
};

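/*
 * A tcp_log_id_bucket represents one log ID and collects every connection
 * that shares it; each member connection is represented by a
 * tcp_log_id_node below.  While the connection is open, most of the node's
 * information lives in the PCB.  Once it closes, the node keeps the endpoint
 * information and any remaining log entries so they can still be expired or
 * dumped later.
 */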
struct tcp_log_id_node
{
	SLIST_ENTRY(tcp_log_id_node) tln_list;
	STAILQ_ENTRY(tcp_log_id_node) tln_expireq; /* Locked by the expireq lock */
	sbintime_t tln_expiretime;	/* Locked by the expireq lock */

	/*
	 * If INP is NULL, that means the connection has closed. We've
	 * saved the connection endpoint information and the log entries
	 * in the tln_ie and tln_entries members. We've also saved a pointer
	 * to the enclosing bucket here. If INP is not NULL, the information is
	 * in the PCB and not here.
	 */
	struct inpcb *tln_inp;
	struct tcpcb *tln_tp;
	struct tcp_log_id_bucket *tln_bucket;
	struct in_endpoints tln_ie;
	struct tcp_log_stailq tln_entries;
	int	tln_count;
	volatile int tln_closed;
	uint8_t	tln_af;
};

enum tree_lock_state {
	TREE_UNLOCKED = 0,
	TREE_RLOCKED,
	TREE_WLOCKED,
};

/* Do we want to select this session for auto-logging? */
static __inline bool
tcp_log_selectauto(void)
{

	/*
	 * If we are doing auto-capturing, figure out whether we will capture
	 * this session.
	 */
	if (tcp_log_auto_ratio &&
	    (tcp_disable_all_bb_logs == 0) &&
	    (atomic_fetchadd_long(&tcp_log_auto_ratio_cur, 1) %
	    tcp_log_auto_ratio) == 0)
		return (true);
	return (false);
}

static __inline int
tcp_log_id_cmp(struct tcp_log_id_bucket *a, struct tcp_log_id_bucket *b)
{
	KASSERT(a != NULL, ("tcp_log_id_cmp: argument a is unexpectedly NULL"));
	KASSERT(b != NULL, ("tcp_log_id_cmp: argument b is unexpectedly NULL"));
	return strncmp(a->tlb_id, b->tlb_id, TCP_LOG_ID_LEN);
}

RB_GENERATE_STATIC(tcp_log_id_tree, tcp_log_id_bucket, tlb_rb, tcp_log_id_cmp)

static __inline void
tcp_log_id_validate_tree_lock(int tree_locked)
{

#ifdef INVARIANTS
	switch (tree_locked) {
	case TREE_WLOCKED:
		TCPID_TREE_WLOCK_ASSERT();
		break;
	case TREE_RLOCKED:
		TCPID_TREE_RLOCK_ASSERT();
		break;
	case TREE_UNLOCKED:
		TCPID_TREE_UNLOCK_ASSERT();
		break;
	default:
		kassert_panic("%s:%d: unknown tree lock state", __func__,
		    __LINE__);
	}
#endif
}

static __inline void
tcp_log_remove_bucket(struct tcp_log_id_bucket *tlb)
{

	TCPID_TREE_WLOCK_ASSERT();
	KASSERT(SLIST_EMPTY(&tlb->tlb_head),
	    ("%s: Attempt to remove non-empty bucket", __func__));
	if (RB_REMOVE(tcp_log_id_tree, &tcp_log_id_head, tlb) == NULL) {
#ifdef INVARIANTS
		kassert_panic("%s:%d: error removing element from tree",
		    __func__, __LINE__);
#endif
	}
	TCPID_BUCKET_LOCK_DESTROY(tlb);
	counter_u64_add(tcp_log_pcb_ids_cur, (int64_t)-1);
	uma_zfree(tcp_log_id_bucket_zone, tlb);
}

/*
 * Call with a referenced and locked bucket.
 * Will return true if the bucket was freed; otherwise, false.
 * tlb: The bucket to unreference.
 * tree_locked: A pointer to the state of the tree lock. If the tree lock
 *    state changes, the function will update it.
 * inp: If not NULL and the function needs to drop the inp lock to relock the
 *    tree, it will do so. (The caller must ensure inp will not become invalid,
 *    probably by holding a reference to it.)
 */
static bool
tcp_log_unref_bucket(struct tcp_log_id_bucket *tlb, int *tree_locked,
    struct inpcb *inp)
{

	KASSERT(tlb != NULL, ("%s: called with NULL tlb", __func__));
	KASSERT(tree_locked != NULL, ("%s: called with NULL tree_locked",
	    __func__));

	tcp_log_id_validate_tree_lock(*tree_locked);

	/*
	 * Did we hold the last reference on the tlb? If so, we may need
	 * to free it. (Note that we can realistically only execute the
	 * loop twice: once without a write lock and once with a write
	 * lock.)
	 */
	while (TCPID_BUCKET_UNREF(tlb)) {
		/*
		 * We need a write lock on the tree to free this.
		 * If we can upgrade the tree lock, this is "easy". If we
		 * can't upgrade the tree lock, we need to do this the
		 * "hard" way: unwind all our locks and relock everything.
		 * In the meantime, anything could have changed. We even
		 * need to validate that we still need to free the bucket.
		 */
		if (*tree_locked == TREE_RLOCKED && TCPID_TREE_UPGRADE())
			*tree_locked = TREE_WLOCKED;
		else if (*tree_locked != TREE_WLOCKED) {
			TCPID_BUCKET_REF(tlb);
			if (inp != NULL)
				INP_WUNLOCK(inp);
			TCPID_BUCKET_UNLOCK(tlb);
			if (*tree_locked == TREE_RLOCKED)
				TCPID_TREE_RUNLOCK();
			TCPID_TREE_WLOCK();
			*tree_locked = TREE_WLOCKED;
			TCPID_BUCKET_LOCK(tlb);
			if (inp != NULL)
				INP_WLOCK(inp);
			continue;
		}

		/*
		 * We have an empty bucket and a write lock on the tree.
		 * Remove the empty bucket.
		 */
		tcp_log_remove_bucket(tlb);
		return (true);
	}
	return (false);
}

/*
 * Call with a locked bucket. This function will release the lock on the
 * bucket before returning.
 *
 * The caller is responsible for freeing the tp->t_lin/tln node!
 *
 * Note: one of tp or both tlb and tln must be supplied.
 *
 * inp: A pointer to the inp. If the function needs to drop the inp lock to
 *    acquire the tree write lock, it will do so. (The caller must ensure inp
 *    will not become invalid, probably by holding a reference to it.)
 * tp: A pointer to the tcpcb. (optional; if specified, tlb and tln are ignored)
 * tlb: A pointer to the bucket. (optional; ignored if tp is specified)
 * tln: A pointer to the node. (optional; ignored if tp is specified)
 * tree_locked: A pointer to the state of the tree lock. If the tree lock
 *    state changes, the function will update it.
 *
 * Will return true if the INP lock was reacquired; otherwise, false.
 */
static bool
tcp_log_remove_id_node(struct inpcb *inp, struct tcpcb *tp,
    struct tcp_log_id_bucket *tlb, struct tcp_log_id_node *tln,
    int *tree_locked)
{
	int orig_tree_locked;

	KASSERT(tp != NULL || (tlb != NULL && tln != NULL),
	    ("%s: called with tp=%p, tlb=%p, tln=%p", __func__,
	    tp, tlb, tln));
	KASSERT(tree_locked != NULL, ("%s: called with NULL tree_locked",
	    __func__));

	if (tp != NULL) {
		tlb = tp->t_lib;
		tln = tp->t_lin;
		KASSERT(tlb != NULL, ("%s: unexpectedly NULL tlb", __func__));
		KASSERT(tln != NULL, ("%s: unexpectedly NULL tln", __func__));
	}

	tcp_log_id_validate_tree_lock(*tree_locked);
	TCPID_BUCKET_LOCK_ASSERT(tlb);

	/*
	 * Remove the node, clear the log bucket and node from the TCPCB, and
	 * decrement the bucket refcount. In the process, if this is the
	 * last reference, the bucket will be freed.
	 */
	SLIST_REMOVE(&tlb->tlb_head, tln, tcp_log_id_node, tln_list);
	if (tp != NULL) {
		tp->t_lib = NULL;
		tp->t_lin = NULL;
	}
	orig_tree_locked = *tree_locked;
	if (!tcp_log_unref_bucket(tlb, tree_locked, inp))
		TCPID_BUCKET_UNLOCK(tlb);
	return (*tree_locked != orig_tree_locked);
}

#define	RECHECK_INP_CLEAN(cleanup)	do {			\
	if (inp->inp_flags & INP_DROPPED) {			\
		rv = ECONNRESET;				\
		cleanup;					\
		goto done;					\
	}							\
	tp = intotcpcb(inp);					\
} while (0)

#define	RECHECK_INP()	RECHECK_INP_CLEAN(/* noop */)

static void
tcp_log_grow_tlb(char *tlb_id, struct tcpcb *tp)
{

	INP_WLOCK_ASSERT(tptoinpcb(tp));

#ifdef STATS
	if (V_tcp_perconn_stats_enable == 2 && tp->t_stats == NULL)
		(void)tcp_stats_sample_rollthedice(tp, tlb_id, strlen(tlb_id));
#endif
}

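/*
 * Summary of the two helpers below (the function bodies are authoritative):
 * tcp_log_increment_reqcnt() bumps the bucket's tlb_reqcnt, the per-ID
 * request count that ACTIVE_REQUEST_COUNT above is presumably measured
 * against.  tcp_log_apply_ratio() hashes the connection's log ID; for all
 * but roughly 1 in 'ratio' IDs (by hash) it marks the bucket
 * TCP_LOG_STATE_RATIO_OFF and turns the connection's logging off, but only
 * when neither the connection nor the bucket already has logging enabled.
 */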
static void
tcp_log_increment_reqcnt(struct tcp_log_id_bucket *tlb)
{

	atomic_fetchadd_int(&tlb->tlb_reqcnt, 1);
}

int
tcp_log_apply_ratio(struct tcpcb *tp, int ratio)
{
	struct tcp_log_id_bucket *tlb;
	struct inpcb *inp = tptoinpcb(tp);
	uint32_t hash, ratio_hash_thresh;
	int rv, tree_locked;

	rv = 0;
	tree_locked = TREE_UNLOCKED;
	tlb = tp->t_lib;

	INP_WLOCK_ASSERT(inp);
	if (tlb == NULL) {
		INP_WUNLOCK(inp);
		return (EOPNOTSUPP);
	}
	ratio_hash_thresh = max(1, UINT32_MAX / ratio);
	TCPID_BUCKET_REF(tlb);
	INP_WUNLOCK(inp);
	TCPID_BUCKET_LOCK(tlb);

	hash = hash32_buf(tlb->tlb_id, strlen(tlb->tlb_id), 0);
	if (hash > ratio_hash_thresh && tp->_t_logstate == TCP_LOG_STATE_OFF &&
	    tlb->tlb_logstate == TCP_LOG_STATE_OFF) {
		/*
		 * Ratio decision not to log this log ID (and this connection by
		 * way of association). We only apply a ratio-based log disable
		 * decision if it would not interfere with a log enable decision
		 * made elsewhere, e.g. tcp_log_selectauto() or setsockopt().
		 */
		tlb->tlb_logstate = TCP_LOG_STATE_RATIO_OFF;
		INP_WLOCK(inp);
		RECHECK_INP();
		(void)tcp_log_state_change(tp, TCP_LOG_STATE_OFF);
done:
		INP_WUNLOCK(inp);
	}

	INP_UNLOCK_ASSERT(inp);
	if (!tcp_log_unref_bucket(tlb, &tree_locked, NULL))
		TCPID_BUCKET_UNLOCK(tlb);

	if (tree_locked == TREE_WLOCKED) {
		TCPID_TREE_WLOCK_ASSERT();
		TCPID_TREE_WUNLOCK();
	} else if (tree_locked == TREE_RLOCKED) {
		TCPID_TREE_RLOCK_ASSERT();
		TCPID_TREE_RUNLOCK();
	} else
		TCPID_TREE_UNLOCK_ASSERT();

	return (rv);
}

/*
 * Associate the specified tag with a particular TCP log ID.
 * Called with INPCB locked. Returns with it unlocked.
 * Returns 0 on success or EOPNOTSUPP if the connection has no TCP log ID.
 */
int
tcp_log_set_tag(struct tcpcb *tp, char *tag)
{
	struct inpcb *inp = tptoinpcb(tp);
	struct tcp_log_id_bucket *tlb;
	int tree_locked;

	INP_WLOCK_ASSERT(inp);

	tree_locked = TREE_UNLOCKED;
	tlb = tp->t_lib;
	if (tlb == NULL) {
		INP_WUNLOCK(inp);
		return (EOPNOTSUPP);
	}

	TCPID_BUCKET_REF(tlb);
	INP_WUNLOCK(inp);
	TCPID_BUCKET_LOCK(tlb);
	strlcpy(tlb->tlb_tag, tag, TCP_LOG_TAG_LEN);
	if (!tcp_log_unref_bucket(tlb, &tree_locked, NULL))
		TCPID_BUCKET_UNLOCK(tlb);

	if (tree_locked == TREE_WLOCKED) {
		TCPID_TREE_WLOCK_ASSERT();
		TCPID_TREE_WUNLOCK();
	} else if (tree_locked == TREE_RLOCKED) {
		TCPID_TREE_RLOCK_ASSERT();
		TCPID_TREE_RUNLOCK();
	} else
		TCPID_TREE_UNLOCK_ASSERT();

	return (0);
}

/*
 * Set the TCP log ID for a TCPCB.
 * Called with INPCB locked. Returns with it unlocked.
 */
int
tcp_log_set_id(struct tcpcb *tp, char *id)
{
	struct tcp_log_id_bucket *tlb, *tmp_tlb;
	struct tcp_log_id_node *tln;
	struct inpcb *inp = tptoinpcb(tp);
	int tree_locked, rv;
	bool bucket_locked, same;

	tlb = NULL;
	tln = NULL;
	tree_locked = TREE_UNLOCKED;
	bucket_locked = false;

restart:
	INP_WLOCK_ASSERT(inp);
	/* See if the ID is unchanged. */
	same = ((tp->t_lib != NULL && !strcmp(tp->t_lib->tlb_id, id)) ||
	    (tp->t_lib == NULL && *id == 0));
	if (tp->_t_logstate && STAILQ_FIRST(&tp->t_logs) && !same) {
		/*
		 * There are residual logs left and we may be changing IDs,
		 * so dump what we can.
		 */
		switch(tp->_t_logstate) {
		case TCP_LOG_STATE_HEAD_AUTO:
			(void)tcp_log_dump_tp_logbuf(tp, "auto-dumped from head at id switch",
						     M_NOWAIT, false);
			break;
		case TCP_LOG_STATE_TAIL_AUTO:
			(void)tcp_log_dump_tp_logbuf(tp, "auto-dumped from tail at id switch",
						     M_NOWAIT, false);
			break;
		case TCP_LOG_STATE_CONTINUAL:
			(void)tcp_log_dump_tp_logbuf(tp, "auto-dumped from continual at id switch",
						     M_NOWAIT, false);
			break;
		case TCP_LOG_VIA_BBPOINTS:
			(void)tcp_log_dump_tp_logbuf(tp, "auto-dumped from bbpoints at id switch",
						     M_NOWAIT, false);
			break;
		}
	}
	if (same) {
		if (tp->t_lib != NULL) {
			tcp_log_increment_reqcnt(tp->t_lib);
			if ((tp->t_lib->tlb_logstate > TCP_LOG_STATE_OFF) &&
			    (tp->t_log_state_set == 0)) {
				/* Clone in any logging */

				tp->_t_logstate = tp->t_lib->tlb_logstate;
			}
			if ((tp->t_lib->tlb_loglimit) &&
			    (tp->t_log_state_set == 0)) {
				/* We also have a limit set */

				tp->t_loglimit = tp->t_lib->tlb_loglimit;
			}
		}
		rv = 0;
		goto done;
	}

	/*
	 * If the TCPCB had a previous ID, we need to extricate it from
	 * the previous list.
	 *
	 * Drop the TCPCB lock and lock the tree and the bucket.
	 * Because this is called in the socket context, we (theoretically)
	 * don't need to worry about the INPCB completely going away
	 * while we are gone.
	 */
	if (tp->t_lib != NULL) {
		tlb = tp->t_lib;
		TCPID_BUCKET_REF(tlb);
		INP_WUNLOCK(inp);

		if (tree_locked == TREE_UNLOCKED) {
			TCPID_TREE_RLOCK();
			tree_locked = TREE_RLOCKED;
		}
		TCPID_BUCKET_LOCK(tlb);
		bucket_locked = true;
		INP_WLOCK(inp);

		/*
		 * Unreference the bucket. If our bucket went away, it is no
		 * longer locked or valid.
		 */
		if (tcp_log_unref_bucket(tlb, &tree_locked, inp)) {
			bucket_locked = false;
			tlb = NULL;
		}

		/* Validate the INP. */
		RECHECK_INP();

		/*
		 * Evaluate whether the bucket changed while we were unlocked.
		 *
		 * Possible scenarios here:
		 * 1. Bucket is unchanged and the same one we started with.
		 * 2. The TCPCB no longer has a bucket and our bucket was
		 *    freed.
		 * 3. The TCPCB has a new bucket, whether or not ours was
		 *    freed.
		 * 4. The TCPCB no longer has a bucket and our bucket was
		 *    not freed.
		 *
		 * In cases 2-4, we will start over. In case 1, we will
		 * proceed here to remove the bucket.
		 */
		if (tlb == NULL || tp->t_lib != tlb) {
			KASSERT(bucket_locked || tlb == NULL,
			    ("%s: bucket_locked (%d) and tlb (%p) are "
			    "inconsistent", __func__, bucket_locked, tlb));

			if (bucket_locked) {
				TCPID_BUCKET_UNLOCK(tlb);
				bucket_locked = false;
				tlb = NULL;
			}
			goto restart;
		}

		/*
		 * Store the (struct tcp_log_id_node) for reuse. Then, remove
		 * it from the bucket. In the process, we may end up relocking.
		 * If so, we need to validate that the INP is still valid, and
		 * the TCPCB entries match what we expect.
		 *
		 * We will clear tlb and change the bucket_locked state just
		 * before calling tcp_log_remove_id_node(), since that function
		 * will unlock the bucket.
		 */
		if (tln != NULL)
			uma_zfree(tcp_log_id_node_zone, tln);
		tln = tp->t_lin;
		tlb = NULL;
		bucket_locked = false;
		if (tcp_log_remove_id_node(inp, tp, NULL, NULL, &tree_locked)) {
			RECHECK_INP();

			/*
			 * If the TCPCB moved to a new bucket while we had
			 * dropped the lock, restart.
			 */
			if (tp->t_lib != NULL || tp->t_lin != NULL)
				goto restart;
		}

		/*
		 * Yay! We successfully removed the TCPCB from its old
		 * bucket. Phew!
		 *
		 * On to bigger and better things...
		 */
	}

	/* At this point, the TCPCB should not be in any bucket. */
	KASSERT(tp->t_lib == NULL, ("%s: tp->t_lib is not NULL", __func__));

	/*
	 * If the new ID is not empty, we need to now assign this TCPCB to a
	 * new bucket.
	 */
	if (*id) {
		/* Get a new tln, if we don't already have one to reuse. */
		if (tln == NULL) {
			tln = uma_zalloc(tcp_log_id_node_zone,
			    M_NOWAIT | M_ZERO);
			if (tln == NULL) {
				rv = ENOBUFS;
				goto done;
			}
			tln->tln_inp = inp;
			tln->tln_tp = tp;
		}

		/*
		 * Drop the INP lock for a bit. We don't need it, and dropping
		 * it prevents lock order reversals.
		 */
		INP_WUNLOCK(inp);

		/* Make sure we have at least a read lock on the tree. */
		tcp_log_id_validate_tree_lock(tree_locked);
		if (tree_locked == TREE_UNLOCKED) {
			TCPID_TREE_RLOCK();
			tree_locked = TREE_RLOCKED;
		}

refind:
		/*
		 * Remember that we constructed (struct tcp_log_id_node) so
		 * we can safely cast the id to it for the purposes of finding.
		 */
		KASSERT(tlb == NULL, ("%s:%d tlb unexpectedly non-NULL",
		    __func__, __LINE__));
		tmp_tlb = RB_FIND(tcp_log_id_tree, &tcp_log_id_head,
		    (struct tcp_log_id_bucket *) id);

		/*
		 * If we didn't find a matching bucket, we need to add a new
		 * one. This requires a write lock. But, of course, we will
		 * need to recheck some things when we re-acquire the lock.
		 */
		if (tmp_tlb == NULL && tree_locked != TREE_WLOCKED) {
			tree_locked = TREE_WLOCKED;
			if (!TCPID_TREE_UPGRADE()) {
				TCPID_TREE_RUNLOCK();
				TCPID_TREE_WLOCK();

				/*
				 * The tree may have changed while we were
				 * unlocked.
				 */
				goto refind;
			}
		}

		/* If we need to add a new bucket, do it now. */
		if (tmp_tlb == NULL) {
			/* Allocate new bucket. */
			tlb = uma_zalloc(tcp_log_id_bucket_zone, M_NOWAIT);
			if (tlb == NULL) {
				rv = ENOBUFS;
				goto done_noinp;
			}
			counter_u64_add(tcp_log_pcb_ids_cur, 1);
			counter_u64_add(tcp_log_pcb_ids_tot, 1);

			if ((tcp_log_auto_all == false) &&
			    tcp_log_auto_mode &&
			    tcp_log_selectauto()) {
				/* Save off the log state */
				tlb->tlb_logstate = tcp_log_auto_mode;
			} else
				tlb->tlb_logstate = TCP_LOG_STATE_OFF;
			tlb->tlb_loglimit = 0;
			tlb->tlb_tag[0] = '\0'; /* Default to an empty tag. */

			/*
			 * Copy the ID to the bucket.
			 * NB: Don't use strlcpy() unless you are sure
			 * we've always validated NULL termination.
			 *
			 * TODO: When I'm done writing this, see if we
			 * have correctly validated NULL termination and
			 * can use strlcpy(). :-)
			 */
			strncpy(tlb->tlb_id, id, TCP_LOG_ID_LEN - 1);
			tlb->tlb_id[TCP_LOG_ID_LEN - 1] = '\0';

			/*
			 * Take the refcount for the first node and go ahead
			 * and lock this. Note that we zero the tlb_mtx
			 * structure, since 0xdeadc0de flips the right bits
			 * for the code to think that this mutex has already
			 * been initialized. :-(
			 */
			SLIST_INIT(&tlb->tlb_head);
			refcount_init(&tlb->tlb_refcnt, 1);
			tlb->tlb_reqcnt = 1;
			memset(&tlb->tlb_mtx, 0, sizeof(struct mtx));
			TCPID_BUCKET_LOCK_INIT(tlb);
			TCPID_BUCKET_LOCK(tlb);
			bucket_locked = true;

#define	FREE_NEW_TLB()	do {						\
	TCPID_BUCKET_LOCK_DESTROY(tlb);					\
	uma_zfree(tcp_log_id_bucket_zone, tlb);				\
	counter_u64_add(tcp_log_pcb_ids_cur, (int64_t)-1);		\
	counter_u64_add(tcp_log_pcb_ids_tot, (int64_t)-1);		\
	bucket_locked = false;						\
	tlb = NULL;							\
} while (0)
			/*
			 * Relock the INP and make sure we are still
			 * unassigned.
			 */
			INP_WLOCK(inp);
			RECHECK_INP_CLEAN(FREE_NEW_TLB());
			if (tp->t_lib != NULL) {
				FREE_NEW_TLB();
				goto restart;
			}

			/* Add the new bucket to the tree. */
			tmp_tlb = RB_INSERT(tcp_log_id_tree, &tcp_log_id_head,
			    tlb);
			KASSERT(tmp_tlb == NULL,
			    ("%s: Unexpected conflicting bucket (%p) while "
			    "adding new bucket (%p)", __func__, tmp_tlb, tlb));

			/*
			 * If we found a conflicting bucket, free the new
			 * one we made and fall through to use the existing
			 * bucket.
			 */
			if (tmp_tlb != NULL) {
				FREE_NEW_TLB();
				INP_WUNLOCK(inp);
			}
#undef	FREE_NEW_TLB
		}

		/* If we found an existing bucket, use it. */
		if (tmp_tlb != NULL) {
			tlb = tmp_tlb;
			TCPID_BUCKET_LOCK(tlb);
			bucket_locked = true;

			/*
			 * Relock the INP and make sure we are still
			 * unassigned.
			 */
			INP_UNLOCK_ASSERT(inp);
			INP_WLOCK(inp);
			RECHECK_INP();
			if (tp->t_lib != NULL) {
				TCPID_BUCKET_UNLOCK(tlb);
				bucket_locked = false;
				tlb = NULL;
				goto restart;
			}

			/* Take a reference on the bucket. */
			TCPID_BUCKET_REF(tlb);

			/* Record the request. */
			tcp_log_increment_reqcnt(tlb);
		}

		tcp_log_grow_tlb(tlb->tlb_id, tp);

		/* Add the new node to the list. */
		SLIST_INSERT_HEAD(&tlb->tlb_head, tln, tln_list);
		tp->t_lib = tlb;
		tp->t_lin = tln;
		if (tp->t_lib->tlb_logstate > TCP_LOG_STATE_OFF) {
			/* Clone in any logging */

			tp->_t_logstate = tp->t_lib->tlb_logstate;
		}
		if (tp->t_lib->tlb_loglimit) {
			/* The loglimit too */

			tp->t_loglimit = tp->t_lib->tlb_loglimit;
		}
		tln = NULL;
	}


	rv = 0;

done:
	/* Unlock things, as needed, and return. */
	INP_WUNLOCK(inp);
done_noinp:
	INP_UNLOCK_ASSERT(inp);
	if (bucket_locked) {
		TCPID_BUCKET_LOCK_ASSERT(tlb);
		TCPID_BUCKET_UNLOCK(tlb);
	} else if (tlb != NULL)
		TCPID_BUCKET_UNLOCK_ASSERT(tlb);
	if (tree_locked == TREE_WLOCKED) {
		TCPID_TREE_WLOCK_ASSERT();
		TCPID_TREE_WUNLOCK();
	} else if (tree_locked == TREE_RLOCKED) {
		TCPID_TREE_RLOCK_ASSERT();
		TCPID_TREE_RUNLOCK();
	} else
		TCPID_TREE_UNLOCK_ASSERT();
	if (tln != NULL)
		uma_zfree(tcp_log_id_node_zone, tln);
	return (rv);
}

/*
 * Get the TCP log ID for a TCPCB.
 * Called with INPCB locked.
 * 'buf' must point to a buffer that is at least TCP_LOG_ID_LEN bytes long.
 * Returns number of bytes copied.
 */
size_t
tcp_log_get_id(struct tcpcb *tp, char *buf)
{
	size_t len;

	INP_LOCK_ASSERT(tptoinpcb(tp));
	if (tp->t_lib != NULL) {
		len = strlcpy(buf, tp->t_lib->tlb_id, TCP_LOG_ID_LEN);
		KASSERT(len < TCP_LOG_ID_LEN,
		    ("%s:%d: tp->t_lib->tlb_id too long (%zu)",
		    __func__, __LINE__, len));
	} else {
		*buf = '\0';
		len = 0;
	}
	return (len);
}

/*
 * Get the tag associated with the TCPCB's log ID.
 * Called with INPCB locked. Returns with it unlocked.
 * 'buf' must point to a buffer that is at least TCP_LOG_TAG_LEN bytes long.
 * Returns number of bytes copied.
 */
size_t
tcp_log_get_tag(struct tcpcb *tp, char *buf)
{
	struct inpcb *inp = tptoinpcb(tp);
	struct tcp_log_id_bucket *tlb;
	size_t len;
	int tree_locked;

	INP_WLOCK_ASSERT(inp);

	tree_locked = TREE_UNLOCKED;
	tlb = tp->t_lib;

	if (tlb != NULL) {
		TCPID_BUCKET_REF(tlb);
		INP_WUNLOCK(inp);
		TCPID_BUCKET_LOCK(tlb);
		len = strlcpy(buf, tlb->tlb_tag, TCP_LOG_TAG_LEN);
		KASSERT(len < TCP_LOG_TAG_LEN,
		    ("%s:%d: tp->t_lib->tlb_tag too long (%zu)",
		    __func__, __LINE__, len));
		if (!tcp_log_unref_bucket(tlb, &tree_locked, NULL))
			TCPID_BUCKET_UNLOCK(tlb);

		if (tree_locked == TREE_WLOCKED) {
			TCPID_TREE_WLOCK_ASSERT();
			TCPID_TREE_WUNLOCK();
		} else if (tree_locked == TREE_RLOCKED) {
			TCPID_TREE_RLOCK_ASSERT();
			TCPID_TREE_RUNLOCK();
		} else
			TCPID_TREE_UNLOCK_ASSERT();
	} else {
		INP_WUNLOCK(inp);
		*buf = '\0';
		len = 0;
	}

	return (len);
}

/*
 * Get number of connections with the same log ID.
 * Log ID is taken from given TCPCB.
 * Called with INPCB locked.
 */
u_int
tcp_log_get_id_cnt(struct tcpcb *tp)
{

	INP_WLOCK_ASSERT(tptoinpcb(tp));
	return ((tp->t_lib == NULL) ? 0 : tp->t_lib->tlb_refcnt);
}

#ifdef TCPLOG_DEBUG_RINGBUF
/*
 * Functions/macros to increment/decrement reference count for a log
 * entry. This should catch when we do a double-free/double-remove or
 * a double-add.
 */
static inline void
_tcp_log_entry_refcnt_add(struct tcp_log_mem *log_entry, const char *func,
    int line)
{
	int refcnt;

	refcnt = atomic_fetchadd_int(&log_entry->tlm_refcnt, 1);
	if (refcnt != 0)
		panic("%s:%d: log_entry(%p)->tlm_refcnt is %d (expected 0)",
		    func, line, log_entry, refcnt);
}
#define	tcp_log_entry_refcnt_add(l)	\
    _tcp_log_entry_refcnt_add((l), __func__, __LINE__)

static inline void
_tcp_log_entry_refcnt_rem(struct tcp_log_mem *log_entry, const char *func,
    int line)
{
	int refcnt;

	refcnt = atomic_fetchadd_int(&log_entry->tlm_refcnt, -1);
	if (refcnt != 1)
		panic("%s:%d: log_entry(%p)->tlm_refcnt is %d (expected 1)",
		    func, line, log_entry, refcnt);
}
#define	tcp_log_entry_refcnt_rem(l)	\
    _tcp_log_entry_refcnt_rem((l), __func__, __LINE__)

#else /* !TCPLOG_DEBUG_RINGBUF */

#define	tcp_log_entry_refcnt_add(l)
#define	tcp_log_entry_refcnt_rem(l)

#endif

/*
 * Cleanup after removing a log entry, but only decrement the count if we
 * are running INVARIANTS.
 */
static inline void
tcp_log_free_log_common(struct tcp_log_mem *log_entry, int *count __unused)
{

	uma_zfree(tcp_log_zone, log_entry);
#ifdef INVARIANTS
	(*count)--;
	KASSERT(*count >= 0,
	    ("%s: count unexpectedly negative", __func__));
#endif
}

static void
tcp_log_free_entries(struct tcp_log_stailq *head, int *count)
{
	struct tcp_log_mem *log_entry;

	/* Free the entries. */
	while ((log_entry = STAILQ_FIRST(head)) != NULL) {
		STAILQ_REMOVE_HEAD(head, tlm_queue);
		tcp_log_entry_refcnt_rem(log_entry);
		tcp_log_free_log_common(log_entry, count);
	}
}

/* Cleanup after removing a log entry. */
static inline void
tcp_log_remove_log_cleanup(struct tcpcb *tp, struct tcp_log_mem *log_entry)
{
	uma_zfree(tcp_log_zone, log_entry);
	tp->t_lognum--;
	KASSERT(tp->t_lognum >= 0,
	    ("%s: tp->t_lognum unexpectedly negative", __func__));
}

/* Remove a log entry from the head of a list. */
static inline void
tcp_log_remove_log_head(struct tcpcb *tp, struct tcp_log_mem *log_entry)
{

	KASSERT(log_entry == STAILQ_FIRST(&tp->t_logs),
	    ("%s: attempt to remove non-HEAD log entry", __func__));
	STAILQ_REMOVE_HEAD(&tp->t_logs, tlm_queue);
	tcp_log_entry_refcnt_rem(log_entry);
	tcp_log_remove_log_cleanup(tp, log_entry);
}

#ifdef TCPLOG_DEBUG_RINGBUF
/*
 * Initialize the log entry's reference count, which we want to
 * survive allocations.
 */
static int
tcp_log_zone_init(void *mem, int size, int flags __unused)
{
	struct tcp_log_mem *tlm;

	KASSERT(size >= sizeof(struct tcp_log_mem),
	    ("%s: unexpectedly short (%d) allocation", __func__, size));
	tlm = (struct tcp_log_mem *)mem;
	tlm->tlm_refcnt = 0;
	return (0);
}

/*
 * Double check that the refcnt is zero on allocation and return.
 */
static int
tcp_log_zone_ctor(void *mem, int size, void *args __unused, int flags __unused)
{
	struct tcp_log_mem *tlm;

	KASSERT(size >= sizeof(struct tcp_log_mem),
	    ("%s: unexpectedly short (%d) allocation", __func__, size));
	tlm = (struct tcp_log_mem *)mem;
	if (tlm->tlm_refcnt != 0)
		panic("%s:%d: tlm(%p)->tlm_refcnt is %d (expected 0)",
		    __func__, __LINE__, tlm, tlm->tlm_refcnt);
	return (0);
}

static void
tcp_log_zone_dtor(void *mem, int size, void *args __unused)
{
	struct tcp_log_mem *tlm;

	KASSERT(size >= sizeof(struct tcp_log_mem),
	    ("%s: unexpectedly short (%d) allocation", __func__, size));
	tlm = (struct tcp_log_mem *)mem;
	if (tlm->tlm_refcnt != 0)
		panic("%s:%d: tlm(%p)->tlm_refcnt is %d (expected 0)",
		    __func__, __LINE__, tlm, tlm->tlm_refcnt);
}
#endif /* TCPLOG_DEBUG_RINGBUF */

/* Do global initialization. */
void
tcp_log_init(void)
{

	tcp_log_zone = uma_zcreate("tcp_log", sizeof(struct tcp_log_mem),
#ifdef TCPLOG_DEBUG_RINGBUF
	    tcp_log_zone_ctor, tcp_log_zone_dtor, tcp_log_zone_init,
#else
	    NULL, NULL, NULL,
#endif
	    NULL, UMA_ALIGN_PTR, 0);
	(void)uma_zone_set_max(tcp_log_zone, TCP_LOG_BUF_DEFAULT_GLOBAL_LIMIT);
	tcp_log_id_bucket_zone = uma_zcreate("tcp_log_id_bucket",
	    sizeof(struct tcp_log_id_bucket), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);
	tcp_log_id_node_zone = uma_zcreate("tcp_log_id_node",
	    sizeof(struct tcp_log_id_node), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);
#ifdef TCPLOG_DEBUG_COUNTERS
	tcp_log_queued = counter_u64_alloc(M_WAITOK);
	tcp_log_que_fail1 = counter_u64_alloc(M_WAITOK);
	tcp_log_que_fail2 = counter_u64_alloc(M_WAITOK);
	tcp_log_que_fail3 = counter_u64_alloc(M_WAITOK);
	tcp_log_que_fail4 = counter_u64_alloc(M_WAITOK);
	tcp_log_que_fail5 = counter_u64_alloc(M_WAITOK);
	tcp_log_que_copyout = counter_u64_alloc(M_WAITOK);
	tcp_log_que_read = counter_u64_alloc(M_WAITOK);
	tcp_log_que_freed = counter_u64_alloc(M_WAITOK);
#endif
	tcp_log_pcb_ids_cur = counter_u64_alloc(M_WAITOK);
	tcp_log_pcb_ids_tot = counter_u64_alloc(M_WAITOK);

	rw_init_flags(&tcp_id_tree_lock, "TCP ID tree", RW_NEW);
	mtx_init(&tcp_log_expireq_mtx, "TCP log expireq", NULL, MTX_DEF);
	callout_init(&tcp_log_expireq_callout, 1);
}

/* Do per-TCPCB initialization. */
void
tcp_log_tcpcbinit(struct tcpcb *tp)
{

	/* A new TCPCB should start out zero-initialized. */
	STAILQ_INIT(&tp->t_logs);

	/*
	 * If we are doing auto-capturing, figure out whether we will capture
	 * this session.
	 */
	tp->t_loglimit = tcp_log_session_limit;
	if ((tcp_log_auto_all == true) &&
	    tcp_log_auto_mode &&
	    tcp_log_selectauto()) {
		tp->_t_logstate = tcp_log_auto_mode;
		tp->t_flags2 |= TF2_LOG_AUTO;
	}
}

/* Remove entries */
static void
tcp_log_expire(void *unused __unused)
{
	struct tcp_log_id_bucket *tlb;
	struct tcp_log_id_node *tln;
	sbintime_t expiry_limit;
	int tree_locked;

	TCPLOG_EXPIREQ_LOCK();
	if (callout_pending(&tcp_log_expireq_callout)) {
		/* Callout was reset. */
		TCPLOG_EXPIREQ_UNLOCK();
		return;
	}

	/*
	 * Process entries until we reach one that expires too far in the
	 * future. Look one second in the future.
	 */
	expiry_limit = getsbinuptime() + SBT_1S;
	tree_locked = TREE_UNLOCKED;

	while ((tln = STAILQ_FIRST(&tcp_log_expireq_head)) != NULL &&
	    tln->tln_expiretime <= expiry_limit) {
		if (!callout_active(&tcp_log_expireq_callout)) {
			/*
			 * Callout was stopped. I guess we should
			 * just quit at this point.
			 */
			TCPLOG_EXPIREQ_UNLOCK();
			return;
		}

		/*
		 * Remove the node from the head of the list and unlock
		 * the list. Change the expiry time to SBT_MAX as a signal
		 * to other threads that we now own this.
		 */
		STAILQ_REMOVE_HEAD(&tcp_log_expireq_head, tln_expireq);
		tln->tln_expiretime = SBT_MAX;
		TCPLOG_EXPIREQ_UNLOCK();

		/*
		 * Remove the node from the bucket.
		 */
		tlb = tln->tln_bucket;
		TCPID_BUCKET_LOCK(tlb);
		if (tcp_log_remove_id_node(NULL, NULL, tlb, tln, &tree_locked)) {
			tcp_log_id_validate_tree_lock(tree_locked);
			if (tree_locked == TREE_WLOCKED)
				TCPID_TREE_WUNLOCK();
			else
				TCPID_TREE_RUNLOCK();
			tree_locked = TREE_UNLOCKED;
		}

		/* Drop the INP reference. */
		INP_WLOCK(tln->tln_inp);
		if (!in_pcbrele_wlocked(tln->tln_inp))
			INP_WUNLOCK(tln->tln_inp);

		/* Free the log records. */
		tcp_log_free_entries(&tln->tln_entries, &tln->tln_count);

		/* Free the node. */
		uma_zfree(tcp_log_id_node_zone, tln);

		/* Relock the expiry queue. */
		TCPLOG_EXPIREQ_LOCK();
	}

	/*
	 * We've expired all the entries we can. Do we need to reschedule
	 * ourselves?
	 */
	callout_deactivate(&tcp_log_expireq_callout);
	if (tln != NULL) {
		/*
		 * Get max(now + TCP_LOG_EXPIRE_INTVL, tln->tln_expiretime) and
		 * set the next callout to that. (This helps ensure we generally
		 * run the callout no more often than desired.)
		 */
		expiry_limit = getsbinuptime() + TCP_LOG_EXPIRE_INTVL;
		if (expiry_limit < tln->tln_expiretime)
			expiry_limit = tln->tln_expiretime;
		callout_reset_sbt(&tcp_log_expireq_callout, expiry_limit,
		    SBT_1S, tcp_log_expire, NULL, C_ABSOLUTE);
	}

	/* We're done. */
	TCPLOG_EXPIREQ_UNLOCK();
	return;
}

/*
 * Move log data from the TCPCB to a new node. This will reset the TCPCB log
 * entries and log count; however, it will not touch other things from the
 * TCPCB (e.g. t_lin, t_lib).
 *
 * NOTE: Must hold a lock on the INP.
 */
static void
tcp_log_move_tp_to_node(struct tcpcb *tp, struct tcp_log_id_node *tln)
{
	struct inpcb *inp = tptoinpcb(tp);

	INP_WLOCK_ASSERT(inp);

	tln->tln_ie = inp->inp_inc.inc_ie;
	if (inp->inp_inc.inc_flags & INC_ISIPV6)
		tln->tln_af = AF_INET6;
	else
		tln->tln_af = AF_INET;
	tln->tln_entries = tp->t_logs;
	tln->tln_count = tp->t_lognum;
	tln->tln_bucket = tp->t_lib;
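
	/*
	 * The node now owns the queued log entries, so the TCPCB's queue
	 * and count are reinitialized below to keep the entries from being
	 * freed or reused through the connection.
	 */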
	/* Clear information from the PCB. */
	STAILQ_INIT(&tp->t_logs);
	tp->t_lognum = 0;
}

/* Do per-TCPCB cleanup */
void
tcp_log_tcpcbfini(struct tcpcb *tp)
{
	struct tcp_log_id_node *tln, *tln_first;
	struct tcp_log_mem *log_entry;
	sbintime_t callouttime;

	INP_WLOCK_ASSERT(tptoinpcb(tp));
#ifdef TCP_ACCOUNTING
	if (tp->_t_logstate) {
		struct tcp_log_buffer *lgb;
		union tcp_log_stackspecific log;
		struct timeval tv;
		int i;

		memset(&log, 0, sizeof(log));
		if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
			for (i = 0; i < TCP_NUM_CNT_COUNTERS; i++) {
				log.u_raw.u64_flex[i] = tp->tcp_cnt_counters[i];
			}
			lgb = tcp_log_event(tp, NULL,
			    NULL,
			    NULL,
			    TCP_LOG_ACCOUNTING, 0,
			    0, &log, false, NULL, NULL, 0, &tv);
			lgb->tlb_flex1 = TCP_NUM_CNT_COUNTERS;
			lgb->tlb_flex2 = 1;
			for (i = 0; i < TCP_NUM_CNT_COUNTERS; i++) {
				log.u_raw.u64_flex[i] = tp->tcp_proc_time[i];
			}
			lgb = tcp_log_event(tp, NULL,
			    NULL,
			    NULL,
			    TCP_LOG_ACCOUNTING, 0,
			    0, &log, false, NULL, NULL, 0, &tv);
			if (tptoinpcb(tp)->inp_flags2 & INP_MBUF_ACKCMP)
				lgb->tlb_flex1 = TCP_NUM_CNT_COUNTERS;
			else
				lgb->tlb_flex1 = TCP_NUM_PROC_COUNTERS;
			lgb->tlb_flex2 = 2;
		}
		log.u_bbr.timeStamp = tcp_get_usecs(&tv);
		log.u_bbr.cur_del_rate = tp->t_end_info;
		TCP_LOG_EVENTP(tp, NULL,
		    NULL,
		    NULL,
		    TCP_LOG_CONNEND, 0,
		    0, &log, false, &tv);
	}
#endif
	/*
	 * If we were gathering packets to be automatically dumped, try to do
	 * it now. If this succeeds, the log information in the TCPCB will be
	 * cleared. Otherwise, we'll handle the log information as we do
	 * for other states.
	 */
	switch(tp->_t_logstate) {
	case TCP_LOG_STATE_HEAD_AUTO:
		(void)tcp_log_dump_tp_logbuf(tp, "auto-dumped from head",
		    M_NOWAIT, false);
		break;
	case TCP_LOG_STATE_TAIL_AUTO:
		(void)tcp_log_dump_tp_logbuf(tp, "auto-dumped from tail",
		    M_NOWAIT, false);
		break;
	case TCP_LOG_VIA_BBPOINTS:
		(void)tcp_log_dump_tp_logbuf(tp, "auto-dumped from bbpoints",
		    M_NOWAIT, false);
		break;
	case TCP_LOG_STATE_CONTINUAL:
		(void)tcp_log_dump_tp_logbuf(tp, "auto-dumped from continual",
		    M_NOWAIT, false);
		break;
	}

	/*
	 * There are two ways we could keep logs: per-socket or per-ID. If
	 * we are tracking logs with an ID, then the logs survive the
	 * destruction of the TCPCB.
	 *
	 * If the TCPCB is associated with an ID node, move the logs from the
	 * TCPCB to the ID node. In theory, this is safe, for reasons which I
	 * will now explain for my own benefit when I next need to figure out
	 * this code. :-)
	 *
	 * We own the INP lock. Therefore, no one else can change the contents
	 * of this node (Rule C). Further, no one can remove this node from
	 * the bucket while we hold the lock (Rule D). Basically, no one can
	 * mess with this node. That leaves two states in which we could be:
	 *
	 * 1. Another thread is currently waiting to acquire the INP lock, with
	 *    plans to do something with this node. When we drop the INP lock,
	 *    they will have a chance to do that. They will recheck the
	 *    tln_closed field (see note to Rule C) and then acquire the
	 *    bucket lock before proceeding further.
	 *
	 * 2. Another thread will try to acquire a lock at some point in the
	 *    future. If they try to acquire a lock before we set the
	 *    tln_closed field, they will follow state #1. If they try to
	 *    acquire a lock after we set the tln_closed field, they will be
	 *    able to make changes to the node, at will, following Rule C.
	 *
	 * Therefore, we currently own this node and can make any changes
	 * we want. But, as soon as we set the tln_closed field to true, we
	 * have effectively dropped our lock on the node. (For this reason, we
	 * also need to make sure our writes are ordered correctly. An atomic
	 * operation with "release" semantics should be sufficient.)
	 */

	if (tp->t_lin != NULL) {
		struct inpcb *inp = tptoinpcb(tp);

		/* Copy the relevant information to the log entry. */
		tln = tp->t_lin;
		KASSERT(tln->tln_inp == inp,
		    ("%s: Mismatched inp (tln->tln_inp=%p, tp inpcb=%p)",
		    __func__, tln->tln_inp, inp));
		tcp_log_move_tp_to_node(tp, tln);

		/* Clear information from the PCB. */
		tp->t_lin = NULL;
		tp->t_lib = NULL;

		/*
		 * Take a reference on the INP. This ensures that the INP
		 * remains valid while the node is on the expiry queue. This
		 * ensures the INP is valid for other threads that may be
		 * racing to lock this node when we move it to the expire
		 * queue.
		 */
		in_pcbref(inp);

		/*
		 * Store the entry on the expiry list. The exact behavior
		 * depends on whether we have entries to keep. If so, we
		 * put the entry at the tail of the list and expire in
		 * TCP_LOG_EXPIRE_TIME. Otherwise, we expire "now" and put
		 * the entry at the head of the list. (Handling the cleanup
		 * via the expiry timer lets us avoid locking messy-ness here.)
		 */
		tln->tln_expiretime = getsbinuptime();
		TCPLOG_EXPIREQ_LOCK();
		if (tln->tln_count) {
			tln->tln_expiretime += TCP_LOG_EXPIRE_TIME;
			if (STAILQ_EMPTY(&tcp_log_expireq_head) &&
			    !callout_active(&tcp_log_expireq_callout)) {
				/*
				 * We are adding the first entry and a callout
				 * is not currently scheduled; therefore, we
				 * need to schedule one.
				 */
				callout_reset_sbt(&tcp_log_expireq_callout,
				    tln->tln_expiretime, SBT_1S, tcp_log_expire,
				    NULL, C_ABSOLUTE);
			}
			STAILQ_INSERT_TAIL(&tcp_log_expireq_head, tln,
			    tln_expireq);
		} else {
			callouttime = tln->tln_expiretime +
			    TCP_LOG_EXPIRE_INTVL;
			tln_first = STAILQ_FIRST(&tcp_log_expireq_head);

			if ((tln_first == NULL ||
			    callouttime < tln_first->tln_expiretime) &&
			    (callout_pending(&tcp_log_expireq_callout) ||
			    !callout_active(&tcp_log_expireq_callout))) {
				/*
				 * The list is empty, or we want to run the
				 * expire code before the first entry's timer
				 * fires. Also, we are in a case where a callout
				 * is not actively running. We want to reset
				 * the callout to occur sooner.
				 */
				callout_reset_sbt(&tcp_log_expireq_callout,
				    callouttime, SBT_1S, tcp_log_expire, NULL,
				    C_ABSOLUTE);
			}

			/*
			 * Insert to the head, or just after the head, as
			 * appropriate. (This might result in small
			 * mis-orderings as a bunch of "expire now" entries
			 * gather at the start of the list, but that should
			 * not produce big problems, since the expire timer
			 * will walk through all of them.)
			 */
			if (tln_first == NULL ||
			    tln->tln_expiretime < tln_first->tln_expiretime)
				STAILQ_INSERT_HEAD(&tcp_log_expireq_head, tln,
				    tln_expireq);
			else
				STAILQ_INSERT_AFTER(&tcp_log_expireq_head,
				    tln_first, tln, tln_expireq);
		}
		TCPLOG_EXPIREQ_UNLOCK();

		/*
		 * We are done messing with the tln. After this point, we
		 * can't touch it. (Note that the "release" semantics should
		 * be included with the TCPLOG_EXPIREQ_UNLOCK() call above.
		 * Therefore, they should be unnecessary here. However, it
		 * seems like a good idea to include them anyway, since we
		 * really are releasing a lock here.)
		 */
		atomic_store_rel_int(&tln->tln_closed, 1);
	} else {
		/* Remove log entries. */
		while ((log_entry = STAILQ_FIRST(&tp->t_logs)) != NULL)
			tcp_log_remove_log_head(tp, log_entry);
		KASSERT(tp->t_lognum == 0,
		    ("%s: After freeing entries, tp->t_lognum=%d (expected 0)",
		    __func__, tp->t_lognum));
	}

	/*
	 * Change the log state to off (just in case anything tries to sneak
	 * in a last-minute log).
	 */
	tp->_t_logstate = TCP_LOG_STATE_OFF;
}
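
/*
 * Free every log entry queued on the TCPCB and turn per-connection logging
 * off.  tcp_log_event() calls this when the global tcp_disable_all_bb_logs
 * switch is set.
 */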
static void
tcp_log_purge_tp_logbuf(struct tcpcb *tp)
{
	struct tcp_log_mem *log_entry;

	INP_WLOCK_ASSERT(tptoinpcb(tp));
	if (tp->t_lognum == 0)
		return;

	while ((log_entry = STAILQ_FIRST(&tp->t_logs)) != NULL)
		tcp_log_remove_log_head(tp, log_entry);
	KASSERT(tp->t_lognum == 0,
	    ("%s: After freeing entries, tp->t_lognum=%d (expected 0)",
	    __func__, tp->t_lognum));
	tp->_t_logstate = TCP_LOG_STATE_OFF;
}

/*
 * This logs an event for a TCP socket. Normally, this is called via
 * TCP_LOG_EVENT or TCP_LOG_EVENT_VERBOSE. See the documentation for
 * TCP_LOG_EVENT().
 */

struct tcp_log_buffer *
tcp_log_event(struct tcpcb *tp, struct tcphdr *th, struct sockbuf *rxbuf,
    struct sockbuf *txbuf, uint8_t eventid, int errornum, uint32_t len,
    union tcp_log_stackspecific *stackinfo, int th_hostorder,
    const char *output_caller, const char *func, int line, const struct timeval *itv)
{
	struct tcp_log_mem *log_entry;
	struct tcp_log_buffer *log_buf;
	int attempt_count = 0;
	struct tcp_log_verbose *log_verbose;
	uint32_t logsn;

	KASSERT((func == NULL && line == 0) || (func != NULL && line > 0),
	    ("%s called with inconsistent func (%p) and line (%d) arguments",
	    __func__, func, line));

	INP_WLOCK_ASSERT(tptoinpcb(tp));
	if (tcp_disable_all_bb_logs) {
		/*
		 * The global logging-shutdown switch
		 * has been thrown. Call the purge
		 * function, which frees the logs and
		 * turns off logging.
		 */
		tcp_log_purge_tp_logbuf(tp);
		return (NULL);
	}
	KASSERT(tp->_t_logstate == TCP_LOG_STATE_HEAD ||
	    tp->_t_logstate == TCP_LOG_STATE_TAIL ||
	    tp->_t_logstate == TCP_LOG_STATE_CONTINUAL ||
	    tp->_t_logstate == TCP_LOG_STATE_HEAD_AUTO ||
	    tp->_t_logstate == TCP_LOG_VIA_BBPOINTS ||
	    tp->_t_logstate == TCP_LOG_STATE_TAIL_AUTO,
	    ("%s called with unexpected tp->_t_logstate (%d)", __func__,
	    tp->_t_logstate));

	/*
	 * Get the serial number. We do this early so it will
	 * increment even if we end up skipping the log entry for some
	 * reason.
	 */
	logsn = tp->t_logsn++;

	/*
	 * Can we get a new log entry? If so, increment the lognum counter
	 * here.
	 */
retry:
	if (tp->t_lognum < tp->t_loglimit) {
		if ((log_entry = uma_zalloc(tcp_log_zone, M_NOWAIT)) != NULL)
			tp->t_lognum++;
	} else
		log_entry = NULL;

	/* Do we need to try to reuse? */
	if (log_entry == NULL) {
		/*
		 * Sacrifice auto-logged sessions without a log ID if
		 * tcp_log_auto_all is false. (If they don't have a log
		 * ID by now, it is probable that either they won't get one
		 * or we are resource-constrained.)
		 */
		if (tp->t_lib == NULL && (tp->t_flags2 & TF2_LOG_AUTO) &&
		    !tcp_log_auto_all) {
			if (tcp_log_state_change(tp, TCP_LOG_STATE_CLEAR)) {
#ifdef INVARIANTS
				panic("%s:%d: tcp_log_state_change() failed "
				    "to set tp %p to TCP_LOG_STATE_CLEAR",
				    __func__, __LINE__, tp);
#endif
				tp->_t_logstate = TCP_LOG_STATE_OFF;
			}
			return (NULL);
		}
		/*
		 * If we are in TCP_LOG_STATE_HEAD_AUTO state, try to dump
		 * the buffers. If successful, deactivate tracing. Otherwise,
		 * leave it active so we will retry.
		 */
		if (tp->_t_logstate == TCP_LOG_STATE_HEAD_AUTO &&
		    !tcp_log_dump_tp_logbuf(tp, "auto-dumped from head",
		    M_NOWAIT, false)) {
			tp->_t_logstate = TCP_LOG_STATE_OFF;
			return(NULL);
		} else if ((tp->_t_logstate == TCP_LOG_STATE_CONTINUAL) &&
		    !tcp_log_dump_tp_logbuf(tp, "auto-dumped from continual",
		    M_NOWAIT, false)) {
			if (attempt_count == 0) {
				attempt_count++;
				goto retry;
			}
#ifdef TCPLOG_DEBUG_COUNTERS
			counter_u64_add(tcp_log_que_fail4, 1);
#endif
			return(NULL);

		} else if ((tp->_t_logstate == TCP_LOG_VIA_BBPOINTS) &&
		    !tcp_log_dump_tp_logbuf(tp, "auto-dumped from bbpoints",
		    M_NOWAIT, false)) {
			if (attempt_count == 0) {
				attempt_count++;
				goto retry;
			}
#ifdef TCPLOG_DEBUG_COUNTERS
			counter_u64_add(tcp_log_que_fail4, 1);
#endif
			return(NULL);
		} else if (tp->_t_logstate == TCP_LOG_STATE_HEAD_AUTO)
			return(NULL);

		/* If in HEAD state, just deactivate the tracing and return. */
		if (tp->_t_logstate == TCP_LOG_STATE_HEAD) {
			tp->_t_logstate = TCP_LOG_STATE_OFF;
			return(NULL);
		}
		/*
		 * Get a buffer to reuse. If that fails, just give up.
		 * (We can't log anything without a buffer in which to
		 * put it.)
		 *
		 * Note that we don't change the t_lognum counter
		 * here. Because we are re-using the buffer, the total
		 * number won't change.
		 */
		if ((log_entry = STAILQ_FIRST(&tp->t_logs)) == NULL)
			return(NULL);
		STAILQ_REMOVE_HEAD(&tp->t_logs, tlm_queue);
		tcp_log_entry_refcnt_rem(log_entry);
	}

	KASSERT(log_entry != NULL,
	    ("%s: log_entry unexpectedly NULL", __func__));

	/* Extract the log buffer and verbose buffer pointers. */
	log_buf = &log_entry->tlm_buf;
	log_verbose = &log_entry->tlm_v;

	/* Basic entries. */
	if (itv == NULL)
		microuptime(&log_buf->tlb_tv);
	else
		memcpy(&log_buf->tlb_tv, itv, sizeof(struct timeval));
	log_buf->tlb_ticks = ticks;
	log_buf->tlb_sn = logsn;
	log_buf->tlb_stackid = tp->t_fb->tfb_id;
	log_buf->tlb_eventid = eventid;
	log_buf->tlb_eventflags = 0;
	log_buf->tlb_errno = errornum;

	/* Socket buffers */
	if (rxbuf != NULL) {
		log_buf->tlb_eventflags |= TLB_FLAG_RXBUF;
		log_buf->tlb_rxbuf.tls_sb_acc = rxbuf->sb_acc;
		log_buf->tlb_rxbuf.tls_sb_ccc = rxbuf->sb_ccc;
		log_buf->tlb_rxbuf.tls_sb_spare = 0;
	} else {
		log_buf->tlb_rxbuf.tls_sb_acc = 0;
		log_buf->tlb_rxbuf.tls_sb_ccc = 0;
	}
	if (txbuf != NULL) {
		log_buf->tlb_eventflags |= TLB_FLAG_TXBUF;
		log_buf->tlb_txbuf.tls_sb_acc = txbuf->sb_acc;
		log_buf->tlb_txbuf.tls_sb_ccc = txbuf->sb_ccc;
		log_buf->tlb_txbuf.tls_sb_spare = 0;
	} else {
		log_buf->tlb_txbuf.tls_sb_acc = 0;
		log_buf->tlb_txbuf.tls_sb_ccc = 0;
	}
	/* Copy values from tp to the log entry. */
#define	COPY_STAT(f)	log_buf->tlb_ ## f = tp->f
#define	COPY_STAT_T(f)	log_buf->tlb_ ## f = tp->t_ ## f
	COPY_STAT_T(state);
	COPY_STAT_T(starttime);
	COPY_STAT(iss);
	COPY_STAT_T(flags);
	COPY_STAT(snd_una);
	COPY_STAT(snd_max);
	COPY_STAT(snd_cwnd);
	COPY_STAT(snd_nxt);
	COPY_STAT(snd_recover);
	COPY_STAT(snd_wnd);
	COPY_STAT(snd_ssthresh);
	COPY_STAT_T(srtt);
	COPY_STAT_T(rttvar);
	COPY_STAT(rcv_up);
	COPY_STAT(rcv_adv);
	COPY_STAT(rcv_nxt);
	COPY_STAT(rcv_wnd);
	COPY_STAT_T(dupacks);
	COPY_STAT_T(segqlen);
	COPY_STAT(snd_numholes);
	COPY_STAT(snd_scale);
	COPY_STAT(rcv_scale);
	COPY_STAT_T(flags2);
	COPY_STAT_T(fbyte_in);
	COPY_STAT_T(fbyte_out);
#undef COPY_STAT
#undef COPY_STAT_T
	/* Copy stack-specific info. */
	if (stackinfo != NULL) {
		memcpy(&log_buf->tlb_stackinfo, stackinfo,
		    sizeof(log_buf->tlb_stackinfo));
		log_buf->tlb_eventflags |= TLB_FLAG_STACKINFO;
	}
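
	/*
	 * Each optional section recorded above and below is advertised via
	 * a bit in tlb_eventflags (TLB_FLAG_RXBUF, TLB_FLAG_TXBUF,
	 * TLB_FLAG_STACKINFO, TLB_FLAG_HDR, TLB_FLAG_VERBOSE); consumers of
	 * the records use these bits to tell which fields are valid.
	 */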
Looney 18742529f56eSJonathan T. Looney /* The packet */ 18752529f56eSJonathan T. Looney log_buf->tlb_len = len; 18762529f56eSJonathan T. Looney if (th) { 18772529f56eSJonathan T. Looney int optlen; 18782529f56eSJonathan T. Looney 18792529f56eSJonathan T. Looney log_buf->tlb_eventflags |= TLB_FLAG_HDR; 18802529f56eSJonathan T. Looney log_buf->tlb_th = *th; 18812529f56eSJonathan T. Looney if (th_hostorder) 18822529f56eSJonathan T. Looney tcp_fields_to_net(&log_buf->tlb_th); 18832529f56eSJonathan T. Looney optlen = (th->th_off << 2) - sizeof (struct tcphdr); 18842529f56eSJonathan T. Looney if (optlen > 0) 18852529f56eSJonathan T. Looney memcpy(log_buf->tlb_opts, th + 1, optlen); 188669c7c811SRandall Stewart } else { 188769c7c811SRandall Stewart memset(&log_buf->tlb_th, 0, sizeof(*th)); 18882529f56eSJonathan T. Looney } 18892529f56eSJonathan T. Looney 18902529f56eSJonathan T. Looney /* Verbose information */ 18912529f56eSJonathan T. Looney if (func != NULL) { 18922529f56eSJonathan T. Looney log_buf->tlb_eventflags |= TLB_FLAG_VERBOSE; 18932529f56eSJonathan T. Looney if (output_caller != NULL) 18942529f56eSJonathan T. Looney strlcpy(log_verbose->tlv_snd_frm, output_caller, 18952529f56eSJonathan T. Looney TCP_FUNC_LEN); 18962529f56eSJonathan T. Looney else 18972529f56eSJonathan T. Looney *log_verbose->tlv_snd_frm = 0; 18982529f56eSJonathan T. Looney strlcpy(log_verbose->tlv_trace_func, func, TCP_FUNC_LEN); 18992529f56eSJonathan T. Looney log_verbose->tlv_trace_line = line; 19002529f56eSJonathan T. Looney } 19012529f56eSJonathan T. Looney 19022529f56eSJonathan T. Looney /* Insert the new log at the tail. */ 19032529f56eSJonathan T. Looney STAILQ_INSERT_TAIL(&tp->t_logs, log_entry, tlm_queue); 19042529f56eSJonathan T. Looney tcp_log_entry_refcnt_add(log_entry); 19052529f56eSJonathan T. Looney return (log_buf); 19062529f56eSJonathan T. Looney } 19072529f56eSJonathan T. Looney 19082529f56eSJonathan T. Looney /* 19092529f56eSJonathan T. Looney * Change the logging state for a TCPCB. Returns 0 on success or an 19102529f56eSJonathan T. Looney * error code on failure. 19112529f56eSJonathan T. Looney */ 19122529f56eSJonathan T. Looney int 19132529f56eSJonathan T. Looney tcp_log_state_change(struct tcpcb *tp, int state) 19142529f56eSJonathan T. Looney { 19152529f56eSJonathan T. Looney struct tcp_log_mem *log_entry; 191669c7c811SRandall Stewart int rv; 19172529f56eSJonathan T. Looney 19189eb0e832SGleb Smirnoff INP_WLOCK_ASSERT(tptoinpcb(tp)); 191969c7c811SRandall Stewart rv = 0; 19202529f56eSJonathan T. Looney switch(state) { 19212529f56eSJonathan T. Looney case TCP_LOG_STATE_CLEAR: 19222529f56eSJonathan T. Looney while ((log_entry = STAILQ_FIRST(&tp->t_logs)) != NULL) 19232529f56eSJonathan T. Looney tcp_log_remove_log_head(tp, log_entry); 19242529f56eSJonathan T. Looney /* Fall through */ 19252529f56eSJonathan T. Looney 19262529f56eSJonathan T. Looney case TCP_LOG_STATE_OFF: 192769c7c811SRandall Stewart tp->_t_logstate = TCP_LOG_STATE_OFF; 19282529f56eSJonathan T. Looney break; 19292529f56eSJonathan T. Looney 19302529f56eSJonathan T. Looney case TCP_LOG_STATE_TAIL: 19312529f56eSJonathan T. Looney case TCP_LOG_STATE_HEAD: 19322529f56eSJonathan T. Looney case TCP_LOG_STATE_CONTINUAL: 193369c7c811SRandall Stewart case TCP_LOG_VIA_BBPOINTS: 19342529f56eSJonathan T. Looney case TCP_LOG_STATE_HEAD_AUTO: 19352529f56eSJonathan T. 
Looney case TCP_LOG_STATE_TAIL_AUTO: 193669c7c811SRandall Stewart /* 193769c7c811SRandall Stewart * When the RATIO_OFF state is set for the bucket, the log ID 193869c7c811SRandall Stewart * this tp is associated with has been probabilistically opted 193969c7c811SRandall Stewart * out of logging per tcp_log_apply_ratio(). 194069c7c811SRandall Stewart */ 194169c7c811SRandall Stewart if (tp->t_lib == NULL || 194269c7c811SRandall Stewart tp->t_lib->tlb_logstate != TCP_LOG_STATE_RATIO_OFF) { 194369c7c811SRandall Stewart tp->_t_logstate = state; 194469c7c811SRandall Stewart } else { 194569c7c811SRandall Stewart rv = ECANCELED; 194669c7c811SRandall Stewart tp->_t_logstate = TCP_LOG_STATE_OFF; 194769c7c811SRandall Stewart } 19482529f56eSJonathan T. Looney break; 19492529f56eSJonathan T. Looney 19502529f56eSJonathan T. Looney default: 19512529f56eSJonathan T. Looney return (EINVAL); 19522529f56eSJonathan T. Looney } 1953a9a08eceSRandall Stewart if (tcp_disable_all_bb_logs) { 1954a9a08eceSRandall Stewart /* We are prohibited from doing any logs */ 195569c7c811SRandall Stewart tp->_t_logstate = TCP_LOG_STATE_OFF; 195669c7c811SRandall Stewart rv = EBUSY; 1957a9a08eceSRandall Stewart } 19582529f56eSJonathan T. Looney tp->t_flags2 &= ~(TF2_LOG_AUTO); 19592529f56eSJonathan T. Looney 196069c7c811SRandall Stewart return (rv); 19612529f56eSJonathan T. Looney } 19622529f56eSJonathan T. Looney 19632529f56eSJonathan T. Looney /* If tcp_drain() is called, flush half the log entries. */ 19642529f56eSJonathan T. Looney void 19652529f56eSJonathan T. Looney tcp_log_drain(struct tcpcb *tp) 19662529f56eSJonathan T. Looney { 19672529f56eSJonathan T. Looney struct tcp_log_mem *log_entry, *next; 19682529f56eSJonathan T. Looney int target, skip; 19692529f56eSJonathan T. Looney 19709eb0e832SGleb Smirnoff INP_WLOCK_ASSERT(tptoinpcb(tp)); 19712529f56eSJonathan T. Looney if ((target = tp->t_lognum / 2) == 0) 19722529f56eSJonathan T. Looney return; 19732529f56eSJonathan T. Looney 19742529f56eSJonathan T. Looney /* 197569c7c811SRandall Stewart * XXXRRS: At this I don't think this is wise that 197669c7c811SRandall Stewart * we do this. All that a drain call means is that 197769c7c811SRandall Stewart * we are hitting one of the system mbuf limits. BB 197869c7c811SRandall Stewart * logging, or freeing of them, will not create any 197969c7c811SRandall Stewart * more mbufs and really has nothing to do with 198069c7c811SRandall Stewart * the system running out of mbufs. For now I 198169c7c811SRandall Stewart * am changing this to free any "AUTO" by dumping 198269c7c811SRandall Stewart * them out. But this should either be changed 198369c7c811SRandall Stewart * so that it gets called when we hit the BB limit 198469c7c811SRandall Stewart * or it should just not get called (one of the two) 198569c7c811SRandall Stewart * since I don't think the mbuf <-> BB log cleanup 198669c7c811SRandall Stewart * is the right thing to do here. 198769c7c811SRandall Stewart */ 198869c7c811SRandall Stewart /* 19892529f56eSJonathan T. Looney * If we are logging the "head" packets, we want to discard 19902529f56eSJonathan T. Looney * from the tail of the queue. Otherwise, we want to discard 19912529f56eSJonathan T. Looney * from the head. 19922529f56eSJonathan T. Looney */ 199369c7c811SRandall Stewart if (tp->_t_logstate == TCP_LOG_STATE_HEAD) { 19942529f56eSJonathan T. Looney skip = tp->t_lognum - target; 19952529f56eSJonathan T. Looney STAILQ_FOREACH(log_entry, &tp->t_logs, tlm_queue) 19962529f56eSJonathan T. Looney if (!--skip) 19972529f56eSJonathan T. 
Looney break; 19982529f56eSJonathan T. Looney KASSERT(log_entry != NULL, 19992529f56eSJonathan T. Looney ("%s: skipped through all entries!", __func__)); 20002529f56eSJonathan T. Looney if (log_entry == NULL) 20012529f56eSJonathan T. Looney return; 20022529f56eSJonathan T. Looney while ((next = STAILQ_NEXT(log_entry, tlm_queue)) != NULL) { 20032529f56eSJonathan T. Looney STAILQ_REMOVE_AFTER(&tp->t_logs, log_entry, tlm_queue); 20042529f56eSJonathan T. Looney tcp_log_entry_refcnt_rem(next); 20052529f56eSJonathan T. Looney tcp_log_remove_log_cleanup(tp, next); 20062529f56eSJonathan T. Looney #ifdef INVARIANTS 20072529f56eSJonathan T. Looney target--; 20082529f56eSJonathan T. Looney #endif 20092529f56eSJonathan T. Looney } 20102529f56eSJonathan T. Looney KASSERT(target == 0, 20112529f56eSJonathan T. Looney ("%s: After removing from tail, target was %d", __func__, 20122529f56eSJonathan T. Looney target)); 201369c7c811SRandall Stewart } else if (tp->_t_logstate == TCP_LOG_STATE_HEAD_AUTO) { 201469c7c811SRandall Stewart (void)tcp_log_dump_tp_logbuf(tp, "auto-dumped from head at drain", 201569c7c811SRandall Stewart M_NOWAIT, false); 201669c7c811SRandall Stewart } else if (tp->_t_logstate == TCP_LOG_STATE_TAIL_AUTO) { 201769c7c811SRandall Stewart (void)tcp_log_dump_tp_logbuf(tp, "auto-dumped from tail at drain", 201869c7c811SRandall Stewart M_NOWAIT, false); 201969c7c811SRandall Stewart } else if (tp->_t_logstate == TCP_LOG_VIA_BBPOINTS) { 202069c7c811SRandall Stewart (void)tcp_log_dump_tp_logbuf(tp, "auto-dumped from bbpoints", 202169c7c811SRandall Stewart M_NOWAIT, false); 202269c7c811SRandall Stewart } else if (tp->_t_logstate == TCP_LOG_STATE_CONTINUAL) { 20232529f56eSJonathan T. Looney (void)tcp_log_dump_tp_logbuf(tp, "auto-dumped from continual", 20242529f56eSJonathan T. Looney M_NOWAIT, false); 20252529f56eSJonathan T. Looney } else { 20262529f56eSJonathan T. Looney while ((log_entry = STAILQ_FIRST(&tp->t_logs)) != NULL && 20272529f56eSJonathan T. Looney target--) 20282529f56eSJonathan T. Looney tcp_log_remove_log_head(tp, log_entry); 20292529f56eSJonathan T. Looney KASSERT(target <= 0, 20302529f56eSJonathan T. Looney ("%s: After removing from head, target was %d", __func__, 20312529f56eSJonathan T. Looney target)); 20322529f56eSJonathan T. Looney KASSERT(tp->t_lognum > 0, 20332529f56eSJonathan T. Looney ("%s: After removing from head, tp->t_lognum was %d", 20342529f56eSJonathan T. Looney __func__, target)); 20352529f56eSJonathan T. Looney KASSERT(log_entry != NULL, 20362529f56eSJonathan T. Looney ("%s: After removing from head, the tailq was empty", 20372529f56eSJonathan T. Looney __func__)); 20382529f56eSJonathan T. Looney } 20392529f56eSJonathan T. Looney } 20402529f56eSJonathan T. Looney 20412529f56eSJonathan T. Looney static inline int 20422529f56eSJonathan T. Looney tcp_log_copyout(struct sockopt *sopt, void *src, void *dst, size_t len) 20432529f56eSJonathan T. Looney { 20442529f56eSJonathan T. Looney 20452529f56eSJonathan T. Looney if (sopt->sopt_td != NULL) 20462529f56eSJonathan T. Looney return (copyout(src, dst, len)); 20472529f56eSJonathan T. Looney bcopy(src, dst, len); 20482529f56eSJonathan T. Looney return (0); 20492529f56eSJonathan T. Looney } 20502529f56eSJonathan T. Looney 20512529f56eSJonathan T. Looney static int 20522529f56eSJonathan T. Looney tcp_log_logs_to_buf(struct sockopt *sopt, struct tcp_log_stailq *log_tailqp, 20532529f56eSJonathan T. Looney struct tcp_log_buffer **end, int count) 20542529f56eSJonathan T. Looney { 20552529f56eSJonathan T. 
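	/*
	 * Helper used by tcp_log_getlogbuf() and tcp_log_expandlogbuf():
	 * walk the supplied list and copy each entry into the buffer
	 * described by sopt.  Entries without a stored header are
	 * zero-padded to the full structure size, and verbose data is
	 * appended when present, so the output is a sequence of
	 * fixed-size records, each optionally followed by a verbose block.
	 */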
Looney struct tcp_log_buffer *out_entry; 20562529f56eSJonathan T. Looney struct tcp_log_mem *log_entry; 20572529f56eSJonathan T. Looney size_t entrysize; 20582529f56eSJonathan T. Looney int error; 20592529f56eSJonathan T. Looney #ifdef INVARIANTS 20602529f56eSJonathan T. Looney int orig_count = count; 20612529f56eSJonathan T. Looney #endif 20622529f56eSJonathan T. Looney 20632529f56eSJonathan T. Looney /* Copy the data out. */ 20642529f56eSJonathan T. Looney error = 0; 20652529f56eSJonathan T. Looney out_entry = (struct tcp_log_buffer *) sopt->sopt_val; 20662529f56eSJonathan T. Looney STAILQ_FOREACH(log_entry, log_tailqp, tlm_queue) { 20672529f56eSJonathan T. Looney count--; 20682529f56eSJonathan T. Looney KASSERT(count >= 0, 20692529f56eSJonathan T. Looney ("%s:%d: Exceeded expected count (%d) processing list %p", 20702529f56eSJonathan T. Looney __func__, __LINE__, orig_count, log_tailqp)); 20712529f56eSJonathan T. Looney 20722529f56eSJonathan T. Looney #ifdef TCPLOG_DEBUG_COUNTERS 20732529f56eSJonathan T. Looney counter_u64_add(tcp_log_que_copyout, 1); 20742529f56eSJonathan T. Looney #endif 20752529f56eSJonathan T. Looney 20762529f56eSJonathan T. Looney /* 20772529f56eSJonathan T. Looney * Skip copying out the header if it isn't present. 20782529f56eSJonathan T. Looney * Instead, copy out zeros (to ensure we don't leak info). 20792529f56eSJonathan T. Looney * TODO: Make sure we truly do zero everything we don't 20802529f56eSJonathan T. Looney * explicitly set. 20812529f56eSJonathan T. Looney */ 20822529f56eSJonathan T. Looney if (log_entry->tlm_buf.tlb_eventflags & TLB_FLAG_HDR) 20832529f56eSJonathan T. Looney entrysize = sizeof(struct tcp_log_buffer); 20842529f56eSJonathan T. Looney else 20852529f56eSJonathan T. Looney entrysize = offsetof(struct tcp_log_buffer, tlb_th); 20862529f56eSJonathan T. Looney error = tcp_log_copyout(sopt, &log_entry->tlm_buf, out_entry, 20872529f56eSJonathan T. Looney entrysize); 20882529f56eSJonathan T. Looney if (error) 20892529f56eSJonathan T. Looney break; 20902529f56eSJonathan T. Looney if (!(log_entry->tlm_buf.tlb_eventflags & TLB_FLAG_HDR)) { 20912529f56eSJonathan T. Looney error = tcp_log_copyout(sopt, zerobuf, 20922529f56eSJonathan T. Looney ((uint8_t *)out_entry) + entrysize, 20932529f56eSJonathan T. Looney sizeof(struct tcp_log_buffer) - entrysize); 20942529f56eSJonathan T. Looney } 20952529f56eSJonathan T. Looney 20962529f56eSJonathan T. Looney /* 20972529f56eSJonathan T. Looney * Copy out the verbose bit, if needed. Either way, 20982529f56eSJonathan T. Looney * increment the output pointer the correct amount. 20992529f56eSJonathan T. Looney */ 21002529f56eSJonathan T. Looney if (log_entry->tlm_buf.tlb_eventflags & TLB_FLAG_VERBOSE) { 21012529f56eSJonathan T. Looney error = tcp_log_copyout(sopt, &log_entry->tlm_v, 21022529f56eSJonathan T. Looney out_entry->tlb_verbose, 21032529f56eSJonathan T. Looney sizeof(struct tcp_log_verbose)); 21042529f56eSJonathan T. Looney if (error) 21052529f56eSJonathan T. Looney break; 21062529f56eSJonathan T. Looney out_entry = (struct tcp_log_buffer *) 21072529f56eSJonathan T. Looney (((uint8_t *) (out_entry + 1)) + 21082529f56eSJonathan T. Looney sizeof(struct tcp_log_verbose)); 21092529f56eSJonathan T. Looney } else 21102529f56eSJonathan T. Looney out_entry++; 21112529f56eSJonathan T. Looney } 21122529f56eSJonathan T. Looney *end = out_entry; 21132529f56eSJonathan T. Looney KASSERT(error || count == 0, 21142529f56eSJonathan T. Looney ("%s:%d: Less than expected count (%d) processing list %p" 21152529f56eSJonathan T. 
Looney " (%d remain)", __func__, __LINE__, orig_count, 21162529f56eSJonathan T. Looney log_tailqp, count)); 21172529f56eSJonathan T. Looney 21182529f56eSJonathan T. Looney return (error); 21192529f56eSJonathan T. Looney } 21202529f56eSJonathan T. Looney 21212529f56eSJonathan T. Looney /* 21222529f56eSJonathan T. Looney * Copy out the buffer. Note that we do incremental copying, so 21232529f56eSJonathan T. Looney * sooptcopyout() won't work. However, the goal is to produce the same 21242529f56eSJonathan T. Looney * end result as if we copied in the entire user buffer, updated it, 21252529f56eSJonathan T. Looney * and then used sooptcopyout() to copy it out. 21262529f56eSJonathan T. Looney * 21272529f56eSJonathan T. Looney * NOTE: This should be called with a write lock on the PCB; however, 21282529f56eSJonathan T. Looney * the function will drop it after it extracts the data from the TCPCB. 21292529f56eSJonathan T. Looney */ 21302529f56eSJonathan T. Looney int 21312529f56eSJonathan T. Looney tcp_log_getlogbuf(struct sockopt *sopt, struct tcpcb *tp) 21322529f56eSJonathan T. Looney { 21332529f56eSJonathan T. Looney struct tcp_log_stailq log_tailq; 21342529f56eSJonathan T. Looney struct tcp_log_mem *log_entry, *log_next; 21352529f56eSJonathan T. Looney struct tcp_log_buffer *out_entry; 21369eb0e832SGleb Smirnoff struct inpcb *inp = tptoinpcb(tp); 21372529f56eSJonathan T. Looney size_t outsize, entrysize; 21382529f56eSJonathan T. Looney int error, outnum; 21392529f56eSJonathan T. Looney 21409eb0e832SGleb Smirnoff INP_WLOCK_ASSERT(inp); 21412529f56eSJonathan T. Looney 21422529f56eSJonathan T. Looney /* 21432529f56eSJonathan T. Looney * Determine which log entries will fit in the buffer. As an 21442529f56eSJonathan T. Looney * optimization, skip this if all the entries will clearly fit 21452529f56eSJonathan T. Looney * in the buffer. (However, get an exact size if we are using 21462529f56eSJonathan T. Looney * INVARIANTS.) 21472529f56eSJonathan T. Looney */ 21482529f56eSJonathan T. Looney #ifndef INVARIANTS 21492529f56eSJonathan T. Looney if (sopt->sopt_valsize / (sizeof(struct tcp_log_buffer) + 21502529f56eSJonathan T. Looney sizeof(struct tcp_log_verbose)) >= tp->t_lognum) { 21512529f56eSJonathan T. Looney log_entry = STAILQ_LAST(&tp->t_logs, tcp_log_mem, tlm_queue); 21522529f56eSJonathan T. Looney log_next = NULL; 21532529f56eSJonathan T. Looney outsize = 0; 21542529f56eSJonathan T. Looney outnum = tp->t_lognum; 21552529f56eSJonathan T. Looney } else { 21562529f56eSJonathan T. Looney #endif 21572529f56eSJonathan T. Looney outsize = outnum = 0; 21582529f56eSJonathan T. Looney log_entry = NULL; 21592529f56eSJonathan T. Looney STAILQ_FOREACH(log_next, &tp->t_logs, tlm_queue) { 21602529f56eSJonathan T. Looney entrysize = sizeof(struct tcp_log_buffer); 21612529f56eSJonathan T. Looney if (log_next->tlm_buf.tlb_eventflags & 21622529f56eSJonathan T. Looney TLB_FLAG_VERBOSE) 21632529f56eSJonathan T. Looney entrysize += sizeof(struct tcp_log_verbose); 21642529f56eSJonathan T. Looney if ((sopt->sopt_valsize - outsize) < entrysize) 21652529f56eSJonathan T. Looney break; 21662529f56eSJonathan T. Looney outsize += entrysize; 21672529f56eSJonathan T. Looney outnum++; 21682529f56eSJonathan T. Looney log_entry = log_next; 21692529f56eSJonathan T. Looney } 21702529f56eSJonathan T. Looney KASSERT(outsize <= sopt->sopt_valsize, 21712529f56eSJonathan T. Looney ("%s: calculated output size (%zu) greater than available" 21722529f56eSJonathan T. 
Looney "space (%zu)", __func__, outsize, sopt->sopt_valsize)); 21732529f56eSJonathan T. Looney #ifndef INVARIANTS 21742529f56eSJonathan T. Looney } 21752529f56eSJonathan T. Looney #endif 21762529f56eSJonathan T. Looney 21772529f56eSJonathan T. Looney /* 21782529f56eSJonathan T. Looney * Copy traditional sooptcopyout() behavior: if sopt->sopt_val 21792529f56eSJonathan T. Looney * is NULL, silently skip the copy. However, in this case, we 21802529f56eSJonathan T. Looney * will leave the list alone and return. Functionally, this 21812529f56eSJonathan T. Looney * gives userspace a way to poll for an approximate buffer 21822529f56eSJonathan T. Looney * size they will need to get the log entries. 21832529f56eSJonathan T. Looney */ 21842529f56eSJonathan T. Looney if (sopt->sopt_val == NULL) { 21852529f56eSJonathan T. Looney INP_WUNLOCK(inp); 21862529f56eSJonathan T. Looney if (outsize == 0) { 21872529f56eSJonathan T. Looney outsize = outnum * (sizeof(struct tcp_log_buffer) + 21882529f56eSJonathan T. Looney sizeof(struct tcp_log_verbose)); 21892529f56eSJonathan T. Looney } 21902529f56eSJonathan T. Looney if (sopt->sopt_valsize > outsize) 21912529f56eSJonathan T. Looney sopt->sopt_valsize = outsize; 21922529f56eSJonathan T. Looney return (0); 21932529f56eSJonathan T. Looney } 21942529f56eSJonathan T. Looney 21952529f56eSJonathan T. Looney /* 21962529f56eSJonathan T. Looney * Break apart the list. We'll save the ones we want to copy 21972529f56eSJonathan T. Looney * out locally and remove them from the TCPCB list. We can 21982529f56eSJonathan T. Looney * then drop the INPCB lock while we do the copyout. 21992529f56eSJonathan T. Looney * 22002529f56eSJonathan T. Looney * There are roughly three cases: 22012529f56eSJonathan T. Looney * 1. There was nothing to copy out. That's easy: drop the 22022529f56eSJonathan T. Looney * lock and return. 22032529f56eSJonathan T. Looney * 2. We are copying out the entire list. Again, that's easy: 22042529f56eSJonathan T. Looney * move the whole list. 22052529f56eSJonathan T. Looney * 3. We are copying out a partial list. That's harder. We 22062529f56eSJonathan T. Looney * need to update the list book-keeping entries. 22072529f56eSJonathan T. Looney */ 22082529f56eSJonathan T. Looney if (log_entry != NULL && log_next == NULL) { 22092529f56eSJonathan T. Looney /* Move entire list. */ 22102529f56eSJonathan T. Looney KASSERT(outnum == tp->t_lognum, 22112529f56eSJonathan T. Looney ("%s:%d: outnum (%d) should match tp->t_lognum (%d)", 22122529f56eSJonathan T. Looney __func__, __LINE__, outnum, tp->t_lognum)); 22132529f56eSJonathan T. Looney log_tailq = tp->t_logs; 22142529f56eSJonathan T. Looney tp->t_lognum = 0; 22152529f56eSJonathan T. Looney STAILQ_INIT(&tp->t_logs); 22162529f56eSJonathan T. Looney } else if (log_entry != NULL) { 22172529f56eSJonathan T. Looney /* Move partial list. */ 22182529f56eSJonathan T. Looney KASSERT(outnum < tp->t_lognum, 22192529f56eSJonathan T. Looney ("%s:%d: outnum (%d) not less than tp->t_lognum (%d)", 22202529f56eSJonathan T. Looney __func__, __LINE__, outnum, tp->t_lognum)); 22212529f56eSJonathan T. Looney STAILQ_FIRST(&log_tailq) = STAILQ_FIRST(&tp->t_logs); 22222529f56eSJonathan T. Looney STAILQ_FIRST(&tp->t_logs) = STAILQ_NEXT(log_entry, tlm_queue); 22232529f56eSJonathan T. Looney KASSERT(STAILQ_NEXT(log_entry, tlm_queue) != NULL, 22242529f56eSJonathan T. Looney ("%s:%d: tp->t_logs is unexpectedly shorter than expected" 22252529f56eSJonathan T. Looney "(tp: %p, log_tailq: %p, outnum: %d, tp->t_lognum: %d)", 22262529f56eSJonathan T. 
Looney __func__, __LINE__, tp, &log_tailq, outnum, tp->t_lognum)); 22272529f56eSJonathan T. Looney STAILQ_NEXT(log_entry, tlm_queue) = NULL; 22282529f56eSJonathan T. Looney log_tailq.stqh_last = &STAILQ_NEXT(log_entry, tlm_queue); 22292529f56eSJonathan T. Looney tp->t_lognum -= outnum; 22302529f56eSJonathan T. Looney } else 22312529f56eSJonathan T. Looney STAILQ_INIT(&log_tailq); 22322529f56eSJonathan T. Looney 22332529f56eSJonathan T. Looney /* Drop the PCB lock. */ 22342529f56eSJonathan T. Looney INP_WUNLOCK(inp); 22352529f56eSJonathan T. Looney 22362529f56eSJonathan T. Looney /* Copy the data out. */ 22372529f56eSJonathan T. Looney error = tcp_log_logs_to_buf(sopt, &log_tailq, &out_entry, outnum); 22382529f56eSJonathan T. Looney 22392529f56eSJonathan T. Looney if (error) { 22402529f56eSJonathan T. Looney /* Restore list */ 22412529f56eSJonathan T. Looney INP_WLOCK(inp); 224253af6903SGleb Smirnoff if ((inp->inp_flags & INP_DROPPED) == 0) { 22432529f56eSJonathan T. Looney tp = intotcpcb(inp); 22442529f56eSJonathan T. Looney 22452529f56eSJonathan T. Looney /* Merge the two lists. */ 22462529f56eSJonathan T. Looney STAILQ_CONCAT(&log_tailq, &tp->t_logs); 22472529f56eSJonathan T. Looney tp->t_logs = log_tailq; 22482529f56eSJonathan T. Looney tp->t_lognum += outnum; 22492529f56eSJonathan T. Looney } 22502529f56eSJonathan T. Looney INP_WUNLOCK(inp); 22512529f56eSJonathan T. Looney } else { 22522529f56eSJonathan T. Looney /* Sanity check entries */ 22532529f56eSJonathan T. Looney KASSERT(((caddr_t)out_entry - (caddr_t)sopt->sopt_val) == 22542529f56eSJonathan T. Looney outsize, ("%s: Actual output size (%zu) != " 22552529f56eSJonathan T. Looney "calculated output size (%zu)", __func__, 22562529f56eSJonathan T. Looney (size_t)((caddr_t)out_entry - (caddr_t)sopt->sopt_val), 22572529f56eSJonathan T. Looney outsize)); 22582529f56eSJonathan T. Looney 22592529f56eSJonathan T. Looney /* Free the entries we just copied out. */ 22602529f56eSJonathan T. Looney STAILQ_FOREACH_SAFE(log_entry, &log_tailq, tlm_queue, log_next) { 22612529f56eSJonathan T. Looney tcp_log_entry_refcnt_rem(log_entry); 22622529f56eSJonathan T. Looney uma_zfree(tcp_log_zone, log_entry); 22632529f56eSJonathan T. Looney } 22642529f56eSJonathan T. Looney } 22652529f56eSJonathan T. Looney 22662529f56eSJonathan T. Looney sopt->sopt_valsize = (size_t)((caddr_t)out_entry - 22672529f56eSJonathan T. Looney (caddr_t)sopt->sopt_val); 22682529f56eSJonathan T. Looney return (error); 22692529f56eSJonathan T. Looney } 22702529f56eSJonathan T. Looney 22712529f56eSJonathan T. Looney static void 22722529f56eSJonathan T. Looney tcp_log_free_queue(struct tcp_log_dev_queue *param) 22732529f56eSJonathan T. Looney { 22742529f56eSJonathan T. Looney struct tcp_log_dev_log_queue *entry; 22752529f56eSJonathan T. Looney 22762529f56eSJonathan T. Looney KASSERT(param != NULL, ("%s: called with NULL param", __func__)); 22772529f56eSJonathan T. Looney if (param == NULL) 22782529f56eSJonathan T. Looney return; 22792529f56eSJonathan T. Looney 22802529f56eSJonathan T. Looney entry = (struct tcp_log_dev_log_queue *)param; 22812529f56eSJonathan T. Looney 22822529f56eSJonathan T. Looney /* Free the entries. */ 22832529f56eSJonathan T. Looney tcp_log_free_entries(&entry->tldl_entries, &entry->tldl_count); 22842529f56eSJonathan T. Looney 22852529f56eSJonathan T. Looney /* Free the buffer, if it is allocated. */ 22862529f56eSJonathan T. Looney if (entry->tldl_common.tldq_buf != NULL) 22872529f56eSJonathan T. 
Looney free(entry->tldl_common.tldq_buf, M_TCPLOGDEV); 22882529f56eSJonathan T. Looney 22892529f56eSJonathan T. Looney /* Free the queue entry. */ 22902529f56eSJonathan T. Looney free(entry, M_TCPLOGDEV); 22912529f56eSJonathan T. Looney } 22922529f56eSJonathan T. Looney 22932529f56eSJonathan T. Looney static struct tcp_log_common_header * 22942529f56eSJonathan T. Looney tcp_log_expandlogbuf(struct tcp_log_dev_queue *param) 22952529f56eSJonathan T. Looney { 22962529f56eSJonathan T. Looney struct tcp_log_dev_log_queue *entry; 22972529f56eSJonathan T. Looney struct tcp_log_header *hdr; 22982529f56eSJonathan T. Looney uint8_t *end; 22992529f56eSJonathan T. Looney struct sockopt sopt; 23002529f56eSJonathan T. Looney int error; 23012529f56eSJonathan T. Looney 23022529f56eSJonathan T. Looney entry = (struct tcp_log_dev_log_queue *)param; 23032529f56eSJonathan T. Looney 23042529f56eSJonathan T. Looney /* Take a worst-case guess at space needs. */ 23052529f56eSJonathan T. Looney sopt.sopt_valsize = sizeof(struct tcp_log_header) + 23062529f56eSJonathan T. Looney entry->tldl_count * (sizeof(struct tcp_log_buffer) + 23072529f56eSJonathan T. Looney sizeof(struct tcp_log_verbose)); 23082529f56eSJonathan T. Looney hdr = malloc(sopt.sopt_valsize, M_TCPLOGDEV, M_NOWAIT); 23092529f56eSJonathan T. Looney if (hdr == NULL) { 23102529f56eSJonathan T. Looney #ifdef TCPLOG_DEBUG_COUNTERS 23112529f56eSJonathan T. Looney counter_u64_add(tcp_log_que_fail5, entry->tldl_count); 23122529f56eSJonathan T. Looney #endif 23132529f56eSJonathan T. Looney return (NULL); 23142529f56eSJonathan T. Looney } 23152529f56eSJonathan T. Looney sopt.sopt_val = hdr + 1; 23162529f56eSJonathan T. Looney sopt.sopt_valsize -= sizeof(struct tcp_log_header); 23172529f56eSJonathan T. Looney sopt.sopt_td = NULL; 23182529f56eSJonathan T. Looney 23192529f56eSJonathan T. Looney error = tcp_log_logs_to_buf(&sopt, &entry->tldl_entries, 23202529f56eSJonathan T. Looney (struct tcp_log_buffer **)&end, entry->tldl_count); 23212529f56eSJonathan T. Looney if (error) { 23222529f56eSJonathan T. Looney free(hdr, M_TCPLOGDEV); 23232529f56eSJonathan T. Looney return (NULL); 23242529f56eSJonathan T. Looney } 23252529f56eSJonathan T. Looney 23262529f56eSJonathan T. Looney /* Free the entries. */ 23272529f56eSJonathan T. Looney tcp_log_free_entries(&entry->tldl_entries, &entry->tldl_count); 23282529f56eSJonathan T. Looney entry->tldl_count = 0; 23292529f56eSJonathan T. Looney 23302529f56eSJonathan T. Looney memset(hdr, 0, sizeof(struct tcp_log_header)); 23312529f56eSJonathan T. Looney hdr->tlh_version = TCP_LOG_BUF_VER; 23322529f56eSJonathan T. Looney hdr->tlh_type = TCP_LOG_DEV_TYPE_BBR; 23332529f56eSJonathan T. Looney hdr->tlh_length = end - (uint8_t *)hdr; 23342529f56eSJonathan T. Looney hdr->tlh_ie = entry->tldl_ie; 23352529f56eSJonathan T. Looney hdr->tlh_af = entry->tldl_af; 23362529f56eSJonathan T. Looney getboottime(&hdr->tlh_offset); 23372529f56eSJonathan T. Looney strlcpy(hdr->tlh_id, entry->tldl_id, TCP_LOG_ID_LEN); 2338a9a08eceSRandall Stewart strlcpy(hdr->tlh_tag, entry->tldl_tag, TCP_LOG_TAG_LEN); 23392529f56eSJonathan T. Looney strlcpy(hdr->tlh_reason, entry->tldl_reason, TCP_LOG_REASON_LEN); 23402529f56eSJonathan T. Looney return ((struct tcp_log_common_header *)hdr); 23412529f56eSJonathan T. Looney } 23422529f56eSJonathan T. Looney 23432529f56eSJonathan T. Looney /* 23442529f56eSJonathan T. Looney * Queue the tcpcb's log buffer for transmission via the log buffer facility. 23452529f56eSJonathan T. Looney * 23462529f56eSJonathan T. 
Looney * NOTE: This should be called with a write lock on the PCB. 23472529f56eSJonathan T. Looney * 23482529f56eSJonathan T. Looney * how should be M_WAITOK or M_NOWAIT. If M_WAITOK, the function will drop 23492529f56eSJonathan T. Looney * and reacquire the INP lock if it needs to do so. 23502529f56eSJonathan T. Looney * 23512529f56eSJonathan T. Looney * If force is false, this will only dump auto-logged sessions if 23522529f56eSJonathan T. Looney * tcp_log_auto_all is true or if there is a log ID defined for the session. 23532529f56eSJonathan T. Looney */ 23542529f56eSJonathan T. Looney int 23552529f56eSJonathan T. Looney tcp_log_dump_tp_logbuf(struct tcpcb *tp, char *reason, int how, bool force) 23562529f56eSJonathan T. Looney { 23572529f56eSJonathan T. Looney struct tcp_log_dev_log_queue *entry; 23589eb0e832SGleb Smirnoff struct inpcb *inp = tptoinpcb(tp); 23592529f56eSJonathan T. Looney #ifdef TCPLOG_DEBUG_COUNTERS 23602529f56eSJonathan T. Looney int num_entries; 23612529f56eSJonathan T. Looney #endif 23622529f56eSJonathan T. Looney 23632529f56eSJonathan T. Looney INP_WLOCK_ASSERT(inp); 23642529f56eSJonathan T. Looney 23652529f56eSJonathan T. Looney /* If there are no log entries, there is nothing to do. */ 23662529f56eSJonathan T. Looney if (tp->t_lognum == 0) 23672529f56eSJonathan T. Looney return (0); 23682529f56eSJonathan T. Looney 23692529f56eSJonathan T. Looney /* Check for a log ID. */ 23702529f56eSJonathan T. Looney if (tp->t_lib == NULL && (tp->t_flags2 & TF2_LOG_AUTO) && 23712529f56eSJonathan T. Looney !tcp_log_auto_all && !force) { 23722529f56eSJonathan T. Looney struct tcp_log_mem *log_entry; 23732529f56eSJonathan T. Looney 23742529f56eSJonathan T. Looney /* 23752529f56eSJonathan T. Looney * We needed a log ID and none was found. Free the log entries 23762529f56eSJonathan T. Looney * and return success. Also, cancel further logging. If the 23772529f56eSJonathan T. Looney * session doesn't have a log ID by now, we'll assume it isn't 23782529f56eSJonathan T. Looney * going to get one. 23792529f56eSJonathan T. Looney */ 23802529f56eSJonathan T. Looney while ((log_entry = STAILQ_FIRST(&tp->t_logs)) != NULL) 23812529f56eSJonathan T. Looney tcp_log_remove_log_head(tp, log_entry); 23822529f56eSJonathan T. Looney KASSERT(tp->t_lognum == 0, 23832529f56eSJonathan T. Looney ("%s: After freeing entries, tp->t_lognum=%d (expected 0)", 23842529f56eSJonathan T. Looney __func__, tp->t_lognum)); 238569c7c811SRandall Stewart tp->_t_logstate = TCP_LOG_STATE_OFF; 23862529f56eSJonathan T. Looney return (0); 23872529f56eSJonathan T. Looney } 23882529f56eSJonathan T. Looney 23892529f56eSJonathan T. Looney /* 23902529f56eSJonathan T. Looney * Allocate memory. If we must wait, we'll need to drop the locks 23912529f56eSJonathan T. Looney * and reacquire them (and do all the related business that goes 23922529f56eSJonathan T. Looney * along with that). 23932529f56eSJonathan T. Looney */ 23942529f56eSJonathan T. Looney entry = malloc(sizeof(struct tcp_log_dev_log_queue), M_TCPLOGDEV, 23952529f56eSJonathan T. Looney M_NOWAIT); 23962529f56eSJonathan T. Looney if (entry == NULL && (how & M_NOWAIT)) { 23972529f56eSJonathan T. Looney #ifdef TCPLOG_DEBUG_COUNTERS 23982529f56eSJonathan T. Looney counter_u64_add(tcp_log_que_fail3, 1); 23992529f56eSJonathan T. Looney #endif 24002529f56eSJonathan T. Looney return (ENOBUFS); 24012529f56eSJonathan T. Looney } 24022529f56eSJonathan T. Looney if (entry == NULL) { 24032529f56eSJonathan T. Looney INP_WUNLOCK(inp); 24042529f56eSJonathan T. 
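		/*
		 * Sleeping allocation path: the inpcb lock was released
		 * above, so the connection may be dropped and the log list
		 * may change while we block.  The checks below re-validate
		 * the inpcb and the log count once the lock has been
		 * reacquired.
		 */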
Looney entry = malloc(sizeof(struct tcp_log_dev_log_queue), 24052529f56eSJonathan T. Looney M_TCPLOGDEV, M_WAITOK); 24062529f56eSJonathan T. Looney INP_WLOCK(inp); 24072529f56eSJonathan T. Looney /* 24082529f56eSJonathan T. Looney * Note that this check is slightly overly-restrictive in 24092529f56eSJonathan T. Looney * that the TCB can survive either of these events. 24102529f56eSJonathan T. Looney * However, there is currently not a good way to ensure 24112529f56eSJonathan T. Looney * that is the case. So, if we hit this M_WAIT path, we 24122529f56eSJonathan T. Looney * may end up dropping some entries. That seems like a 24132529f56eSJonathan T. Looney * small price to pay for safety. 24142529f56eSJonathan T. Looney */ 241553af6903SGleb Smirnoff if (inp->inp_flags & INP_DROPPED) { 24162529f56eSJonathan T. Looney free(entry, M_TCPLOGDEV); 24172529f56eSJonathan T. Looney #ifdef TCPLOG_DEBUG_COUNTERS 24182529f56eSJonathan T. Looney counter_u64_add(tcp_log_que_fail2, 1); 24192529f56eSJonathan T. Looney #endif 24202529f56eSJonathan T. Looney return (ECONNRESET); 24212529f56eSJonathan T. Looney } 24222529f56eSJonathan T. Looney tp = intotcpcb(inp); 24232529f56eSJonathan T. Looney if (tp->t_lognum == 0) { 24242529f56eSJonathan T. Looney free(entry, M_TCPLOGDEV); 24252529f56eSJonathan T. Looney return (0); 24262529f56eSJonathan T. Looney } 24272529f56eSJonathan T. Looney } 24282529f56eSJonathan T. Looney 24292529f56eSJonathan T. Looney /* Fill in the unique parts of the queue entry. */ 2430a9a08eceSRandall Stewart if (tp->t_lib != NULL) { 24312529f56eSJonathan T. Looney strlcpy(entry->tldl_id, tp->t_lib->tlb_id, TCP_LOG_ID_LEN); 2432a9a08eceSRandall Stewart strlcpy(entry->tldl_tag, tp->t_lib->tlb_tag, TCP_LOG_TAG_LEN); 2433a9a08eceSRandall Stewart } else { 24342529f56eSJonathan T. Looney strlcpy(entry->tldl_id, "UNKNOWN", TCP_LOG_ID_LEN); 2435a9a08eceSRandall Stewart strlcpy(entry->tldl_tag, "UNKNOWN", TCP_LOG_TAG_LEN); 2436a9a08eceSRandall Stewart } 24372529f56eSJonathan T. Looney if (reason != NULL) 24382529f56eSJonathan T. Looney strlcpy(entry->tldl_reason, reason, TCP_LOG_REASON_LEN); 24392529f56eSJonathan T. Looney else 24402529f56eSJonathan T. Looney strlcpy(entry->tldl_reason, "UNKNOWN", TCP_LOG_ID_LEN); 24412529f56eSJonathan T. Looney entry->tldl_ie = inp->inp_inc.inc_ie; 24422529f56eSJonathan T. Looney if (inp->inp_inc.inc_flags & INC_ISIPV6) 24432529f56eSJonathan T. Looney entry->tldl_af = AF_INET6; 24442529f56eSJonathan T. Looney else 24452529f56eSJonathan T. Looney entry->tldl_af = AF_INET; 24462529f56eSJonathan T. Looney entry->tldl_entries = tp->t_logs; 24472529f56eSJonathan T. Looney entry->tldl_count = tp->t_lognum; 24482529f56eSJonathan T. Looney 24492529f56eSJonathan T. Looney /* Fill in the common parts of the queue entry. */ 24502529f56eSJonathan T. Looney entry->tldl_common.tldq_buf = NULL; 24512529f56eSJonathan T. Looney entry->tldl_common.tldq_xform = tcp_log_expandlogbuf; 24522529f56eSJonathan T. Looney entry->tldl_common.tldq_dtor = tcp_log_free_queue; 24532529f56eSJonathan T. Looney 24542529f56eSJonathan T. Looney /* Clear the log data from the TCPCB. */ 24552529f56eSJonathan T. Looney #ifdef TCPLOG_DEBUG_COUNTERS 24562529f56eSJonathan T. Looney num_entries = tp->t_lognum; 24572529f56eSJonathan T. Looney #endif 24582529f56eSJonathan T. Looney tp->t_lognum = 0; 24592529f56eSJonathan T. Looney STAILQ_INIT(&tp->t_logs); 24602529f56eSJonathan T. Looney 24612529f56eSJonathan T. Looney /* Add the entry. If no one is listening, free the entry. */ 24622529f56eSJonathan T. 
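	/*
	 * Note that the "else" arm below exists only when
	 * TCPLOG_DEBUG_COUNTERS is defined; without it, the failure path
	 * simply frees the queue entry and the success path needs no
	 * additional work.
	 */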
Looney if (tcp_log_dev_add_log((struct tcp_log_dev_queue *)entry)) { 24632529f56eSJonathan T. Looney tcp_log_free_queue((struct tcp_log_dev_queue *)entry); 24642529f56eSJonathan T. Looney #ifdef TCPLOG_DEBUG_COUNTERS 24652529f56eSJonathan T. Looney counter_u64_add(tcp_log_que_fail1, num_entries); 24662529f56eSJonathan T. Looney } else { 24672529f56eSJonathan T. Looney counter_u64_add(tcp_log_queued, num_entries); 24682529f56eSJonathan T. Looney #endif 24692529f56eSJonathan T. Looney } 24702529f56eSJonathan T. Looney return (0); 24712529f56eSJonathan T. Looney } 24722529f56eSJonathan T. Looney 24732529f56eSJonathan T. Looney /* 24742529f56eSJonathan T. Looney * Queue the log_id_node's log buffers for transmission via the log buffer 24752529f56eSJonathan T. Looney * facility. 24762529f56eSJonathan T. Looney * 24772529f56eSJonathan T. Looney * NOTE: This should be called with the bucket locked and referenced. 24782529f56eSJonathan T. Looney * 24792529f56eSJonathan T. Looney * how should be M_WAITOK or M_NOWAIT. If M_WAITOK, the function will drop 24802529f56eSJonathan T. Looney * and reacquire the bucket lock if it needs to do so. (The caller must 24812529f56eSJonathan T. Looney * ensure that the tln is no longer on any lists so no one else will mess 24822529f56eSJonathan T. Looney * with this while the lock is dropped!) 24832529f56eSJonathan T. Looney */ 24842529f56eSJonathan T. Looney static int 24852529f56eSJonathan T. Looney tcp_log_dump_node_logbuf(struct tcp_log_id_node *tln, char *reason, int how) 24862529f56eSJonathan T. Looney { 24872529f56eSJonathan T. Looney struct tcp_log_dev_log_queue *entry; 24882529f56eSJonathan T. Looney struct tcp_log_id_bucket *tlb; 24892529f56eSJonathan T. Looney 24902529f56eSJonathan T. Looney tlb = tln->tln_bucket; 24912529f56eSJonathan T. Looney TCPID_BUCKET_LOCK_ASSERT(tlb); 24922529f56eSJonathan T. Looney KASSERT(tlb->tlb_refcnt > 0, 24932529f56eSJonathan T. Looney ("%s:%d: Called with unreferenced bucket (tln=%p, tlb=%p)", 24942529f56eSJonathan T. Looney __func__, __LINE__, tln, tlb)); 24952529f56eSJonathan T. Looney KASSERT(tln->tln_closed, 24962529f56eSJonathan T. Looney ("%s:%d: Called for node with tln_closed==false (tln=%p)", 24972529f56eSJonathan T. Looney __func__, __LINE__, tln)); 24982529f56eSJonathan T. Looney 24992529f56eSJonathan T. Looney /* If there are no log entries, there is nothing to do. */ 25002529f56eSJonathan T. Looney if (tln->tln_count == 0) 25012529f56eSJonathan T. Looney return (0); 25022529f56eSJonathan T. Looney 25032529f56eSJonathan T. Looney /* 25042529f56eSJonathan T. Looney * Allocate memory. If we must wait, we'll need to drop the locks 25052529f56eSJonathan T. Looney * and reacquire them (and do all the related business that goes 25062529f56eSJonathan T. Looney * along with that). 25072529f56eSJonathan T. Looney */ 25082529f56eSJonathan T. Looney entry = malloc(sizeof(struct tcp_log_dev_log_queue), M_TCPLOGDEV, 25092529f56eSJonathan T. Looney M_NOWAIT); 25102529f56eSJonathan T. Looney if (entry == NULL && (how & M_NOWAIT)) 25112529f56eSJonathan T. Looney return (ENOBUFS); 25122529f56eSJonathan T. Looney if (entry == NULL) { 25132529f56eSJonathan T. Looney TCPID_BUCKET_UNLOCK(tlb); 25142529f56eSJonathan T. Looney entry = malloc(sizeof(struct tcp_log_dev_log_queue), 25152529f56eSJonathan T. Looney M_TCPLOGDEV, M_WAITOK); 25162529f56eSJonathan T. Looney TCPID_BUCKET_LOCK(tlb); 25172529f56eSJonathan T. Looney } 25182529f56eSJonathan T. Looney 25192529f56eSJonathan T. Looney /* Fill in the common parts of the queue entry.. 
*/ 25202529f56eSJonathan T. Looney entry->tldl_common.tldq_buf = NULL; 25212529f56eSJonathan T. Looney entry->tldl_common.tldq_xform = tcp_log_expandlogbuf; 25222529f56eSJonathan T. Looney entry->tldl_common.tldq_dtor = tcp_log_free_queue; 25232529f56eSJonathan T. Looney 25242529f56eSJonathan T. Looney /* Fill in the unique parts of the queue entry. */ 25252529f56eSJonathan T. Looney strlcpy(entry->tldl_id, tlb->tlb_id, TCP_LOG_ID_LEN); 2526a9a08eceSRandall Stewart strlcpy(entry->tldl_tag, tlb->tlb_tag, TCP_LOG_TAG_LEN); 25272529f56eSJonathan T. Looney if (reason != NULL) 25282529f56eSJonathan T. Looney strlcpy(entry->tldl_reason, reason, TCP_LOG_REASON_LEN); 25292529f56eSJonathan T. Looney else 25302529f56eSJonathan T. Looney strlcpy(entry->tldl_reason, "UNKNOWN", TCP_LOG_ID_LEN); 25312529f56eSJonathan T. Looney entry->tldl_ie = tln->tln_ie; 25322529f56eSJonathan T. Looney entry->tldl_entries = tln->tln_entries; 25332529f56eSJonathan T. Looney entry->tldl_count = tln->tln_count; 25342529f56eSJonathan T. Looney entry->tldl_af = tln->tln_af; 25352529f56eSJonathan T. Looney 25362529f56eSJonathan T. Looney /* Add the entry. If no one is listening, free the entry. */ 25372529f56eSJonathan T. Looney if (tcp_log_dev_add_log((struct tcp_log_dev_queue *)entry)) 25382529f56eSJonathan T. Looney tcp_log_free_queue((struct tcp_log_dev_queue *)entry); 25392529f56eSJonathan T. Looney 25402529f56eSJonathan T. Looney return (0); 25412529f56eSJonathan T. Looney } 25422529f56eSJonathan T. Looney 25432529f56eSJonathan T. Looney /* 25442529f56eSJonathan T. Looney * Queue the log buffers for all sessions in a bucket for transmissions via 25452529f56eSJonathan T. Looney * the log buffer facility. 25462529f56eSJonathan T. Looney * 25472529f56eSJonathan T. Looney * NOTE: This should be called with a locked bucket; however, the function 25482529f56eSJonathan T. Looney * will drop the lock. 25492529f56eSJonathan T. Looney */ 25502529f56eSJonathan T. Looney #define LOCAL_SAVE 10 25512529f56eSJonathan T. Looney static void 25522529f56eSJonathan T. Looney tcp_log_dumpbucketlogs(struct tcp_log_id_bucket *tlb, char *reason) 25532529f56eSJonathan T. Looney { 25542529f56eSJonathan T. Looney struct tcp_log_id_node local_entries[LOCAL_SAVE]; 25552529f56eSJonathan T. Looney struct inpcb *inp; 25562529f56eSJonathan T. Looney struct tcpcb *tp; 25572529f56eSJonathan T. Looney struct tcp_log_id_node *cur_tln, *prev_tln, *tmp_tln; 25582529f56eSJonathan T. Looney int i, num_local_entries, tree_locked; 25592529f56eSJonathan T. Looney bool expireq_locked; 25602529f56eSJonathan T. Looney 25612529f56eSJonathan T. Looney TCPID_BUCKET_LOCK_ASSERT(tlb); 25622529f56eSJonathan T. Looney 25632529f56eSJonathan T. Looney /* 25642529f56eSJonathan T. Looney * Take a reference on the bucket to keep it from disappearing until 25652529f56eSJonathan T. Looney * we are done. 25662529f56eSJonathan T. Looney */ 25672529f56eSJonathan T. Looney TCPID_BUCKET_REF(tlb); 25682529f56eSJonathan T. Looney 25692529f56eSJonathan T. Looney /* 25702529f56eSJonathan T. Looney * We'll try to create these without dropping locks. However, we 25712529f56eSJonathan T. Looney * might very well need to drop locks to get memory. If that's the 25722529f56eSJonathan T. Looney * case, we'll save up to 10 on the stack, and sacrifice the rest. 25732529f56eSJonathan T. Looney * (Otherwise, we need to worry about finding our place again in a 25742529f56eSJonathan T. Looney * potentially changed list. It just doesn't seem worth the trouble 25752529f56eSJonathan T. 
Looney * to do that. 25762529f56eSJonathan T. Looney */ 25772529f56eSJonathan T. Looney expireq_locked = false; 25782529f56eSJonathan T. Looney num_local_entries = 0; 25792529f56eSJonathan T. Looney prev_tln = NULL; 25802529f56eSJonathan T. Looney tree_locked = TREE_UNLOCKED; 25812529f56eSJonathan T. Looney SLIST_FOREACH_SAFE(cur_tln, &tlb->tlb_head, tln_list, tmp_tln) { 25822529f56eSJonathan T. Looney /* 25832529f56eSJonathan T. Looney * If this isn't associated with a TCPCB, we can pull it off 25842529f56eSJonathan T. Looney * the list now. We need to be careful that the expire timer 25852529f56eSJonathan T. Looney * hasn't already taken ownership (tln_expiretime == SBT_MAX). 25862529f56eSJonathan T. Looney * If so, we let the expire timer code free the data. 25872529f56eSJonathan T. Looney */ 25882529f56eSJonathan T. Looney if (cur_tln->tln_closed) { 25892529f56eSJonathan T. Looney no_inp: 25902529f56eSJonathan T. Looney /* 25912529f56eSJonathan T. Looney * Get the expireq lock so we can get a consistent 25922529f56eSJonathan T. Looney * read of tln_expiretime and so we can remove this 25932529f56eSJonathan T. Looney * from the expireq. 25942529f56eSJonathan T. Looney */ 25952529f56eSJonathan T. Looney if (!expireq_locked) { 25962529f56eSJonathan T. Looney TCPLOG_EXPIREQ_LOCK(); 25972529f56eSJonathan T. Looney expireq_locked = true; 25982529f56eSJonathan T. Looney } 25992529f56eSJonathan T. Looney 26002529f56eSJonathan T. Looney /* 26012529f56eSJonathan T. Looney * We ignore entries with tln_expiretime == SBT_MAX. 26022529f56eSJonathan T. Looney * The expire timer code already owns those. 26032529f56eSJonathan T. Looney */ 26042529f56eSJonathan T. Looney KASSERT(cur_tln->tln_expiretime > (sbintime_t) 0, 26052529f56eSJonathan T. Looney ("%s:%d: node on the expire queue without positive " 26062529f56eSJonathan T. Looney "expire time", __func__, __LINE__)); 26072529f56eSJonathan T. Looney if (cur_tln->tln_expiretime == SBT_MAX) { 26082529f56eSJonathan T. Looney prev_tln = cur_tln; 26092529f56eSJonathan T. Looney continue; 26102529f56eSJonathan T. Looney } 26112529f56eSJonathan T. Looney 26122529f56eSJonathan T. Looney /* Remove the entry from the expireq. */ 26132529f56eSJonathan T. Looney STAILQ_REMOVE(&tcp_log_expireq_head, cur_tln, 26142529f56eSJonathan T. Looney tcp_log_id_node, tln_expireq); 26152529f56eSJonathan T. Looney 26162529f56eSJonathan T. Looney /* Remove the entry from the bucket. */ 26172529f56eSJonathan T. Looney if (prev_tln != NULL) 26182529f56eSJonathan T. Looney SLIST_REMOVE_AFTER(prev_tln, tln_list); 26192529f56eSJonathan T. Looney else 26202529f56eSJonathan T. Looney SLIST_REMOVE_HEAD(&tlb->tlb_head, tln_list); 26212529f56eSJonathan T. Looney 26222529f56eSJonathan T. Looney /* 26232529f56eSJonathan T. Looney * Drop the INP and bucket reference counts. Due to 26242529f56eSJonathan T. Looney * lock-ordering rules, we need to drop the expire 26252529f56eSJonathan T. Looney * queue lock. 26262529f56eSJonathan T. Looney */ 26272529f56eSJonathan T. Looney TCPLOG_EXPIREQ_UNLOCK(); 26282529f56eSJonathan T. Looney expireq_locked = false; 26292529f56eSJonathan T. Looney 26302529f56eSJonathan T. Looney /* Drop the INP reference. */ 26312529f56eSJonathan T. Looney INP_WLOCK(cur_tln->tln_inp); 26322529f56eSJonathan T. Looney if (!in_pcbrele_wlocked(cur_tln->tln_inp)) 26332529f56eSJonathan T. Looney INP_WUNLOCK(cur_tln->tln_inp); 26342529f56eSJonathan T. Looney 26352529f56eSJonathan T. Looney if (tcp_log_unref_bucket(tlb, &tree_locked, NULL)) { 26362529f56eSJonathan T. 
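			/*
			 * The bucket refcount unexpectedly hit zero even
			 * though we still hold the reference taken at the
			 * top of this function.  Under INVARIANTS this is
			 * fatal; otherwise, free the node we own and give
			 * up on the rest of the bucket.
			 */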
Looney #ifdef INVARIANTS 26372529f56eSJonathan T. Looney panic("%s: Bucket refcount unexpectedly 0.", 26382529f56eSJonathan T. Looney __func__); 26392529f56eSJonathan T. Looney #endif 26402529f56eSJonathan T. Looney /* 26412529f56eSJonathan T. Looney * Recover as best we can: free the entry we 26422529f56eSJonathan T. Looney * own. 26432529f56eSJonathan T. Looney */ 26442529f56eSJonathan T. Looney tcp_log_free_entries(&cur_tln->tln_entries, 26452529f56eSJonathan T. Looney &cur_tln->tln_count); 26468c47d8f5SAlan Somers uma_zfree(tcp_log_id_node_zone, cur_tln); 26472529f56eSJonathan T. Looney goto done; 26482529f56eSJonathan T. Looney } 26492529f56eSJonathan T. Looney 26502529f56eSJonathan T. Looney if (tcp_log_dump_node_logbuf(cur_tln, reason, 26512529f56eSJonathan T. Looney M_NOWAIT)) { 26522529f56eSJonathan T. Looney /* 26532529f56eSJonathan T. Looney * If we have space, save the entries locally. 26542529f56eSJonathan T. Looney * Otherwise, free them. 26552529f56eSJonathan T. Looney */ 26562529f56eSJonathan T. Looney if (num_local_entries < LOCAL_SAVE) { 26572529f56eSJonathan T. Looney local_entries[num_local_entries] = 26582529f56eSJonathan T. Looney *cur_tln; 26592529f56eSJonathan T. Looney num_local_entries++; 26602529f56eSJonathan T. Looney } else { 26612529f56eSJonathan T. Looney tcp_log_free_entries( 26622529f56eSJonathan T. Looney &cur_tln->tln_entries, 26632529f56eSJonathan T. Looney &cur_tln->tln_count); 26642529f56eSJonathan T. Looney } 26652529f56eSJonathan T. Looney } 26662529f56eSJonathan T. Looney 26672529f56eSJonathan T. Looney /* No matter what, we are done with the node now. */ 26688c47d8f5SAlan Somers uma_zfree(tcp_log_id_node_zone, cur_tln); 26692529f56eSJonathan T. Looney 26702529f56eSJonathan T. Looney /* 26712529f56eSJonathan T. Looney * Because we removed this entry from the list, prev_tln 26722529f56eSJonathan T. Looney * (which tracks the previous entry still on the tlb 26732529f56eSJonathan T. Looney * list) remains unchanged. 26742529f56eSJonathan T. Looney */ 26752529f56eSJonathan T. Looney continue; 26762529f56eSJonathan T. Looney } 26772529f56eSJonathan T. Looney 26782529f56eSJonathan T. Looney /* 26792529f56eSJonathan T. Looney * If we get to this point, the session data is still held in 26802529f56eSJonathan T. Looney * the TCPCB. So, we need to pull the data out of that. 26812529f56eSJonathan T. Looney * 26822529f56eSJonathan T. Looney * We will need to drop the expireq lock so we can lock the INP. 26832529f56eSJonathan T. Looney * We can then try to extract the data the "easy" way. If that 26842529f56eSJonathan T. Looney * fails, we'll save the log entries for later. 26852529f56eSJonathan T. Looney */ 26862529f56eSJonathan T. Looney if (expireq_locked) { 26872529f56eSJonathan T. Looney TCPLOG_EXPIREQ_UNLOCK(); 26882529f56eSJonathan T. Looney expireq_locked = false; 26892529f56eSJonathan T. Looney } 26902529f56eSJonathan T. Looney 26912529f56eSJonathan T. Looney /* Lock the INP and then re-check the state. */ 26922529f56eSJonathan T. Looney inp = cur_tln->tln_inp; 26932529f56eSJonathan T. Looney INP_WLOCK(inp); 26942529f56eSJonathan T. Looney /* 26952529f56eSJonathan T. Looney * If we caught this while it was transitioning, the data 26962529f56eSJonathan T. Looney * might have moved from the TCPCB to the tln (signified by 26972529f56eSJonathan T. Looney * setting tln_closed to true). If so, treat this like an 26982529f56eSJonathan T. Looney * inactive connection. 26992529f56eSJonathan T. Looney */ 27002529f56eSJonathan T.
Looney if (cur_tln->tln_closed) { 27012529f56eSJonathan T. Looney /* 27022529f56eSJonathan T. Looney * It looks like we may have caught this connection 27032529f56eSJonathan T. Looney * while it was transitioning from active to inactive. 27042529f56eSJonathan T. Looney * Treat this like an inactive connection. 27052529f56eSJonathan T. Looney */ 27062529f56eSJonathan T. Looney INP_WUNLOCK(inp); 27072529f56eSJonathan T. Looney goto no_inp; 27082529f56eSJonathan T. Looney } 27092529f56eSJonathan T. Looney 27102529f56eSJonathan T. Looney /* 27112529f56eSJonathan T. Looney * Try to dump the data from the tp without dropping the lock. 27122529f56eSJonathan T. Looney * If this fails, try to save off the data locally. 27132529f56eSJonathan T. Looney */ 27142529f56eSJonathan T. Looney tp = cur_tln->tln_tp; 27152529f56eSJonathan T. Looney if (tcp_log_dump_tp_logbuf(tp, reason, M_NOWAIT, true) && 27162529f56eSJonathan T. Looney num_local_entries < LOCAL_SAVE) { 27172529f56eSJonathan T. Looney tcp_log_move_tp_to_node(tp, 27182529f56eSJonathan T. Looney &local_entries[num_local_entries]); 27192529f56eSJonathan T. Looney local_entries[num_local_entries].tln_closed = 1; 27202529f56eSJonathan T. Looney KASSERT(local_entries[num_local_entries].tln_bucket == 27212529f56eSJonathan T. Looney tlb, ("%s: %d: bucket mismatch for node %p", 27222529f56eSJonathan T. Looney __func__, __LINE__, cur_tln)); 27232529f56eSJonathan T. Looney num_local_entries++; 27242529f56eSJonathan T. Looney } 27252529f56eSJonathan T. Looney 27262529f56eSJonathan T. Looney INP_WUNLOCK(inp); 27272529f56eSJonathan T. Looney 27282529f56eSJonathan T. Looney /* 27292529f56eSJonathan T. Looney * We are going to leave the current tln on the list. It will 27302529f56eSJonathan T. Looney * become the previous tln. 27312529f56eSJonathan T. Looney */ 27322529f56eSJonathan T. Looney prev_tln = cur_tln; 27332529f56eSJonathan T. Looney } 27342529f56eSJonathan T. Looney 27352529f56eSJonathan T. Looney /* Drop our locks, if any. */ 27362529f56eSJonathan T. Looney KASSERT(tree_locked == TREE_UNLOCKED, 27372529f56eSJonathan T. Looney ("%s: %d: tree unexpectedly locked", __func__, __LINE__)); 27382529f56eSJonathan T. Looney switch (tree_locked) { 27392529f56eSJonathan T. Looney case TREE_WLOCKED: 27402529f56eSJonathan T. Looney TCPID_TREE_WUNLOCK(); 27412529f56eSJonathan T. Looney tree_locked = TREE_UNLOCKED; 27422529f56eSJonathan T. Looney break; 27432529f56eSJonathan T. Looney case TREE_RLOCKED: 27442529f56eSJonathan T. Looney TCPID_TREE_RUNLOCK(); 27452529f56eSJonathan T. Looney tree_locked = TREE_UNLOCKED; 27462529f56eSJonathan T. Looney break; 27472529f56eSJonathan T. Looney } 27482529f56eSJonathan T. Looney if (expireq_locked) { 27492529f56eSJonathan T. Looney TCPLOG_EXPIREQ_UNLOCK(); 27502529f56eSJonathan T. Looney expireq_locked = false; 27512529f56eSJonathan T. Looney } 27522529f56eSJonathan T. Looney 27532529f56eSJonathan T. Looney /* 27542529f56eSJonathan T. Looney * Try again for any saved entries. tcp_log_dump_node_logbuf() is 27552529f56eSJonathan T. Looney * guaranteed to free the log entries within the node. And, since 27562529f56eSJonathan T. Looney * the node itself is on our stack, we don't need to free it. 27572529f56eSJonathan T. Looney */ 27582529f56eSJonathan T. Looney for (i = 0; i < num_local_entries; i++) 27592529f56eSJonathan T. Looney tcp_log_dump_node_logbuf(&local_entries[i], reason, M_WAITOK); 27602529f56eSJonathan T. Looney 27612529f56eSJonathan T. Looney /* Drop our reference. */ 27622529f56eSJonathan T.
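	/*
	 * As at the other call sites, tcp_log_unref_bucket() returns true
	 * only when it dropped the final reference, in which case the
	 * bucket has already been torn down and must not be unlocked;
	 * otherwise the bucket is still live and we unlock it here.
	 */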
Looney if (!tcp_log_unref_bucket(tlb, &tree_locked, NULL)) 27632529f56eSJonathan T. Looney TCPID_BUCKET_UNLOCK(tlb); 27642529f56eSJonathan T. Looney 27652529f56eSJonathan T. Looney done: 27662529f56eSJonathan T. Looney /* Drop our locks, if any. */ 27672529f56eSJonathan T. Looney switch (tree_locked) { 27682529f56eSJonathan T. Looney case TREE_WLOCKED: 27692529f56eSJonathan T. Looney TCPID_TREE_WUNLOCK(); 27702529f56eSJonathan T. Looney break; 27712529f56eSJonathan T. Looney case TREE_RLOCKED: 27722529f56eSJonathan T. Looney TCPID_TREE_RUNLOCK(); 27732529f56eSJonathan T. Looney break; 27742529f56eSJonathan T. Looney } 27752529f56eSJonathan T. Looney if (expireq_locked) 27762529f56eSJonathan T. Looney TCPLOG_EXPIREQ_UNLOCK(); 27772529f56eSJonathan T. Looney } 27782529f56eSJonathan T. Looney #undef LOCAL_SAVE 27792529f56eSJonathan T. Looney 27802529f56eSJonathan T. Looney /* 27812529f56eSJonathan T. Looney * Queue the log buffers for all sessions in a bucket for transmissions via 27822529f56eSJonathan T. Looney * the log buffer facility. 27832529f56eSJonathan T. Looney * 27842529f56eSJonathan T. Looney * NOTE: This should be called with a locked INP; however, the function 27852529f56eSJonathan T. Looney * will drop the lock. 27862529f56eSJonathan T. Looney */ 27872529f56eSJonathan T. Looney void 27882529f56eSJonathan T. Looney tcp_log_dump_tp_bucket_logbufs(struct tcpcb *tp, char *reason) 27892529f56eSJonathan T. Looney { 27909eb0e832SGleb Smirnoff struct inpcb *inp = tptoinpcb(tp); 27912529f56eSJonathan T. Looney struct tcp_log_id_bucket *tlb; 27922529f56eSJonathan T. Looney int tree_locked; 27932529f56eSJonathan T. Looney 27942529f56eSJonathan T. Looney /* Figure out our bucket and lock it. */ 27959eb0e832SGleb Smirnoff INP_WLOCK_ASSERT(inp); 27962529f56eSJonathan T. Looney tlb = tp->t_lib; 27972529f56eSJonathan T. Looney if (tlb == NULL) { 27982529f56eSJonathan T. Looney /* 27992529f56eSJonathan T. Looney * No bucket; treat this like a request to dump a single 28002529f56eSJonathan T. Looney * session's traces. 28012529f56eSJonathan T. Looney */ 28022529f56eSJonathan T. Looney (void)tcp_log_dump_tp_logbuf(tp, reason, M_WAITOK, true); 28039eb0e832SGleb Smirnoff INP_WUNLOCK(inp); 28042529f56eSJonathan T. Looney return; 28052529f56eSJonathan T. Looney } 28062529f56eSJonathan T. Looney TCPID_BUCKET_REF(tlb); 28079eb0e832SGleb Smirnoff INP_WUNLOCK(inp); 28082529f56eSJonathan T. Looney TCPID_BUCKET_LOCK(tlb); 28092529f56eSJonathan T. Looney 28102529f56eSJonathan T. Looney /* If we are the last reference, we have nothing more to do here. */ 28112529f56eSJonathan T. Looney tree_locked = TREE_UNLOCKED; 28122529f56eSJonathan T. Looney if (tcp_log_unref_bucket(tlb, &tree_locked, NULL)) { 28132529f56eSJonathan T. Looney switch (tree_locked) { 28142529f56eSJonathan T. Looney case TREE_WLOCKED: 28152529f56eSJonathan T. Looney TCPID_TREE_WUNLOCK(); 28162529f56eSJonathan T. Looney break; 28172529f56eSJonathan T. Looney case TREE_RLOCKED: 28182529f56eSJonathan T. Looney TCPID_TREE_RUNLOCK(); 28192529f56eSJonathan T. Looney break; 28202529f56eSJonathan T. Looney } 28212529f56eSJonathan T. Looney return; 28222529f56eSJonathan T. Looney } 28232529f56eSJonathan T. Looney 28242529f56eSJonathan T. Looney /* Turn this over to tcp_log_dumpbucketlogs() to finish the work. */ 28252529f56eSJonathan T. Looney tcp_log_dumpbucketlogs(tlb, reason); 28262529f56eSJonathan T. Looney } 28272529f56eSJonathan T. Looney 28282529f56eSJonathan T. Looney /* 28292529f56eSJonathan T. 
Looney * Mark the end of a flow with the current stack. A stack can add 28302529f56eSJonathan T. Looney * stack-specific info to this trace event by overriding this 28312529f56eSJonathan T. Looney * function (see bbr_log_flowend() for example). 28322529f56eSJonathan T. Looney */ 28332529f56eSJonathan T. Looney void 28342529f56eSJonathan T. Looney tcp_log_flowend(struct tcpcb *tp) 28352529f56eSJonathan T. Looney { 283669c7c811SRandall Stewart if (tp->_t_logstate != TCP_LOG_STATE_OFF) { 28379eb0e832SGleb Smirnoff struct socket *so = tptosocket(tp); 28382529f56eSJonathan T. Looney TCP_LOG_EVENT(tp, NULL, &so->so_rcv, &so->so_snd, 28392529f56eSJonathan T. Looney TCP_LOG_FLOWEND, 0, 0, NULL, false); 28402529f56eSJonathan T. Looney } 28412529f56eSJonathan T. Looney } 284269c7c811SRandall Stewart 284369c7c811SRandall Stewart void 284469c7c811SRandall Stewart tcp_log_sendfile(struct socket *so, off_t offset, size_t nbytes, int flags) 284569c7c811SRandall Stewart { 284669c7c811SRandall Stewart struct inpcb *inp; 284769c7c811SRandall Stewart struct tcpcb *tp; 2848*73ee5756SRandall Stewart #ifdef TCP_REQUEST_TRK 2849*73ee5756SRandall Stewart struct http_sendfile_track *ent; 2850*73ee5756SRandall Stewart int i, fnd; 2851*73ee5756SRandall Stewart #endif 285269c7c811SRandall Stewart 285369c7c811SRandall Stewart inp = sotoinpcb(so); 285469c7c811SRandall Stewart KASSERT(inp != NULL, ("tcp_log_sendfile: inp == NULL")); 285569c7c811SRandall Stewart 285669c7c811SRandall Stewart /* quick check to see if logging is enabled for this connection */ 285769c7c811SRandall Stewart tp = intotcpcb(inp); 285869c7c811SRandall Stewart if ((inp->inp_flags & INP_DROPPED) || 285969c7c811SRandall Stewart (tp->_t_logstate == TCP_LOG_STATE_OFF)) { 286069c7c811SRandall Stewart return; 286169c7c811SRandall Stewart } 286269c7c811SRandall Stewart 286369c7c811SRandall Stewart INP_WLOCK(inp); 286469c7c811SRandall Stewart /* double check log state now that we have the lock */ 286569c7c811SRandall Stewart if (inp->inp_flags & INP_DROPPED) 286669c7c811SRandall Stewart goto done; 286769c7c811SRandall Stewart if (tp->_t_logstate != TCP_LOG_STATE_OFF) { 286869c7c811SRandall Stewart struct timeval tv; 286969c7c811SRandall Stewart tcp_log_eventspecific_t log; 287069c7c811SRandall Stewart 287169c7c811SRandall Stewart microuptime(&tv); 287269c7c811SRandall Stewart log.u_sf.offset = offset; 287369c7c811SRandall Stewart log.u_sf.length = nbytes; 287469c7c811SRandall Stewart log.u_sf.flags = flags; 287569c7c811SRandall Stewart 287669c7c811SRandall Stewart TCP_LOG_EVENTP(tp, NULL, 287769c7c811SRandall Stewart &tptosocket(tp)->so_rcv, 287869c7c811SRandall Stewart &tptosocket(tp)->so_snd, 287969c7c811SRandall Stewart TCP_LOG_SENDFILE, 0, 0, &log, false, &tv); 288069c7c811SRandall Stewart } 2881*73ee5756SRandall Stewart #ifdef TCP_REQUEST_TRK 2882*73ee5756SRandall Stewart if (tp->t_http_req == 0) { 2883*73ee5756SRandall Stewart /* No http requests to track */ 2884*73ee5756SRandall Stewart goto done; 2885*73ee5756SRandall Stewart } 2886*73ee5756SRandall Stewart fnd = 0; 2887*73ee5756SRandall Stewart if (tp->t_http_closed == 0) { 2888*73ee5756SRandall Stewart /* No closed end req to track */ 2889*73ee5756SRandall Stewart goto skip_closed_req; 2890*73ee5756SRandall Stewart } 2891*73ee5756SRandall Stewart for(i = 0; i < MAX_TCP_HTTP_REQ; i++) { 2892*73ee5756SRandall Stewart /* Lets see if this one can be found */ 2893*73ee5756SRandall Stewart ent = &tp->t_http_info[i]; 2894*73ee5756SRandall Stewart if (ent->flags == TCP_HTTP_TRACK_FLG_EMPTY) { 
2895*73ee5756SRandall Stewart /* Not used */ 2896*73ee5756SRandall Stewart continue; 2897*73ee5756SRandall Stewart } 2898*73ee5756SRandall Stewart if (ent->flags & TCP_HTTP_TRACK_FLG_OPEN) { 2899*73ee5756SRandall Stewart /* This pass does not consider open requests */ 2900*73ee5756SRandall Stewart continue; 2901*73ee5756SRandall Stewart } 2902*73ee5756SRandall Stewart if (ent->flags & TCP_HTTP_TRACK_FLG_COMP) { 2903*73ee5756SRandall Stewart /* Don't look at what we have completed */ 2904*73ee5756SRandall Stewart continue; 2905*73ee5756SRandall Stewart } 2906*73ee5756SRandall Stewart /* If we reach here, it is an allocated closed-end request */ 2907*73ee5756SRandall Stewart if ((ent->start == offset) || 2908*73ee5756SRandall Stewart ((offset > ent->start) && (offset < ent->end))){ 2909*73ee5756SRandall Stewart /* It's within this request. */ 2910*73ee5756SRandall Stewart fnd = 1; 2911*73ee5756SRandall Stewart } 2912*73ee5756SRandall Stewart if (fnd) { 2913*73ee5756SRandall Stewart /* 2914*73ee5756SRandall Stewart * It is at or past the end, so it's complete. 2915*73ee5756SRandall Stewart */ 2916*73ee5756SRandall Stewart ent->flags |= TCP_HTTP_TRACK_FLG_SEQV; 2917*73ee5756SRandall Stewart /* 2918*73ee5756SRandall Stewart * When an entry completes we can take (snd_una + sb_ccc) and know where 2919*73ee5756SRandall Stewart * the end of the range really is. Note that this works since two 2920*73ee5756SRandall Stewart * requests must be sequential and sendfile now is complete for *this* request. 2921*73ee5756SRandall Stewart * We must use sb_ccc since the data may still be in-flight in TLS. 2922*73ee5756SRandall Stewart * 2923*73ee5756SRandall Stewart * We always cautiously move the end_seq only if our calculations 2924*73ee5756SRandall Stewart * show it happened (just in case sendfile makes the call here at the wrong 2925*73ee5756SRandall Stewart * place). When we go COMP we will stop coming here and hopefully be 2926*73ee5756SRandall Stewart * left with the correct end_seq.
2927*73ee5756SRandall Stewart */ 2928*73ee5756SRandall Stewart if (SEQ_GT((tp->snd_una + so->so_snd.sb_ccc), ent->end_seq)) 2929*73ee5756SRandall Stewart ent->end_seq = tp->snd_una + so->so_snd.sb_ccc; 2930*73ee5756SRandall Stewart if ((offset + nbytes) >= ent->end) { 2931*73ee5756SRandall Stewart ent->flags |= TCP_HTTP_TRACK_FLG_COMP; 2932*73ee5756SRandall Stewart tcp_http_log_req_info(tp, ent, i, TCP_HTTP_REQ_LOG_COMPLETE, offset, nbytes); 2933*73ee5756SRandall Stewart } else { 2934*73ee5756SRandall Stewart tcp_http_log_req_info(tp, ent, i, TCP_HTTP_REQ_LOG_MOREYET, offset, nbytes); 2935*73ee5756SRandall Stewart } 2936*73ee5756SRandall Stewart /* We assume that sendfile never sends overlapping requests */ 2937*73ee5756SRandall Stewart goto done; 2938*73ee5756SRandall Stewart } 2939*73ee5756SRandall Stewart } 2940*73ee5756SRandall Stewart skip_closed_req: 2941*73ee5756SRandall Stewart if (!fnd) { 2942*73ee5756SRandall Stewart /* OK, now let's look for open requests */ 2943*73ee5756SRandall Stewart for(i = 0; i < MAX_TCP_HTTP_REQ; i++) { 2944*73ee5756SRandall Stewart ent = &tp->t_http_info[i]; 2945*73ee5756SRandall Stewart if (ent->flags == TCP_HTTP_TRACK_FLG_EMPTY) { 2946*73ee5756SRandall Stewart /* Not used */ 2947*73ee5756SRandall Stewart continue; 2948*73ee5756SRandall Stewart } 2949*73ee5756SRandall Stewart if ((ent->flags & TCP_HTTP_TRACK_FLG_OPEN) == 0) 2950*73ee5756SRandall Stewart continue; 2951*73ee5756SRandall Stewart /* If we reach here, it is an allocated open request */ 2952*73ee5756SRandall Stewart if (ent->start == offset) { 2953*73ee5756SRandall Stewart /* It begins this request */ 2954*73ee5756SRandall Stewart ent->start_seq = tp->snd_una + 2955*73ee5756SRandall Stewart tptosocket(tp)->so_snd.sb_ccc; 2956*73ee5756SRandall Stewart ent->flags |= TCP_HTTP_TRACK_FLG_SEQV; 2957*73ee5756SRandall Stewart break; 2958*73ee5756SRandall Stewart } else if (offset > ent->start) { 2959*73ee5756SRandall Stewart ent->flags |= TCP_HTTP_TRACK_FLG_SEQV; 2960*73ee5756SRandall Stewart break; 2961*73ee5756SRandall Stewart } 2962*73ee5756SRandall Stewart } 2963*73ee5756SRandall Stewart } 2964*73ee5756SRandall Stewart #endif 296569c7c811SRandall Stewart done: 296669c7c811SRandall Stewart INP_WUNLOCK(inp); 296769c7c811SRandall Stewart } 2968