/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2016-2018 Netflix, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
#include "opt_inet.h"
#include <sys/param.h>
#include <sys/arb.h>
#include <sys/hash.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/qmath.h>
#include <sys/queue.h>
#include <sys/refcount.h>
#include <sys/rwlock.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/tree.h>
#include <sys/stats.h> /* Must come after qmath.h and tree.h */
#include <sys/counter.h>
#include <dev/tcp_log/tcp_log_dev.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_log_buf.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_hpts.h>

/* Default expiry time */
#define	TCP_LOG_EXPIRE_TIME	((sbintime_t)60 * SBT_1S)

/* Max interval at which to run the expiry timer */
#define	TCP_LOG_EXPIRE_INTVL	((sbintime_t)5 * SBT_1S)

bool	tcp_log_verbose;
static uma_zone_t tcp_log_id_bucket_zone, tcp_log_id_node_zone, tcp_log_zone;
static int	tcp_log_session_limit = TCP_LOG_BUF_DEFAULT_SESSION_LIMIT;
static uint32_t	tcp_log_version = TCP_LOG_BUF_VER;
RB_HEAD(tcp_log_id_tree, tcp_log_id_bucket);
static struct tcp_log_id_tree tcp_log_id_head;
static STAILQ_HEAD(, tcp_log_id_node) tcp_log_expireq_head =
    STAILQ_HEAD_INITIALIZER(tcp_log_expireq_head);
static struct mtx tcp_log_expireq_mtx;
static struct callout tcp_log_expireq_callout;
static u_long tcp_log_auto_ratio = 0;
static volatile u_long tcp_log_auto_ratio_cur = 0;
static uint32_t tcp_log_auto_mode = TCP_LOG_STATE_TAIL;
static bool tcp_log_auto_all = false;
static uint32_t tcp_disable_all_bb_logs = 0;

RB_PROTOTYPE_STATIC(tcp_log_id_tree, tcp_log_id_bucket, tlb_rb, tcp_log_id_cmp)

SYSCTL_NODE(_net_inet_tcp, OID_AUTO, bb, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "TCP Black Box controls");

SYSCTL_NODE(_net_inet_tcp_bb, OID_AUTO, tp, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "TCP Black Box Trace Point controls");

SYSCTL_BOOL(_net_inet_tcp_bb, OID_AUTO, log_verbose, CTLFLAG_RW, &tcp_log_verbose,
    0, "Force verbose logging for TCP traces");

SYSCTL_INT(_net_inet_tcp_bb, OID_AUTO, log_session_limit,
    CTLFLAG_RW, &tcp_log_session_limit, 0,
    "Maximum number of events maintained for each TCP session");

uint32_t tcp_trace_point_config = 0;
SYSCTL_U32(_net_inet_tcp_bb_tp, OID_AUTO, number, CTLFLAG_RW,
    &tcp_trace_point_config, TCP_LOG_STATE_HEAD_AUTO,
    "Trace point number to activate (0 = none, 0xffffffff = all)");

uint32_t tcp_trace_point_bb_mode = TCP_LOG_STATE_CONTINUAL;
SYSCTL_U32(_net_inet_tcp_bb_tp, OID_AUTO, bbmode, CTLFLAG_RW,
    &tcp_trace_point_bb_mode, TCP_LOG_STATE_HEAD_AUTO,
    "BB logging mode to activate when the trace point is hit");

int32_t tcp_trace_point_count = 0;
SYSCTL_U32(_net_inet_tcp_bb_tp, OID_AUTO, count, CTLFLAG_RW,
    &tcp_trace_point_count, TCP_LOG_STATE_HEAD_AUTO,
    "How many connections that hit the trace point will have BB logging turned on");
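
/*
 * Example (illustrative sketch): the three trace point knobs above are
 * normally driven via sysctl(8).  To have the next 10 connections that hit
 * a given trace point switch to the BB logging mode stored in bbmode, an
 * administrator might run something like:
 *
 *	sysctl net.inet.tcp.bb.tp.bbmode=4	(TCP_LOG_STATE_CONTINUAL;
 *						 check tcp_log_buf.h for the
 *						 actual numeric value)
 *	sysctl net.inet.tcp.bb.tp.count=10
 *	sysctl net.inet.tcp.bb.tp.number=<trace point number>
 *
 * Setting number to 0 disables the facility; 0xffffffff matches all trace
 * points (see the description strings above).
 */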

SYSCTL_UMA_MAX(_net_inet_tcp_bb, OID_AUTO, log_global_limit, CTLFLAG_RW,
    &tcp_log_zone, "Maximum number of events maintained for all TCP sessions");

SYSCTL_UMA_CUR(_net_inet_tcp_bb, OID_AUTO, log_global_entries, CTLFLAG_RD,
    &tcp_log_zone, "Current number of events maintained for all TCP sessions");

SYSCTL_UMA_MAX(_net_inet_tcp_bb, OID_AUTO, log_id_limit, CTLFLAG_RW,
    &tcp_log_id_bucket_zone, "Maximum number of log IDs");

SYSCTL_UMA_CUR(_net_inet_tcp_bb, OID_AUTO, log_id_entries, CTLFLAG_RD,
    &tcp_log_id_bucket_zone, "Current number of log IDs");

SYSCTL_UMA_MAX(_net_inet_tcp_bb, OID_AUTO, log_id_tcpcb_limit, CTLFLAG_RW,
    &tcp_log_id_node_zone, "Maximum number of tcpcbs with log IDs");

SYSCTL_UMA_CUR(_net_inet_tcp_bb, OID_AUTO, log_id_tcpcb_entries, CTLFLAG_RD,
    &tcp_log_id_node_zone, "Current number of tcpcbs with log IDs");

SYSCTL_U32(_net_inet_tcp_bb, OID_AUTO, log_version, CTLFLAG_RD, &tcp_log_version,
    0, "Version of log formats exported");

SYSCTL_U32(_net_inet_tcp_bb, OID_AUTO, disable_all, CTLFLAG_RW,
    &tcp_disable_all_bb_logs, 0,
    "Disable all BB logging for all connections");

SYSCTL_ULONG(_net_inet_tcp_bb, OID_AUTO, log_auto_ratio, CTLFLAG_RW,
    &tcp_log_auto_ratio, 0, "Do auto capturing for 1 out of N sessions");

SYSCTL_U32(_net_inet_tcp_bb, OID_AUTO, log_auto_mode, CTLFLAG_RW,
    &tcp_log_auto_mode, 0,
    "Logging mode for auto-selected sessions (default is TCP_LOG_STATE_TAIL)");

SYSCTL_BOOL(_net_inet_tcp_bb, OID_AUTO, log_auto_all, CTLFLAG_RW,
    &tcp_log_auto_all, 0,
    "Auto-select from all sessions (rather than just those with IDs)");

#ifdef TCPLOG_DEBUG_COUNTERS
counter_u64_t tcp_log_queued;
counter_u64_t tcp_log_que_fail1;
counter_u64_t tcp_log_que_fail2;
counter_u64_t tcp_log_que_fail3;
counter_u64_t tcp_log_que_fail4;
counter_u64_t tcp_log_que_fail5;
counter_u64_t tcp_log_que_copyout;
counter_u64_t tcp_log_que_read;
counter_u64_t tcp_log_que_freed;

SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, queued, CTLFLAG_RD,
    &tcp_log_queued, "Number of entries queued");
SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, fail1, CTLFLAG_RD,
    &tcp_log_que_fail1, "Number of entries queued but fail 1");
SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, fail2, CTLFLAG_RD,
    &tcp_log_que_fail2, "Number of entries queued but fail 2");
SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, fail3, CTLFLAG_RD,
    &tcp_log_que_fail3, "Number of entries queued but fail 3");
SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, fail4, CTLFLAG_RD,
    &tcp_log_que_fail4, "Number of entries queued but fail 4");
SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, fail5, CTLFLAG_RD,
    &tcp_log_que_fail5, "Number of entries queued but fail 5");
SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, copyout, CTLFLAG_RD,
    &tcp_log_que_copyout, "Number of entries copied out");
SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, read, CTLFLAG_RD,
    &tcp_log_que_read, "Number of entries read from the queue");
SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, freed, CTLFLAG_RD,
    &tcp_log_que_freed, "Number of entries freed after reading");
#endif

#ifdef INVARIANTS
#define	TCPLOG_DEBUG_RINGBUF
#endif
/* Number of requests to consider a PBCID "active". */
#define	ACTIVE_REQUEST_COUNT 10

/* Statistic tracking for "active" PBCIDs. */
static counter_u64_t tcp_log_pcb_ids_cur;
static counter_u64_t tcp_log_pcb_ids_tot;

SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, pcb_ids_cur, CTLFLAG_RD,
    &tcp_log_pcb_ids_cur, "Number of pcb IDs allocated in the system");
SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, pcb_ids_tot, CTLFLAG_RD,
    &tcp_log_pcb_ids_tot, "Total number of pcb IDs that have been allocated");

struct tcp_log_mem
{
	STAILQ_ENTRY(tcp_log_mem) tlm_queue;
	struct tcp_log_buffer	tlm_buf;
	struct tcp_log_verbose	tlm_v;
#ifdef TCPLOG_DEBUG_RINGBUF
	volatile int	tlm_refcnt;
#endif
};

/* 60 bytes for the header, + 16 bytes for padding */
static uint8_t	zerobuf[76];

/*
 * Lock order:
 * 1. TCPID_TREE
 * 2. TCPID_BUCKET
 * 3. INP
 *
 * Rules:
 * A. You need a lock on the Tree to add/remove buckets.
 * B. You need a lock on the bucket to add/remove nodes from the bucket.
 * C. To change information in a node, you need the INP lock if the tln_closed
 *    field is false. Otherwise, you need the bucket lock. (Note that the
 *    tln_closed field can change at any point, so you need to recheck the
 *    entry after acquiring the INP lock.)
 * D. To remove a node from the bucket, you must have that entry locked,
 *    according to the criteria of Rule C. Also, the node must not be on
 *    the expiry queue.
 * E. The exception to C is the expiry queue fields, which are locked by
 *    the TCPLOG_EXPIREQ lock.
 *
 * Buckets have a reference count. Each node is a reference. Further,
 * other callers may add reference counts to keep a bucket from disappearing.
 * You can add a reference as long as you own a lock sufficient to keep the
 * bucket from disappearing. For example, a common use is:
 *   a. Have a locked INP, but need to lock the TCPID_BUCKET.
 *   b. Add a refcount on the bucket. (Safe because the INP lock prevents
 *      the TCPID_BUCKET from going away.)
 *   c. Drop the INP lock.
 *   d. Acquire a lock on the TCPID_BUCKET.
 *   e. Acquire a lock on the INP.
 *   f. Drop the refcount on the bucket.
 *      (At this point, the bucket may disappear.)
 *
 * Expire queue lock:
 * You can acquire this with either the bucket or INP lock. Don't reverse it.
 * When the expire code has committed to freeing a node, it resets the expiry
 * time to SBT_MAX. That is the signal to everyone else that they should
 * leave that node alone.
 */
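
/*
 * A minimal sketch of steps a-f above (illustrative only; the function name
 * is hypothetical).  This is essentially the pattern used by
 * tcp_log_set_tag() and tcp_log_apply_ratio() below:
 *
 *	static void
 *	example_use_bucket(struct tcpcb *tp)
 *	{
 *		struct inpcb *inp = tptoinpcb(tp);
 *		struct tcp_log_id_bucket *tlb = tp->t_lib;
 *		int tree_locked = TREE_UNLOCKED;
 *
 *		INP_WLOCK_ASSERT(inp);		   (a) INP is locked
 *		TCPID_BUCKET_REF(tlb);		   (b) INP lock pins the bucket
 *		INP_WUNLOCK(inp);		   (c) avoid lock order reversal
 *		TCPID_BUCKET_LOCK(tlb);		   (d)
 *		INP_WLOCK(inp);			   (e) recheck INP_DROPPED here
 *
 *		... work on the bucket/node ...
 *
 *		   (f) Drop the reference.  tcp_log_unref_bucket() (below)
 *		   frees an empty bucket, possibly taking the tree lock;
 *		   otherwise we drop the bucket lock ourselves.
 *		if (!tcp_log_unref_bucket(tlb, &tree_locked, inp))
 *			TCPID_BUCKET_UNLOCK(tlb);
 *	}
 */
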
static struct rwlock tcp_id_tree_lock;
#define	TCPID_TREE_WLOCK()		rw_wlock(&tcp_id_tree_lock)
#define	TCPID_TREE_RLOCK()		rw_rlock(&tcp_id_tree_lock)
#define	TCPID_TREE_UPGRADE()		rw_try_upgrade(&tcp_id_tree_lock)
#define	TCPID_TREE_WUNLOCK()		rw_wunlock(&tcp_id_tree_lock)
#define	TCPID_TREE_RUNLOCK()		rw_runlock(&tcp_id_tree_lock)
#define	TCPID_TREE_WLOCK_ASSERT()	rw_assert(&tcp_id_tree_lock, RA_WLOCKED)
#define	TCPID_TREE_RLOCK_ASSERT()	rw_assert(&tcp_id_tree_lock, RA_RLOCKED)
#define	TCPID_TREE_UNLOCK_ASSERT()	rw_assert(&tcp_id_tree_lock, RA_UNLOCKED)

#define	TCPID_BUCKET_LOCK_INIT(tlb)	mtx_init(&((tlb)->tlb_mtx), "tcp log id bucket", NULL, MTX_DEF)
#define	TCPID_BUCKET_LOCK_DESTROY(tlb)	mtx_destroy(&((tlb)->tlb_mtx))
#define	TCPID_BUCKET_LOCK(tlb)		mtx_lock(&((tlb)->tlb_mtx))
#define	TCPID_BUCKET_UNLOCK(tlb)	mtx_unlock(&((tlb)->tlb_mtx))
#define	TCPID_BUCKET_LOCK_ASSERT(tlb)	mtx_assert(&((tlb)->tlb_mtx), MA_OWNED)
#define	TCPID_BUCKET_UNLOCK_ASSERT(tlb)	mtx_assert(&((tlb)->tlb_mtx), MA_NOTOWNED)

#define	TCPID_BUCKET_REF(tlb)		refcount_acquire(&((tlb)->tlb_refcnt))
#define	TCPID_BUCKET_UNREF(tlb)		refcount_release(&((tlb)->tlb_refcnt))

#define	TCPLOG_EXPIREQ_LOCK()		mtx_lock(&tcp_log_expireq_mtx)
#define	TCPLOG_EXPIREQ_UNLOCK()		mtx_unlock(&tcp_log_expireq_mtx)

SLIST_HEAD(tcp_log_id_head, tcp_log_id_node);

struct tcp_log_id_bucket
{
	/*
	 * tlb_id must be first. This lets us use strcmp on
	 * (struct tcp_log_id_bucket *) and (char *) interchangeably.
	 */
	char	tlb_id[TCP_LOG_ID_LEN];
	char	tlb_tag[TCP_LOG_TAG_LEN];
	RB_ENTRY(tcp_log_id_bucket) tlb_rb;
	struct tcp_log_id_head tlb_head;
	struct mtx tlb_mtx;
	volatile u_int tlb_refcnt;
	volatile u_int tlb_reqcnt;
	uint32_t tlb_loglimit;
	int8_t	tlb_logstate;
};
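
/*
 * Because tlb_id is the first member of struct tcp_log_id_bucket (see the
 * comment inside the struct), a bare ID string can stand in for a bucket
 * when searching the tree: tcp_log_id_cmp() only ever looks at tlb_id.
 * A minimal sketch of the lookup done in tcp_log_set_id() below:
 *
 *	struct tcp_log_id_bucket *tmp_tlb;
 *
 *	tmp_tlb = RB_FIND(tcp_log_id_tree, &tcp_log_id_head,
 *	    (struct tcp_log_id_bucket *)id);
 */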

struct tcp_log_id_node
{
	SLIST_ENTRY(tcp_log_id_node) tln_list;
	STAILQ_ENTRY(tcp_log_id_node) tln_expireq; /* Locked by the expireq lock */
	sbintime_t tln_expiretime; /* Locked by the expireq lock */

	/*
	 * If INP is NULL, that means the connection has closed. We've
	 * saved the connection endpoint information and the log entries
	 * in the tln_ie and tln_entries members. We've also saved a pointer
	 * to the enclosing bucket here. If INP is not NULL, the information is
	 * in the PCB and not here.
	 */
	struct inpcb *tln_inp;
	struct tcpcb *tln_tp;
	struct tcp_log_id_bucket *tln_bucket;
	struct in_endpoints tln_ie;
	struct tcp_log_stailq tln_entries;
	int tln_count;
	volatile int tln_closed;
	uint8_t tln_af;
};

enum tree_lock_state {
	TREE_UNLOCKED = 0,
	TREE_RLOCKED,
	TREE_WLOCKED,
};

/* Do we want to select this session for auto-logging? */
static __inline bool
tcp_log_selectauto(void)
{

	/*
	 * If we are doing auto-capturing, figure out whether we will capture
	 * this session.
	 */
	if (tcp_log_auto_ratio &&
	    (tcp_disable_all_bb_logs == 0) &&
	    (atomic_fetchadd_long(&tcp_log_auto_ratio_cur, 1) %
	    tcp_log_auto_ratio) == 0)
		return (true);
	return (false);
}

static __inline int
tcp_log_id_cmp(struct tcp_log_id_bucket *a, struct tcp_log_id_bucket *b)
{
	KASSERT(a != NULL, ("tcp_log_id_cmp: argument a is unexpectedly NULL"));
	KASSERT(b != NULL, ("tcp_log_id_cmp: argument b is unexpectedly NULL"));
	return strncmp(a->tlb_id, b->tlb_id, TCP_LOG_ID_LEN);
}

RB_GENERATE_STATIC(tcp_log_id_tree, tcp_log_id_bucket, tlb_rb, tcp_log_id_cmp)

static __inline void
tcp_log_id_validate_tree_lock(int tree_locked)
{

#ifdef INVARIANTS
	switch (tree_locked) {
	case TREE_WLOCKED:
		TCPID_TREE_WLOCK_ASSERT();
		break;
	case TREE_RLOCKED:
		TCPID_TREE_RLOCK_ASSERT();
		break;
	case TREE_UNLOCKED:
		TCPID_TREE_UNLOCK_ASSERT();
		break;
	default:
		kassert_panic("%s:%d: unknown tree lock state", __func__,
		    __LINE__);
	}
#endif
}

static __inline void
tcp_log_remove_bucket(struct tcp_log_id_bucket *tlb)
{

	TCPID_TREE_WLOCK_ASSERT();
	KASSERT(SLIST_EMPTY(&tlb->tlb_head),
	    ("%s: Attempt to remove non-empty bucket", __func__));
	if (RB_REMOVE(tcp_log_id_tree, &tcp_log_id_head, tlb) == NULL) {
#ifdef INVARIANTS
		kassert_panic("%s:%d: error removing element from tree",
		    __func__, __LINE__);
#endif
	}
	TCPID_BUCKET_LOCK_DESTROY(tlb);
	counter_u64_add(tcp_log_pcb_ids_cur, (int64_t)-1);
	uma_zfree(tcp_log_id_bucket_zone, tlb);
}

/*
 * Call with a referenced and locked bucket.
 * Will return true if the bucket was freed; otherwise, false.
 * tlb: The bucket to unreference.
 * tree_locked: A pointer to the state of the tree lock. If the tree lock
 *    state changes, the function will update it.
 * inp: If not NULL and the function needs to drop the inp lock to relock the
 *    tree, it will do so. (The caller must ensure inp will not become invalid,
 *    probably by holding a reference to it.)
 */
static bool
tcp_log_unref_bucket(struct tcp_log_id_bucket *tlb, int *tree_locked,
    struct inpcb *inp)
{

	KASSERT(tlb != NULL, ("%s: called with NULL tlb", __func__));
	KASSERT(tree_locked != NULL, ("%s: called with NULL tree_locked",
	    __func__));

	tcp_log_id_validate_tree_lock(*tree_locked);

	/*
	 * Did we hold the last reference on the tlb? If so, we may need
	 * to free it. (Note that we can realistically only execute the
	 * loop twice: once without a write lock and once with a write
	 * lock.)
	 */
	while (TCPID_BUCKET_UNREF(tlb)) {
		/*
		 * We need a write lock on the tree to free this.
		 * If we can upgrade the tree lock, this is "easy". If we
		 * can't upgrade the tree lock, we need to do this the
		 * "hard" way: unwind all our locks and relock everything.
		 * In the meantime, anything could have changed. We even
		 * need to validate that we still need to free the bucket.
		 */
		if (*tree_locked == TREE_RLOCKED && TCPID_TREE_UPGRADE())
			*tree_locked = TREE_WLOCKED;
		else if (*tree_locked != TREE_WLOCKED) {
			TCPID_BUCKET_REF(tlb);
			if (inp != NULL)
				INP_WUNLOCK(inp);
			TCPID_BUCKET_UNLOCK(tlb);
			if (*tree_locked == TREE_RLOCKED)
				TCPID_TREE_RUNLOCK();
			TCPID_TREE_WLOCK();
			*tree_locked = TREE_WLOCKED;
			TCPID_BUCKET_LOCK(tlb);
			if (inp != NULL)
				INP_WLOCK(inp);
			continue;
		}

		/*
		 * We have an empty bucket and a write lock on the tree.
		 * Remove the empty bucket.
		 */
		tcp_log_remove_bucket(tlb);
		return (true);
	}
	return (false);
}
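
/*
 * A sketch of the caller's side of the contract above (illustrative only;
 * tcp_log_set_tag() below is a real instance).  The caller enters holding a
 * reference and the bucket lock, and afterwards unwinds whatever tree lock
 * state the function left behind:
 *
 *	if (!tcp_log_unref_bucket(tlb, &tree_locked, NULL))
 *		TCPID_BUCKET_UNLOCK(tlb);
 *	if (tree_locked == TREE_WLOCKED)
 *		TCPID_TREE_WUNLOCK();
 *	else if (tree_locked == TREE_RLOCKED)
 *		TCPID_TREE_RUNLOCK();
 */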

/*
 * Call with a locked bucket. This function will release the lock on the
 * bucket before returning.
 *
 * The caller is responsible for freeing the tp->t_lin/tln node!
 *
 * Note: one of tp or both tlb and tln must be supplied.
 *
 * inp: A pointer to the inp. If the function needs to drop the inp lock to
 *    acquire the tree write lock, it will do so. (The caller must ensure inp
 *    will not become invalid, probably by holding a reference to it.)
 * tp: A pointer to the tcpcb. (optional; if specified, tlb and tln are ignored)
 * tlb: A pointer to the bucket. (optional; ignored if tp is specified)
 * tln: A pointer to the node. (optional; ignored if tp is specified)
 * tree_locked: A pointer to the state of the tree lock. If the tree lock
 *    state changes, the function will update it.
 *
 * Will return true if the INP lock was reacquired; otherwise, false.
 */
static bool
tcp_log_remove_id_node(struct inpcb *inp, struct tcpcb *tp,
    struct tcp_log_id_bucket *tlb, struct tcp_log_id_node *tln,
    int *tree_locked)
{
	int orig_tree_locked;

	KASSERT(tp != NULL || (tlb != NULL && tln != NULL),
	    ("%s: called with tp=%p, tlb=%p, tln=%p", __func__,
	    tp, tlb, tln));
	KASSERT(tree_locked != NULL, ("%s: called with NULL tree_locked",
	    __func__));

	if (tp != NULL) {
		tlb = tp->t_lib;
		tln = tp->t_lin;
		KASSERT(tlb != NULL, ("%s: unexpectedly NULL tlb", __func__));
		KASSERT(tln != NULL, ("%s: unexpectedly NULL tln", __func__));
	}

	tcp_log_id_validate_tree_lock(*tree_locked);
	TCPID_BUCKET_LOCK_ASSERT(tlb);

	/*
	 * Remove the node, clear the log bucket and node from the TCPCB, and
	 * decrement the bucket refcount. In the process, if this is the
	 * last reference, the bucket will be freed.
	 */
	SLIST_REMOVE(&tlb->tlb_head, tln, tcp_log_id_node, tln_list);
	if (tp != NULL) {
		tp->t_lib = NULL;
		tp->t_lin = NULL;
	}
	orig_tree_locked = *tree_locked;
	if (!tcp_log_unref_bucket(tlb, tree_locked, inp))
		TCPID_BUCKET_UNLOCK(tlb);
	return (*tree_locked != orig_tree_locked);
}
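
/*
 * A sketch of a typical call (illustrative only; the matching code is in
 * tcp_log_set_id() below).  The node is saved for reuse before the call,
 * and a "true" return means locks were dropped along the way, so the inp
 * and the tcpcb's bucket linkage must be revalidated:
 *
 *	tln = tp->t_lin;
 *	if (tcp_log_remove_id_node(inp, tp, NULL, NULL, &tree_locked)) {
 *		RECHECK_INP();
 *		if (tp->t_lib != NULL || tp->t_lin != NULL)
 *			goto restart;
 *	}
 */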
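
/*
 * Revalidate the inp after its lock has been dropped and reacquired: if the
 * connection was dropped in the interim, run the supplied cleanup, set the
 * return value, and jump to the function's "done" label; otherwise refresh
 * the cached tcpcb pointer.  These macros rely on local variables named
 * "inp", "tp", and "rv" and on a "done" label in the enclosing function.
 * RECHECK_INP() is the no-cleanup form.
 */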
#define	RECHECK_INP_CLEAN(cleanup)	do {			\
	if (inp->inp_flags & INP_DROPPED) {			\
		rv = ECONNRESET;				\
		cleanup;					\
		goto done;					\
	}							\
	tp = intotcpcb(inp);					\
} while (0)

#define	RECHECK_INP()	RECHECK_INP_CLEAN(/* noop */)

static void
tcp_log_grow_tlb(char *tlb_id, struct tcpcb *tp)
{

	INP_WLOCK_ASSERT(tptoinpcb(tp));

#ifdef STATS
	if (V_tcp_perconn_stats_enable == 2 && tp->t_stats == NULL)
		(void)tcp_stats_sample_rollthedice(tp, tlb_id, strlen(tlb_id));
#endif
}

static void
tcp_log_increment_reqcnt(struct tcp_log_id_bucket *tlb)
{

	atomic_fetchadd_int(&tlb->tlb_reqcnt, 1);
}

int
tcp_log_apply_ratio(struct tcpcb *tp, int ratio)
{
	struct tcp_log_id_bucket *tlb;
	struct inpcb *inp = tptoinpcb(tp);
	uint32_t hash, ratio_hash_thresh;
	int rv, tree_locked;

	rv = 0;
	tree_locked = TREE_UNLOCKED;
	tlb = tp->t_lib;

	INP_WLOCK_ASSERT(inp);
	if (tlb == NULL) {
		INP_WUNLOCK(inp);
		return (EOPNOTSUPP);
	}
	if (ratio)
		ratio_hash_thresh = max(1, UINT32_MAX / ratio);
	else
		ratio_hash_thresh = 0;
	TCPID_BUCKET_REF(tlb);
	INP_WUNLOCK(inp);
	TCPID_BUCKET_LOCK(tlb);
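
	/*
	 * The threshold computed above makes roughly 1 in "ratio" log IDs
	 * keep logging: the ID hashes to a uniform 32-bit value, and only
	 * hashes at or below UINT32_MAX / ratio escape the disable below.
	 * For example, with ratio == 4 the threshold is about 0x3fffffff,
	 * so about a quarter of all IDs stay eligible.  A ratio of 0 yields
	 * a threshold of 0, which disables logging for effectively every ID
	 * whose state was not already set elsewhere.
	 */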
	hash = hash32_buf(tlb->tlb_id, strlen(tlb->tlb_id), 0);
	if (hash > ratio_hash_thresh && tp->_t_logstate == TCP_LOG_STATE_OFF &&
	    tlb->tlb_logstate == TCP_LOG_STATE_OFF) {
		/*
		 * Ratio decision not to log this log ID (and this connection
		 * by way of association). We only apply a ratio-based log
		 * disable decision if it would not interfere with a log
		 * enable decision made elsewhere, e.g. by
		 * tcp_log_selectauto() or setsockopt().
		 */
		tlb->tlb_logstate = TCP_LOG_STATE_RATIO_OFF;
		INP_WLOCK(inp);
		RECHECK_INP();
		(void)tcp_log_state_change(tp, TCP_LOG_STATE_OFF);
done:
		INP_WUNLOCK(inp);
	}

	INP_UNLOCK_ASSERT(inp);
	if (!tcp_log_unref_bucket(tlb, &tree_locked, NULL))
		TCPID_BUCKET_UNLOCK(tlb);

	if (tree_locked == TREE_WLOCKED) {
		TCPID_TREE_WLOCK_ASSERT();
		TCPID_TREE_WUNLOCK();
	} else if (tree_locked == TREE_RLOCKED) {
		TCPID_TREE_RLOCK_ASSERT();
		TCPID_TREE_RUNLOCK();
	} else
		TCPID_TREE_UNLOCK_ASSERT();

	return (rv);
}

/*
 * Associate the specified tag with a particular TCP log ID.
 * Called with INPCB locked. Returns with it unlocked.
 * Returns 0 on success or EOPNOTSUPP if the connection has no TCP log ID.
 */
int
tcp_log_set_tag(struct tcpcb *tp, char *tag)
{
	struct inpcb *inp = tptoinpcb(tp);
	struct tcp_log_id_bucket *tlb;
	int tree_locked;

	INP_WLOCK_ASSERT(inp);

	tree_locked = TREE_UNLOCKED;
	tlb = tp->t_lib;
	if (tlb == NULL) {
		INP_WUNLOCK(inp);
		return (EOPNOTSUPP);
	}

	TCPID_BUCKET_REF(tlb);
	INP_WUNLOCK(inp);
	TCPID_BUCKET_LOCK(tlb);
	strlcpy(tlb->tlb_tag, tag, TCP_LOG_TAG_LEN);
	if (!tcp_log_unref_bucket(tlb, &tree_locked, NULL))
		TCPID_BUCKET_UNLOCK(tlb);

	if (tree_locked == TREE_WLOCKED) {
		TCPID_TREE_WLOCK_ASSERT();
		TCPID_TREE_WUNLOCK();
	} else if (tree_locked == TREE_RLOCKED) {
		TCPID_TREE_RLOCK_ASSERT();
		TCPID_TREE_RUNLOCK();
	} else
		TCPID_TREE_UNLOCK_ASSERT();

	return (0);
}
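
/*
 * Setting the log ID is normally driven from userland through the TCP_LOGID
 * socket option (see tcp(4)), which ends up in tcp_log_set_id() below.  A
 * minimal userland sketch, with error handling omitted:
 *
 *	char id[TCP_LOG_ID_LEN] = "my-service";
 *
 *	setsockopt(s, IPPROTO_TCP, TCP_LOGID, id, strlen(id) + 1);
 */
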
/*
 * Set the TCP log ID for a TCPCB.
 * Called with INPCB locked. Returns with it unlocked.
 */
int
tcp_log_set_id(struct tcpcb *tp, char *id)
{
	struct tcp_log_id_bucket *tlb, *tmp_tlb;
	struct tcp_log_id_node *tln;
	struct inpcb *inp = tptoinpcb(tp);
	int tree_locked, rv;
	bool bucket_locked, same;

	tlb = NULL;
	tln = NULL;
	tree_locked = TREE_UNLOCKED;
	bucket_locked = false;

restart:
	INP_WLOCK_ASSERT(inp);
	/* See if the ID is unchanged. */
	same = ((tp->t_lib != NULL && !strcmp(tp->t_lib->tlb_id, id)) ||
	    (tp->t_lib == NULL && *id == 0));
	if (tp->_t_logstate && STAILQ_FIRST(&tp->t_logs) && !same) {
		/*
		 * There are residual logs left; we may be changing IDs,
		 * so dump what we can.
		 */
		switch(tp->_t_logstate) {
		case TCP_LOG_STATE_HEAD_AUTO:
			(void)tcp_log_dump_tp_logbuf(tp, "auto-dumped from head at id switch",
			    M_NOWAIT, false);
			break;
		case TCP_LOG_STATE_TAIL_AUTO:
			(void)tcp_log_dump_tp_logbuf(tp, "auto-dumped from tail at id switch",
			    M_NOWAIT, false);
			break;
		case TCP_LOG_STATE_CONTINUAL:
			(void)tcp_log_dump_tp_logbuf(tp, "auto-dumped from continual at id switch",
			    M_NOWAIT, false);
			break;
		case TCP_LOG_VIA_BBPOINTS:
			(void)tcp_log_dump_tp_logbuf(tp, "auto-dumped from bbpoints at id switch",
			    M_NOWAIT, false);
			break;
		}
	}
	if (same) {
		if (tp->t_lib != NULL) {
			tcp_log_increment_reqcnt(tp->t_lib);
			if ((tp->t_lib->tlb_logstate > TCP_LOG_STATE_OFF) &&
			    (tp->t_log_state_set == 0)) {
				/* Clone in any logging */

				tp->_t_logstate = tp->t_lib->tlb_logstate;
			}
			if ((tp->t_lib->tlb_loglimit) &&
			    (tp->t_log_state_set == 0)) {
				/* We also have a limit set */

				tp->t_loglimit = tp->t_lib->tlb_loglimit;
			}
		}
		rv = 0;
		goto done;
	}

	/*
	 * If the TCPCB had a previous ID, we need to extricate it from
	 * the previous list.
	 *
	 * Drop the TCPCB lock and lock the tree and the bucket.
	 * Because this is called in the socket context, we (theoretically)
	 * don't need to worry about the INPCB completely going away
	 * while we are gone.
	 */
	if (tp->t_lib != NULL) {
		tlb = tp->t_lib;
		TCPID_BUCKET_REF(tlb);
		INP_WUNLOCK(inp);

		if (tree_locked == TREE_UNLOCKED) {
			TCPID_TREE_RLOCK();
			tree_locked = TREE_RLOCKED;
		}
		TCPID_BUCKET_LOCK(tlb);
		bucket_locked = true;
		INP_WLOCK(inp);

		/*
		 * Unreference the bucket. If our bucket went away, it is no
		 * longer locked or valid.
		 */
		if (tcp_log_unref_bucket(tlb, &tree_locked, inp)) {
			bucket_locked = false;
			tlb = NULL;
		}

		/* Validate the INP. */
		RECHECK_INP();

		/*
		 * Evaluate whether the bucket changed while we were unlocked.
		 *
		 * Possible scenarios here:
		 * 1. Bucket is unchanged and the same one we started with.
		 * 2. The TCPCB no longer has a bucket and our bucket was
		 *    freed.
		 * 3. The TCPCB has a new bucket, whether or not ours was
		 *    freed.
		 * 4. The TCPCB no longer has a bucket and our bucket was
		 *    not freed.
		 *
		 * In cases 2-4, we will start over. In case 1, we will
		 * proceed here to remove the bucket.
		 */
		if (tlb == NULL || tp->t_lib != tlb) {
			KASSERT(bucket_locked || tlb == NULL,
			    ("%s: bucket_locked (%d) and tlb (%p) are "
			    "inconsistent", __func__, bucket_locked, tlb));

			if (bucket_locked) {
				TCPID_BUCKET_UNLOCK(tlb);
				bucket_locked = false;
				tlb = NULL;
			}
			goto restart;
		}

		/*
		 * Store the (struct tcp_log_id_node) for reuse. Then, remove
		 * it from the bucket. In the process, we may end up relocking.
		 * If so, we need to validate that the INP is still valid, and
		 * the TCPCB entries match what we expect.
		 *
		 * We will clear tlb and change the bucket_locked state just
		 * before calling tcp_log_remove_id_node(), since that function
		 * will unlock the bucket.
		 */
		if (tln != NULL)
			uma_zfree(tcp_log_id_node_zone, tln);
		tln = tp->t_lin;
		tlb = NULL;
		bucket_locked = false;
		if (tcp_log_remove_id_node(inp, tp, NULL, NULL, &tree_locked)) {
			RECHECK_INP();

			/*
			 * If the TCPCB moved to a new bucket while we had
			 * dropped the lock, restart.
			 */
			if (tp->t_lib != NULL || tp->t_lin != NULL)
				goto restart;
		}

		/*
		 * Yay! We successfully removed the TCPCB from its old
		 * bucket. Phew!
		 *
		 * On to bigger and better things...
		 */
	}

	/* At this point, the TCPCB should not be in any bucket. */
	KASSERT(tp->t_lib == NULL, ("%s: tp->t_lib is not NULL", __func__));

	/*
	 * If the new ID is not empty, we need to now assign this TCPCB to a
	 * new bucket.
	 */
	if (*id) {
		/* Get a new tln, if we don't already have one to reuse. */
		if (tln == NULL) {
			tln = uma_zalloc(tcp_log_id_node_zone,
			    M_NOWAIT | M_ZERO);
			if (tln == NULL) {
				rv = ENOBUFS;
				goto done;
			}
			tln->tln_inp = inp;
			tln->tln_tp = tp;
		}

		/*
		 * Drop the INP lock for a bit. We don't need it, and dropping
		 * it prevents lock order reversals.
		 */
		INP_WUNLOCK(inp);

		/* Make sure we have at least a read lock on the tree. */
		tcp_log_id_validate_tree_lock(tree_locked);
		if (tree_locked == TREE_UNLOCKED) {
			TCPID_TREE_RLOCK();
			tree_locked = TREE_RLOCKED;
		}

refind:
		/*
		 * Remember how we constructed (struct tcp_log_id_bucket), so
		 * we can safely cast the id to it for the purposes of finding.
		 */
		KASSERT(tlb == NULL, ("%s:%d tlb unexpectedly non-NULL",
		    __func__, __LINE__));
		tmp_tlb = RB_FIND(tcp_log_id_tree, &tcp_log_id_head,
		    (struct tcp_log_id_bucket *) id);

		/*
		 * If we didn't find a matching bucket, we need to add a new
		 * one. This requires a write lock. But, of course, we will
		 * need to recheck some things when we re-acquire the lock.
		 */
		if (tmp_tlb == NULL && tree_locked != TREE_WLOCKED) {
			tree_locked = TREE_WLOCKED;
			if (!TCPID_TREE_UPGRADE()) {
				TCPID_TREE_RUNLOCK();
				TCPID_TREE_WLOCK();

				/*
				 * The tree may have changed while we were
				 * unlocked.
				 */
				goto refind;
			}
		}

		/* If we need to add a new bucket, do it now. */
		if (tmp_tlb == NULL) {
			/* Allocate new bucket. */
			tlb = uma_zalloc(tcp_log_id_bucket_zone, M_NOWAIT);
			if (tlb == NULL) {
				rv = ENOBUFS;
				goto done_noinp;
			}
			counter_u64_add(tcp_log_pcb_ids_cur, 1);
			counter_u64_add(tcp_log_pcb_ids_tot, 1);

			if ((tcp_log_auto_all == false) &&
			    tcp_log_auto_mode &&
			    tcp_log_selectauto()) {
				/* Save off the log state */
				tlb->tlb_logstate = tcp_log_auto_mode;
			} else
				tlb->tlb_logstate = TCP_LOG_STATE_OFF;
			tlb->tlb_loglimit = 0;
			tlb->tlb_tag[0] = '\0'; /* Default to an empty tag. */

			/*
			 * Copy the ID to the bucket.
			 * NB: Don't use strlcpy() unless you are sure
			 * we've always validated NULL termination.
			 *
			 * TODO: When I'm done writing this, see if we
			 * have correctly validated NULL termination and
			 * can use strlcpy(). :-)
			 */
			strncpy(tlb->tlb_id, id, TCP_LOG_ID_LEN - 1);
			tlb->tlb_id[TCP_LOG_ID_LEN - 1] = '\0';

			/*
			 * Take the refcount for the first node and go ahead
			 * and lock this. Note that we zero the tlb_mtx
			 * structure, since 0xdeadc0de flips the right bits
			 * for the code to think that this mutex has already
			 * been initialized. :-(
			 */
			SLIST_INIT(&tlb->tlb_head);
			refcount_init(&tlb->tlb_refcnt, 1);
			tlb->tlb_reqcnt = 1;
			memset(&tlb->tlb_mtx, 0, sizeof(struct mtx));
			TCPID_BUCKET_LOCK_INIT(tlb);
			TCPID_BUCKET_LOCK(tlb);
			bucket_locked = true;

#define	FREE_NEW_TLB()	do {					\
	TCPID_BUCKET_LOCK_DESTROY(tlb);				\
	uma_zfree(tcp_log_id_bucket_zone, tlb);			\
	counter_u64_add(tcp_log_pcb_ids_cur, (int64_t)-1);	\
	counter_u64_add(tcp_log_pcb_ids_tot, (int64_t)-1);	\
	bucket_locked = false;					\
	tlb = NULL;						\
} while (0)
			/*
			 * Relock the INP and make sure we are still
			 * unassigned.
			 */
			INP_WLOCK(inp);
			RECHECK_INP_CLEAN(FREE_NEW_TLB());
			if (tp->t_lib != NULL) {
				FREE_NEW_TLB();
				goto restart;
			}

			/* Add the new bucket to the tree. */
			tmp_tlb = RB_INSERT(tcp_log_id_tree, &tcp_log_id_head,
			    tlb);
			KASSERT(tmp_tlb == NULL,
			    ("%s: Unexpected conflicting bucket (%p) while "
			    "adding new bucket (%p)", __func__, tmp_tlb, tlb));

			/*
			 * If we found a conflicting bucket, free the new
			 * one we made and fall through to use the existing
			 * bucket.
			 */
			if (tmp_tlb != NULL) {
				FREE_NEW_TLB();
				INP_WUNLOCK(inp);
			}
#undef	FREE_NEW_TLB
		}

		/* If we found an existing bucket, use it. */
		if (tmp_tlb != NULL) {
			tlb = tmp_tlb;
			TCPID_BUCKET_LOCK(tlb);
			bucket_locked = true;

			/*
			 * Relock the INP and make sure we are still
			 * unassigned.
			 */
			INP_UNLOCK_ASSERT(inp);
			INP_WLOCK(inp);
			RECHECK_INP();
			if (tp->t_lib != NULL) {
				TCPID_BUCKET_UNLOCK(tlb);
				bucket_locked = false;
				tlb = NULL;
				goto restart;
			}

			/* Take a reference on the bucket. */
			TCPID_BUCKET_REF(tlb);

			/* Record the request. */
			tcp_log_increment_reqcnt(tlb);
		}

		tcp_log_grow_tlb(tlb->tlb_id, tp);

		/* Add the new node to the list. */
		SLIST_INSERT_HEAD(&tlb->tlb_head, tln, tln_list);
		tp->t_lib = tlb;
		tp->t_lin = tln;
		if (tp->t_lib->tlb_logstate > TCP_LOG_STATE_OFF) {
			/* Clone in any logging */

			tp->_t_logstate = tp->t_lib->tlb_logstate;
		}
		if (tp->t_lib->tlb_loglimit) {
			/* The loglimit too */

			tp->t_loglimit = tp->t_lib->tlb_loglimit;
		}
		tln = NULL;
	}

	rv = 0;

done:
	/* Unlock things, as needed, and return. */
	INP_WUNLOCK(inp);
done_noinp:
	INP_UNLOCK_ASSERT(inp);
	if (bucket_locked) {
		TCPID_BUCKET_LOCK_ASSERT(tlb);
		TCPID_BUCKET_UNLOCK(tlb);
	} else if (tlb != NULL)
		TCPID_BUCKET_UNLOCK_ASSERT(tlb);
	if (tree_locked == TREE_WLOCKED) {
		TCPID_TREE_WLOCK_ASSERT();
		TCPID_TREE_WUNLOCK();
	} else if (tree_locked == TREE_RLOCKED) {
		TCPID_TREE_RLOCK_ASSERT();
		TCPID_TREE_RUNLOCK();
	} else
		TCPID_TREE_UNLOCK_ASSERT();
10132529f56eSJonathan T. Looney if (tln != NULL)
10148c47d8f5SAlan Somers uma_zfree(tcp_log_id_node_zone, tln);
10152529f56eSJonathan T. Looney return (rv);
10162529f56eSJonathan T. Looney }
10172529f56eSJonathan T. Looney
10182529f56eSJonathan T. Looney /*
10192529f56eSJonathan T. Looney * Get the TCP log ID for a TCPCB.
10202529f56eSJonathan T. Looney * Called with INPCB locked.
10212529f56eSJonathan T. Looney * 'buf' must point to a buffer that is at least TCP_LOG_ID_LEN bytes long.
10222529f56eSJonathan T. Looney * Returns number of bytes copied.
10232529f56eSJonathan T. Looney */
10242529f56eSJonathan T. Looney size_t
10252529f56eSJonathan T. Looney tcp_log_get_id(struct tcpcb *tp, char *buf)
10262529f56eSJonathan T. Looney {
10272529f56eSJonathan T. Looney size_t len;
10282529f56eSJonathan T. Looney
10299eb0e832SGleb Smirnoff INP_LOCK_ASSERT(tptoinpcb(tp));
10302529f56eSJonathan T. Looney if (tp->t_lib != NULL) {
10312529f56eSJonathan T. Looney len = strlcpy(buf, tp->t_lib->tlb_id, TCP_LOG_ID_LEN);
10322529f56eSJonathan T. Looney KASSERT(len < TCP_LOG_ID_LEN,
10332529f56eSJonathan T. Looney ("%s:%d: tp->t_lib->tlb_id too long (%zu)",
10342529f56eSJonathan T. Looney __func__, __LINE__, len));
10352529f56eSJonathan T. Looney } else {
10362529f56eSJonathan T. Looney *buf = '\0';
10372529f56eSJonathan T. Looney len = 0;
10382529f56eSJonathan T. Looney }
10392529f56eSJonathan T. Looney return (len);
10402529f56eSJonathan T. Looney }
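
/*
 * Minimal caller sketch (illustrative only): the ID is copied out while
 * the inpcb lock is held, and the buffer is always NUL-terminated:
 *
 *	char id[TCP_LOG_ID_LEN];
 *	size_t len;
 *
 *	INP_WLOCK(inp);
 *	len = tcp_log_get_id(tp, id);
 *	INP_WUNLOCK(inp);
 */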
10412529f56eSJonathan T. Looney
10422529f56eSJonathan T. Looney /*
1043a9a08eceSRandall Stewart * Get the tag associated with the TCPCB's log ID.
1044a9a08eceSRandall Stewart * Called with INPCB locked. Returns with it unlocked.
1045a9a08eceSRandall Stewart * 'buf' must point to a buffer that is at least TCP_LOG_TAG_LEN bytes long.
1046a9a08eceSRandall Stewart * Returns number of bytes copied.
1047a9a08eceSRandall Stewart */
1048a9a08eceSRandall Stewart size_t
1049a9a08eceSRandall Stewart tcp_log_get_tag(struct tcpcb *tp, char *buf)
1050a9a08eceSRandall Stewart {
10519eb0e832SGleb Smirnoff struct inpcb *inp = tptoinpcb(tp);
1052a9a08eceSRandall Stewart struct tcp_log_id_bucket *tlb;
1053a9a08eceSRandall Stewart size_t len;
1054a9a08eceSRandall Stewart int tree_locked;
1055a9a08eceSRandall Stewart
10569eb0e832SGleb Smirnoff INP_WLOCK_ASSERT(inp);
1057a9a08eceSRandall Stewart
1058a9a08eceSRandall Stewart tree_locked = TREE_UNLOCKED;
1059a9a08eceSRandall Stewart tlb = tp->t_lib;
1060a9a08eceSRandall Stewart
1061a9a08eceSRandall Stewart if (tlb != NULL) {
1062a9a08eceSRandall Stewart TCPID_BUCKET_REF(tlb);
10639eb0e832SGleb Smirnoff INP_WUNLOCK(inp);
1064a9a08eceSRandall Stewart TCPID_BUCKET_LOCK(tlb);
1065a9a08eceSRandall Stewart len = strlcpy(buf, tlb->tlb_tag, TCP_LOG_TAG_LEN);
1066a9a08eceSRandall Stewart KASSERT(len < TCP_LOG_TAG_LEN,
1067a9a08eceSRandall Stewart ("%s:%d: tp->t_lib->tlb_tag too long (%zu)",
1068a9a08eceSRandall Stewart __func__, __LINE__, len));
1069a9a08eceSRandall Stewart if (!tcp_log_unref_bucket(tlb, &tree_locked, NULL))
1070a9a08eceSRandall Stewart TCPID_BUCKET_UNLOCK(tlb);
1071a9a08eceSRandall Stewart
1072a9a08eceSRandall Stewart if (tree_locked == TREE_WLOCKED) {
1073a9a08eceSRandall Stewart TCPID_TREE_WLOCK_ASSERT();
1074a9a08eceSRandall Stewart TCPID_TREE_WUNLOCK();
1075a9a08eceSRandall Stewart } else if (tree_locked == TREE_RLOCKED) {
1076a9a08eceSRandall Stewart TCPID_TREE_RLOCK_ASSERT();
1077a9a08eceSRandall Stewart TCPID_TREE_RUNLOCK();
1078a9a08eceSRandall Stewart } else
1079a9a08eceSRandall Stewart TCPID_TREE_UNLOCK_ASSERT();
1080a9a08eceSRandall Stewart } else {
10819eb0e832SGleb Smirnoff INP_WUNLOCK(inp);
1082a9a08eceSRandall Stewart *buf = '\0';
1083a9a08eceSRandall Stewart len = 0;
1084a9a08eceSRandall Stewart }
1085a9a08eceSRandall Stewart
1086a9a08eceSRandall Stewart return (len);
1087a9a08eceSRandall Stewart }
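
/*
 * Minimal caller sketch (illustrative only): note that, unlike
 * tcp_log_get_id(), this function returns with the inpcb unlocked:
 *
 *	char tag[TCP_LOG_TAG_LEN];
 *	size_t len;
 *
 *	INP_WLOCK(inp);
 *	len = tcp_log_get_tag(tp, tag);
 *
 * The inpcb is no longer locked once the call returns.
 */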
1088a9a08eceSRandall Stewart
1089a9a08eceSRandall Stewart /*
10902529f56eSJonathan T. Looney * Get number of connections with the same log ID.
10912529f56eSJonathan T. Looney * Log ID is taken from given TCPCB.
10922529f56eSJonathan T. Looney * Called with INPCB locked.
10932529f56eSJonathan T. Looney */
10942529f56eSJonathan T. Looney u_int
10952529f56eSJonathan T. Looney tcp_log_get_id_cnt(struct tcpcb *tp)
10962529f56eSJonathan T. Looney {
10972529f56eSJonathan T. Looney
10989eb0e832SGleb Smirnoff INP_WLOCK_ASSERT(tptoinpcb(tp));
10992529f56eSJonathan T. Looney return ((tp->t_lib == NULL) ? 0 : tp->t_lib->tlb_refcnt);
11002529f56eSJonathan T. Looney }
11012529f56eSJonathan T. Looney
11022529f56eSJonathan T. Looney #ifdef TCPLOG_DEBUG_RINGBUF
11032529f56eSJonathan T. Looney /*
11042529f56eSJonathan T. Looney * Functions/macros to increment/decrement reference count for a log
11052529f56eSJonathan T. Looney * entry. This should catch when we do a double-free/double-remove or
11062529f56eSJonathan T. Looney * a double-add.
11072529f56eSJonathan T. Looney */
11082529f56eSJonathan T. Looney static inline void
11092529f56eSJonathan T. Looney _tcp_log_entry_refcnt_add(struct tcp_log_mem *log_entry, const char *func,
11102529f56eSJonathan T. Looney int line)
11112529f56eSJonathan T. Looney {
11122529f56eSJonathan T. Looney int refcnt;
11132529f56eSJonathan T. Looney
11142529f56eSJonathan T. Looney refcnt = atomic_fetchadd_int(&log_entry->tlm_refcnt, 1);
11152529f56eSJonathan T. Looney if (refcnt != 0)
11162529f56eSJonathan T. Looney panic("%s:%d: log_entry(%p)->tlm_refcnt is %d (expected 0)",
11172529f56eSJonathan T. Looney func, line, log_entry, refcnt);
11182529f56eSJonathan T. Looney }
11192529f56eSJonathan T. Looney #define tcp_log_entry_refcnt_add(l) \
11202529f56eSJonathan T. Looney _tcp_log_entry_refcnt_add((l), __func__, __LINE__)
11212529f56eSJonathan T. Looney
11222529f56eSJonathan T. Looney static inline void
11232529f56eSJonathan T. Looney _tcp_log_entry_refcnt_rem(struct tcp_log_mem *log_entry, const char *func,
11242529f56eSJonathan T. Looney int line)
11252529f56eSJonathan T. Looney {
11262529f56eSJonathan T. Looney int refcnt;
11272529f56eSJonathan T. Looney
11282529f56eSJonathan T. Looney refcnt = atomic_fetchadd_int(&log_entry->tlm_refcnt, -1);
11292529f56eSJonathan T. Looney if (refcnt != 1)
11302529f56eSJonathan T. Looney panic("%s:%d: log_entry(%p)->tlm_refcnt is %d (expected 1)",
11312529f56eSJonathan T. Looney func, line, log_entry, refcnt);
11322529f56eSJonathan T. Looney }
11332529f56eSJonathan T. Looney #define tcp_log_entry_refcnt_rem(l) \
11342529f56eSJonathan T. Looney _tcp_log_entry_refcnt_rem((l), __func__, __LINE__)
11352529f56eSJonathan T. Looney
11362529f56eSJonathan T. Looney #else /* !TCPLOG_DEBUG_RINGBUF */
11372529f56eSJonathan T. Looney
11382529f56eSJonathan T. Looney #define tcp_log_entry_refcnt_add(l)
11392529f56eSJonathan T. Looney #define tcp_log_entry_refcnt_rem(l)
11402529f56eSJonathan T. Looney
11412529f56eSJonathan T. Looney #endif
11422529f56eSJonathan T. Looney
11432529f56eSJonathan T. Looney /*
11442529f56eSJonathan T. Looney * Cleanup after removing a log entry, but only decrement the count if we
11452529f56eSJonathan T. Looney * are running INVARIANTS.
11462529f56eSJonathan T. Looney */
11472529f56eSJonathan T. Looney static inline void
11482529f56eSJonathan T. Looney tcp_log_free_log_common(struct tcp_log_mem *log_entry, int *count __unused)
11492529f56eSJonathan T. Looney {
11502529f56eSJonathan T. Looney
11512529f56eSJonathan T. Looney uma_zfree(tcp_log_zone, log_entry);
11522529f56eSJonathan T. Looney #ifdef INVARIANTS
11532529f56eSJonathan T. Looney (*count)--;
11542529f56eSJonathan T. Looney KASSERT(*count >= 0,
11552529f56eSJonathan T. Looney ("%s: count unexpectedly negative", __func__));
11562529f56eSJonathan T. Looney #endif
11572529f56eSJonathan T. Looney }
11582529f56eSJonathan T. Looney
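/*
 * Free every log entry on the given list, dropping the debug reference
 * count for each entry and (under INVARIANTS) decrementing *count as
 * entries are released.
 */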
11592529f56eSJonathan T. Looney static void
11602529f56eSJonathan T. Looney tcp_log_free_entries(struct tcp_log_stailq *head, int *count)
11612529f56eSJonathan T. Looney {
11622529f56eSJonathan T. Looney struct tcp_log_mem *log_entry;
11632529f56eSJonathan T. Looney
11642529f56eSJonathan T. Looney /* Free the entries. */
11652529f56eSJonathan T. Looney while ((log_entry = STAILQ_FIRST(head)) != NULL) {
11662529f56eSJonathan T. Looney STAILQ_REMOVE_HEAD(head, tlm_queue);
11672529f56eSJonathan T. Looney tcp_log_entry_refcnt_rem(log_entry);
11682529f56eSJonathan T. Looney tcp_log_free_log_common(log_entry, count);
11692529f56eSJonathan T. Looney }
11702529f56eSJonathan T. Looney }
11712529f56eSJonathan T. Looney
11722529f56eSJonathan T. Looney /* Cleanup after removing a log entry. */
11732529f56eSJonathan T. Looney static inline void
11742529f56eSJonathan T. Looney tcp_log_remove_log_cleanup(struct tcpcb *tp, struct tcp_log_mem *log_entry)
11752529f56eSJonathan T. Looney {
11762529f56eSJonathan T. Looney uma_zfree(tcp_log_zone, log_entry);
11772529f56eSJonathan T. Looney tp->t_lognum--;
11782529f56eSJonathan T. Looney KASSERT(tp->t_lognum >= 0,
11792529f56eSJonathan T. Looney ("%s: tp->t_lognum unexpectedly negative", __func__));
11802529f56eSJonathan T. Looney }
11812529f56eSJonathan T. Looney
11822529f56eSJonathan T. Looney /* Remove a log entry from the head of a list. */
11832529f56eSJonathan T. Looney static inline void
11842529f56eSJonathan T. Looney tcp_log_remove_log_head(struct tcpcb *tp, struct tcp_log_mem *log_entry)
11852529f56eSJonathan T. Looney {
11862529f56eSJonathan T. Looney
11872529f56eSJonathan T. Looney KASSERT(log_entry == STAILQ_FIRST(&tp->t_logs),
11882529f56eSJonathan T. Looney ("%s: attempt to remove non-HEAD log entry", __func__));
11892529f56eSJonathan T. Looney STAILQ_REMOVE_HEAD(&tp->t_logs, tlm_queue);
11902529f56eSJonathan T. Looney tcp_log_entry_refcnt_rem(log_entry);
11912529f56eSJonathan T. Looney tcp_log_remove_log_cleanup(tp, log_entry);
11922529f56eSJonathan T. Looney }
11932529f56eSJonathan T. Looney
11942529f56eSJonathan T. Looney #ifdef TCPLOG_DEBUG_RINGBUF
11952529f56eSJonathan T. Looney /*
11962529f56eSJonathan T. Looney * Initialize the log entry's reference count, which we want to
11972529f56eSJonathan T. Looney * survive allocations.
11982529f56eSJonathan T. Looney */
11992529f56eSJonathan T. Looney static int
12002529f56eSJonathan T. Looney tcp_log_zone_init(void *mem, int size, int flags __unused)
12012529f56eSJonathan T. Looney {
12022529f56eSJonathan T. Looney struct tcp_log_mem *tlm;
12032529f56eSJonathan T. Looney
12042529f56eSJonathan T. Looney KASSERT(size >= sizeof(struct tcp_log_mem),
12052529f56eSJonathan T. Looney ("%s: unexpectedly short (%d) allocation", __func__, size));
12062529f56eSJonathan T. Looney tlm = (struct tcp_log_mem *)mem;
12072529f56eSJonathan T. Looney tlm->tlm_refcnt = 0;
12082529f56eSJonathan T. Looney return (0);
12092529f56eSJonathan T. Looney }
12102529f56eSJonathan T. Looney
12112529f56eSJonathan T. Looney /*
12122529f56eSJonathan T. Looney * Double check that the refcnt is zero on allocation and return.
12132529f56eSJonathan T. Looney */
12142529f56eSJonathan T. Looney static int
12152529f56eSJonathan T. Looney tcp_log_zone_ctor(void *mem, int size, void *args __unused, int flags __unused)
12162529f56eSJonathan T. Looney {
12172529f56eSJonathan T. Looney struct tcp_log_mem *tlm;
12182529f56eSJonathan T. Looney
12192529f56eSJonathan T. Looney KASSERT(size >= sizeof(struct tcp_log_mem),
12202529f56eSJonathan T. Looney ("%s: unexpectedly short (%d) allocation", __func__, size));
12212529f56eSJonathan T. Looney tlm = (struct tcp_log_mem *)mem;
12222529f56eSJonathan T. Looney if (tlm->tlm_refcnt != 0)
12232529f56eSJonathan T. Looney panic("%s:%d: tlm(%p)->tlm_refcnt is %d (expected 0)",
12242529f56eSJonathan T. Looney __func__, __LINE__, tlm, tlm->tlm_refcnt);
12252529f56eSJonathan T. Looney return (0);
12262529f56eSJonathan T. Looney }
12272529f56eSJonathan T. Looney
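/*
 * Verify that the refcnt is back to zero when an entry is returned to
 * the zone.
 */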
12282529f56eSJonathan T. Looney static void
12292529f56eSJonathan T. Looney tcp_log_zone_dtor(void *mem, int size, void *args __unused)
12302529f56eSJonathan T. Looney {
12312529f56eSJonathan T. Looney struct tcp_log_mem *tlm;
12322529f56eSJonathan T. Looney
12332529f56eSJonathan T. Looney KASSERT(size >= sizeof(struct tcp_log_mem),
12342529f56eSJonathan T. Looney ("%s: unexpectedly short (%d) allocation", __func__, size));
12352529f56eSJonathan T. Looney tlm = (struct tcp_log_mem *)mem;
12362529f56eSJonathan T. Looney if (tlm->tlm_refcnt != 0)
12372529f56eSJonathan T. Looney panic("%s:%d: tlm(%p)->tlm_refcnt is %d (expected 0)",
12382529f56eSJonathan T. Looney __func__, __LINE__, tlm, tlm->tlm_refcnt);
12392529f56eSJonathan T. Looney }
12402529f56eSJonathan T. Looney #endif /* TCPLOG_DEBUG_RINGBUF */
12412529f56eSJonathan T. Looney
12422529f56eSJonathan T. Looney /* Do global initialization. */
12432529f56eSJonathan T. Looney void
12442529f56eSJonathan T. Looney tcp_log_init(void)
12452529f56eSJonathan T. Looney {
12462529f56eSJonathan T. Looney
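/*
 * Create the UMA zones: one for the log entries themselves (subject to
 * a global cap), and one each for the log-ID buckets and nodes.
 */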
12472529f56eSJonathan T. Looney tcp_log_zone = uma_zcreate("tcp_log", sizeof(struct tcp_log_mem),
12482529f56eSJonathan T. Looney #ifdef TCPLOG_DEBUG_RINGBUF
12492529f56eSJonathan T. Looney tcp_log_zone_ctor, tcp_log_zone_dtor, tcp_log_zone_init,
12502529f56eSJonathan T. Looney #else
12512529f56eSJonathan T. Looney NULL, NULL, NULL,
12522529f56eSJonathan T. Looney #endif
12532529f56eSJonathan T. Looney NULL, UMA_ALIGN_PTR, 0);
12542529f56eSJonathan T. Looney (void)uma_zone_set_max(tcp_log_zone, TCP_LOG_BUF_DEFAULT_GLOBAL_LIMIT);
12558c47d8f5SAlan Somers tcp_log_id_bucket_zone = uma_zcreate("tcp_log_id_bucket",
12562529f56eSJonathan T. Looney sizeof(struct tcp_log_id_bucket), NULL, NULL, NULL, NULL,
12572529f56eSJonathan T. Looney UMA_ALIGN_PTR, 0);
12588c47d8f5SAlan Somers tcp_log_id_node_zone = uma_zcreate("tcp_log_id_node",
12592529f56eSJonathan T. Looney sizeof(struct tcp_log_id_node), NULL, NULL, NULL, NULL,
12602529f56eSJonathan T. Looney UMA_ALIGN_PTR, 0);
12612529f56eSJonathan T. Looney #ifdef TCPLOG_DEBUG_COUNTERS
12622529f56eSJonathan T. Looney tcp_log_queued = counter_u64_alloc(M_WAITOK);
12632529f56eSJonathan T. Looney tcp_log_que_fail1 = counter_u64_alloc(M_WAITOK);
12642529f56eSJonathan T. Looney tcp_log_que_fail2 = counter_u64_alloc(M_WAITOK);
12652529f56eSJonathan T. Looney tcp_log_que_fail3 = counter_u64_alloc(M_WAITOK);
12662529f56eSJonathan T. Looney tcp_log_que_fail4 = counter_u64_alloc(M_WAITOK);
12672529f56eSJonathan T. Looney tcp_log_que_fail5 = counter_u64_alloc(M_WAITOK);
12682529f56eSJonathan T. Looney tcp_log_que_copyout = counter_u64_alloc(M_WAITOK);
12692529f56eSJonathan T. Looney tcp_log_que_read = counter_u64_alloc(M_WAITOK);
12702529f56eSJonathan T. Looney tcp_log_que_freed = counter_u64_alloc(M_WAITOK);
12712529f56eSJonathan T. Looney #endif
1272a9a08eceSRandall Stewart tcp_log_pcb_ids_cur = counter_u64_alloc(M_WAITOK);
1273a9a08eceSRandall Stewart tcp_log_pcb_ids_tot = counter_u64_alloc(M_WAITOK);
12742529f56eSJonathan T. Looney
12752529f56eSJonathan T. Looney rw_init_flags(&tcp_id_tree_lock, "TCP ID tree", RW_NEW);
12762529f56eSJonathan T. Looney mtx_init(&tcp_log_expireq_mtx, "TCP log expireq", NULL, MTX_DEF);
12772529f56eSJonathan T. Looney callout_init(&tcp_log_expireq_callout, 1);
12782529f56eSJonathan T. Looney }
12792529f56eSJonathan T. Looney
12802529f56eSJonathan T. Looney /* Do per-TCPCB initialization. */
12812529f56eSJonathan T. Looney void
12822529f56eSJonathan T. Looney tcp_log_tcpcbinit(struct tcpcb *tp)
12832529f56eSJonathan T. Looney {
12842529f56eSJonathan T. Looney
12852529f56eSJonathan T. Looney /* A new TCPCB should start out zero-initialized. */
12862529f56eSJonathan T. Looney STAILQ_INIT(&tp->t_logs);
12872529f56eSJonathan T. Looney
12882529f56eSJonathan T. Looney /*
12892529f56eSJonathan T. Looney * If we are doing auto-capturing, figure out whether we will capture
12902529f56eSJonathan T. Looney * this session.
12912529f56eSJonathan T. Looney */
1292a9a08eceSRandall Stewart tp->t_loglimit = tcp_log_session_limit;
1293a9a08eceSRandall Stewart if ((tcp_log_auto_all == true) &&
1294a9a08eceSRandall Stewart tcp_log_auto_mode &&
1295a9a08eceSRandall Stewart tcp_log_selectauto()) {
129669c7c811SRandall Stewart tp->_t_logstate = tcp_log_auto_mode;
12972529f56eSJonathan T. Looney tp->t_flags2 |= TF2_LOG_AUTO;
12982529f56eSJonathan T. Looney }
12992529f56eSJonathan T. Looney }
13002529f56eSJonathan T. Looney
13012529f56eSJonathan T. Looney /* Remove expired log ID nodes from the expiry queue and free their log entries. */
13022529f56eSJonathan T. Looney static void
13032529f56eSJonathan T. Looney tcp_log_expire(void *unused __unused)
13042529f56eSJonathan T. Looney {
13052529f56eSJonathan T. Looney struct tcp_log_id_bucket *tlb;
13062529f56eSJonathan T. Looney struct tcp_log_id_node *tln;
13072529f56eSJonathan T. Looney sbintime_t expiry_limit;
13082529f56eSJonathan T. Looney int tree_locked;
13092529f56eSJonathan T. Looney
13102529f56eSJonathan T. Looney TCPLOG_EXPIREQ_LOCK();
13112529f56eSJonathan T. Looney if (callout_pending(&tcp_log_expireq_callout)) {
13122529f56eSJonathan T. Looney /* Callout was reset. */
13132529f56eSJonathan T. Looney TCPLOG_EXPIREQ_UNLOCK();
13142529f56eSJonathan T. Looney return;
13152529f56eSJonathan T. Looney }
13162529f56eSJonathan T. Looney
13172529f56eSJonathan T. Looney /*
13182529f56eSJonathan T. Looney * Process entries until we reach one that expires too far in the
13192529f56eSJonathan T. Looney * future. Look one second in the future.
13202529f56eSJonathan T. Looney */
13212529f56eSJonathan T. Looney expiry_limit = getsbinuptime() + SBT_1S;
13222529f56eSJonathan T. Looney tree_locked = TREE_UNLOCKED;
13232529f56eSJonathan T. Looney
13242529f56eSJonathan T. Looney while ((tln = STAILQ_FIRST(&tcp_log_expireq_head)) != NULL &&
13252529f56eSJonathan T. Looney tln->tln_expiretime <= expiry_limit) {
13262529f56eSJonathan T. Looney if (!callout_active(&tcp_log_expireq_callout)) {
13272529f56eSJonathan T. Looney /*
13282529f56eSJonathan T. Looney * Callout was stopped. I guess we should
13292529f56eSJonathan T. Looney * just quit at this point.
13302529f56eSJonathan T. Looney */
13312529f56eSJonathan T. Looney TCPLOG_EXPIREQ_UNLOCK();
13322529f56eSJonathan T. Looney return;
13332529f56eSJonathan T. Looney }
13342529f56eSJonathan T. Looney
13352529f56eSJonathan T. Looney /*
13362529f56eSJonathan T. Looney * Remove the node from the head of the list and unlock
13372529f56eSJonathan T. Looney * the list. Change the expiry time to SBT_MAX as a signal
13382529f56eSJonathan T. Looney * to other threads that we now own this.
13392529f56eSJonathan T. Looney */
13402529f56eSJonathan T. Looney STAILQ_REMOVE_HEAD(&tcp_log_expireq_head, tln_expireq);
13412529f56eSJonathan T. Looney tln->tln_expiretime = SBT_MAX;
13422529f56eSJonathan T. Looney TCPLOG_EXPIREQ_UNLOCK();
13432529f56eSJonathan T. Looney
13442529f56eSJonathan T. Looney /*
13452529f56eSJonathan T. Looney * Remove the node from the bucket.
13462529f56eSJonathan T. Looney */
13472529f56eSJonathan T. Looney tlb = tln->tln_bucket;
13482529f56eSJonathan T. Looney TCPID_BUCKET_LOCK(tlb);
13492529f56eSJonathan T. Looney if (tcp_log_remove_id_node(NULL, NULL, tlb, tln, &tree_locked)) {
13502529f56eSJonathan T. Looney tcp_log_id_validate_tree_lock(tree_locked);
13512529f56eSJonathan T. Looney if (tree_locked == TREE_WLOCKED)
13522529f56eSJonathan T. Looney TCPID_TREE_WUNLOCK();
13532529f56eSJonathan T. Looney else
13542529f56eSJonathan T. Looney TCPID_TREE_RUNLOCK();
13552529f56eSJonathan T. Looney tree_locked = TREE_UNLOCKED;
13562529f56eSJonathan T. Looney }
13572529f56eSJonathan T. Looney
13582529f56eSJonathan T. Looney /* Drop the INP reference. */
13592529f56eSJonathan T. Looney INP_WLOCK(tln->tln_inp);
13602529f56eSJonathan T. Looney if (!in_pcbrele_wlocked(tln->tln_inp))
13612529f56eSJonathan T. Looney INP_WUNLOCK(tln->tln_inp);
13622529f56eSJonathan T. Looney
13632529f56eSJonathan T. Looney /* Free the log records. */
13642529f56eSJonathan T. Looney tcp_log_free_entries(&tln->tln_entries, &tln->tln_count);
13652529f56eSJonathan T. Looney
13662529f56eSJonathan T. Looney /* Free the node. */
13678c47d8f5SAlan Somers uma_zfree(tcp_log_id_node_zone, tln);
13682529f56eSJonathan T. Looney
13692529f56eSJonathan T. Looney /* Relock the expiry queue. */
13702529f56eSJonathan T. Looney TCPLOG_EXPIREQ_LOCK();
13712529f56eSJonathan T. Looney }
13722529f56eSJonathan T. Looney
13732529f56eSJonathan T. Looney /*
13742529f56eSJonathan T. Looney * We've expired all the entries we can. Do we need to reschedule
13752529f56eSJonathan T. Looney * ourselves?
13762529f56eSJonathan T. Looney */
13772529f56eSJonathan T. Looney callout_deactivate(&tcp_log_expireq_callout);
13782529f56eSJonathan T. Looney if (tln != NULL) {
13792529f56eSJonathan T. Looney /*
13802529f56eSJonathan T. Looney * Get max(now + TCP_LOG_EXPIRE_INTVL, tln->tln_expiretime) and
13812529f56eSJonathan T. Looney * set the next callout to that. (This helps ensure we generally
13822529f56eSJonathan T. Looney * run the callout no more often than desired.)
13832529f56eSJonathan T. Looney */
13842529f56eSJonathan T. Looney expiry_limit = getsbinuptime() + TCP_LOG_EXPIRE_INTVL;
13852529f56eSJonathan T. Looney if (expiry_limit < tln->tln_expiretime)
13862529f56eSJonathan T. Looney expiry_limit = tln->tln_expiretime;
13872529f56eSJonathan T. Looney callout_reset_sbt(&tcp_log_expireq_callout, expiry_limit,
13882529f56eSJonathan T. Looney SBT_1S, tcp_log_expire, NULL, C_ABSOLUTE);
13892529f56eSJonathan T. Looney }
13902529f56eSJonathan T. Looney
13912529f56eSJonathan T. Looney /* We're done. */
13922529f56eSJonathan T. Looney TCPLOG_EXPIREQ_UNLOCK();
13932529f56eSJonathan T. Looney return;
13942529f56eSJonathan T. Looney }
13952529f56eSJonathan T. Looney
13962529f56eSJonathan T. Looney /*
13972529f56eSJonathan T. Looney * Move log data from the TCPCB to a new node. This will reset the TCPCB log
13982529f56eSJonathan T. Looney * entries and log count; however, it will not touch other things from the
13992529f56eSJonathan T. Looney * TCPCB (e.g. t_lin, t_lib).
14002529f56eSJonathan T. Looney *
14012529f56eSJonathan T. Looney * NOTE: Must hold a lock on the INP.
14022529f56eSJonathan T. Looney */
14032529f56eSJonathan T. Looney static void
14042529f56eSJonathan T. Looney tcp_log_move_tp_to_node(struct tcpcb *tp, struct tcp_log_id_node *tln)
14052529f56eSJonathan T. Looney {
14069eb0e832SGleb Smirnoff struct inpcb *inp = tptoinpcb(tp);
14072529f56eSJonathan T. Looney
14089eb0e832SGleb Smirnoff INP_WLOCK_ASSERT(inp);
14092529f56eSJonathan T. Looney
14109eb0e832SGleb Smirnoff tln->tln_ie = inp->inp_inc.inc_ie;
14119eb0e832SGleb Smirnoff if (inp->inp_inc.inc_flags & INC_ISIPV6)
14122529f56eSJonathan T. Looney tln->tln_af = AF_INET6;
14132529f56eSJonathan T. Looney else
14142529f56eSJonathan T. Looney tln->tln_af = AF_INET;
14152529f56eSJonathan T. Looney tln->tln_entries = tp->t_logs;
14162529f56eSJonathan T. Looney tln->tln_count = tp->t_lognum;
14172529f56eSJonathan T. Looney tln->tln_bucket = tp->t_lib;
14182529f56eSJonathan T. Looney
14192529f56eSJonathan T. Looney /* Clear information from the PCB. */
14202529f56eSJonathan T. Looney STAILQ_INIT(&tp->t_logs);
14212529f56eSJonathan T. Looney tp->t_lognum = 0;
14222529f56eSJonathan T. Looney }
14232529f56eSJonathan T. Looney
14242529f56eSJonathan T. Looney /* Do per-TCPCB cleanup */
14252529f56eSJonathan T. Looney void
14262529f56eSJonathan T. Looney tcp_log_tcpcbfini(struct tcpcb *tp)
14272529f56eSJonathan T. Looney {
14282529f56eSJonathan T. Looney struct tcp_log_id_node *tln, *tln_first;
14292529f56eSJonathan T. Looney struct tcp_log_mem *log_entry;
14302529f56eSJonathan T. Looney sbintime_t callouttime;
14312529f56eSJonathan T. Looney
14339eb0e832SGleb Smirnoff INP_WLOCK_ASSERT(tptoinpcb(tp));
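/*
 * If logging is still enabled, record the final state of the
 * connection: optional TCP_ACCOUNTING counters, followed by a
 * TCP_LOG_CONNEND event carrying t_end_info.
 */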
143469c7c811SRandall Stewart if (tp->_t_logstate) {
143569c7c811SRandall Stewart union tcp_log_stackspecific log;
143669c7c811SRandall Stewart struct timeval tv;
1437303246dcSRandall Stewart #ifdef TCP_ACCOUNTING
1438303246dcSRandall Stewart struct tcp_log_buffer *lgb;
143969c7c811SRandall Stewart int i;
14402529f56eSJonathan T. Looney
144169c7c811SRandall Stewart memset(&log, 0, sizeof(log));
144269c7c811SRandall Stewart if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
144369c7c811SRandall Stewart for (i = 0; i < TCP_NUM_CNT_COUNTERS; i++) {
144469c7c811SRandall Stewart log.u_raw.u64_flex[i] = tp->tcp_cnt_counters[i];
144569c7c811SRandall Stewart }
144669c7c811SRandall Stewart lgb = tcp_log_event(tp, NULL,
144769c7c811SRandall Stewart NULL,
144869c7c811SRandall Stewart NULL,
144969c7c811SRandall Stewart TCP_LOG_ACCOUNTING, 0,
145069c7c811SRandall Stewart 0, &log, false, NULL, NULL, 0, &tv);
145137229fedSRandall Stewart if (lgb != NULL) {
145269c7c811SRandall Stewart lgb->tlb_flex1 = TCP_NUM_CNT_COUNTERS;
145369c7c811SRandall Stewart lgb->tlb_flex2 = 1;
145437229fedSRandall Stewart } else
145537229fedSRandall Stewart goto skip_out;
145669c7c811SRandall Stewart for (i = 0; i < TCP_NUM_CNT_COUNTERS; i++) {
145769c7c811SRandall Stewart log.u_raw.u64_flex[i] = tp->tcp_proc_time[i];
145869c7c811SRandall Stewart }
145969c7c811SRandall Stewart lgb = tcp_log_event(tp, NULL,
146069c7c811SRandall Stewart NULL,
146169c7c811SRandall Stewart NULL,
146269c7c811SRandall Stewart TCP_LOG_ACCOUNTING, 0,
146369c7c811SRandall Stewart 0, &log, false, NULL, NULL, 0, &tv);
146437229fedSRandall Stewart if (lgb != NULL) {
146569c7c811SRandall Stewart lgb->tlb_flex1 = TCP_NUM_CNT_COUNTERS;
146669c7c811SRandall Stewart lgb->tlb_flex2 = 2;
146769c7c811SRandall Stewart }
146837229fedSRandall Stewart }
146937229fedSRandall Stewart skip_out:
1470303246dcSRandall Stewart #endif
147169c7c811SRandall Stewart log.u_bbr.timeStamp = tcp_get_usecs(&tv);
147269c7c811SRandall Stewart log.u_bbr.cur_del_rate = tp->t_end_info;
1473303246dcSRandall Stewart (void)tcp_log_event(tp, NULL,
147469c7c811SRandall Stewart NULL,
147569c7c811SRandall Stewart NULL,
147669c7c811SRandall Stewart TCP_LOG_CONNEND, 0,
1477303246dcSRandall Stewart 0, &log, false, NULL, NULL, 0, &tv);
147869c7c811SRandall Stewart }
14792529f56eSJonathan T. Looney /*
14802529f56eSJonathan T. Looney * If we were gathering packets to be automatically dumped, try to do
14812529f56eSJonathan T. Looney * it now. If this succeeds, the log information in the TCPCB will be
14822529f56eSJonathan T. Looney * cleared. Otherwise, we'll handle the log information as we do
14832529f56eSJonathan T. Looney * for other states.
14842529f56eSJonathan T. Looney */
148569c7c811SRandall Stewart switch(tp->_t_logstate) {
14862529f56eSJonathan T. Looney case TCP_LOG_STATE_HEAD_AUTO:
14872529f56eSJonathan T. Looney (void)tcp_log_dump_tp_logbuf(tp, "auto-dumped from head",
14882529f56eSJonathan T. Looney M_NOWAIT, false);
14892529f56eSJonathan T. Looney break;
14902529f56eSJonathan T. Looney case TCP_LOG_STATE_TAIL_AUTO:
14912529f56eSJonathan T. Looney (void)tcp_log_dump_tp_logbuf(tp, "auto-dumped from tail",
14922529f56eSJonathan T. Looney M_NOWAIT, false);
14932529f56eSJonathan T. Looney break;
149469c7c811SRandall Stewart case TCP_LOG_VIA_BBPOINTS:
149569c7c811SRandall Stewart (void)tcp_log_dump_tp_logbuf(tp, "auto-dumped from bbpoints",
149669c7c811SRandall Stewart M_NOWAIT, false);
149769c7c811SRandall Stewart break;
14982529f56eSJonathan T. Looney case TCP_LOG_STATE_CONTINUAL:
14992529f56eSJonathan T. Looney (void)tcp_log_dump_tp_logbuf(tp, "auto-dumped from continual",
15002529f56eSJonathan T. Looney M_NOWAIT, false);
15012529f56eSJonathan T. Looney break;
15022529f56eSJonathan T. Looney }
15032529f56eSJonathan T. Looney
15042529f56eSJonathan T. Looney /*
15052529f56eSJonathan T. Looney * There are two ways we could keep logs: per-socket or per-ID. If
15062529f56eSJonathan T. Looney * we are tracking logs with an ID, then the logs survive the
15072529f56eSJonathan T. Looney * destruction of the TCPCB.
15082529f56eSJonathan T. Looney *
15092529f56eSJonathan T. Looney * If the TCPCB is associated with an ID node, move the logs from the
15102529f56eSJonathan T. Looney * TCPCB to the ID node. In theory, this is safe, for reasons which I
15112529f56eSJonathan T. Looney * will now explain for my own benefit when I next need to figure out
15122529f56eSJonathan T. Looney * this code. :-)
15132529f56eSJonathan T. Looney *
15142529f56eSJonathan T. Looney * We own the INP lock. Therefore, no one else can change the contents
15152529f56eSJonathan T. Looney * of this node (Rule C). Further, no one can remove this node from
15162529f56eSJonathan T. Looney * the bucket while we hold the lock (Rule D). Basically, no one can
15172529f56eSJonathan T. Looney * mess with this node. That leaves two states in which we could be:
15182529f56eSJonathan T. Looney *
15192529f56eSJonathan T. Looney * 1. Another thread is currently waiting to acquire the INP lock, with
15202529f56eSJonathan T. Looney * plans to do something with this node. When we drop the INP lock,
15212529f56eSJonathan T. Looney * they will have a chance to do that. They will recheck the
15222529f56eSJonathan T. Looney * tln_closed field (see note to Rule C) and then acquire the
15232529f56eSJonathan T. Looney * bucket lock before proceeding further.
15242529f56eSJonathan T. Looney *
15252529f56eSJonathan T. Looney * 2. Another thread will try to acquire a lock at some point in the
15262529f56eSJonathan T. Looney * future. If they try to acquire a lock before we set the
15272529f56eSJonathan T. Looney * tln_closed field, they will follow state #1. If they try to
15282529f56eSJonathan T. Looney * acquire a lock after we set the tln_closed field, they will be
15292529f56eSJonathan T. Looney * able to make changes to the node, at will, following Rule C.
15302529f56eSJonathan T. Looney *
15312529f56eSJonathan T. Looney * Therefore, we currently own this node and can make any changes
15322529f56eSJonathan T. Looney * we want. But, as soon as we set the tln_closed field to true, we
15332529f56eSJonathan T. Looney * have effectively dropped our lock on the node. (For this reason, we
15342529f56eSJonathan T. Looney * also need to make sure our writes are ordered correctly. An atomic
15352529f56eSJonathan T. Looney * operation with "release" semantics should be sufficient.)
15362529f56eSJonathan T. Looney */
15372529f56eSJonathan T. Looney
15382529f56eSJonathan T. Looney if (tp->t_lin != NULL) {
15399eb0e832SGleb Smirnoff struct inpcb *inp = tptoinpcb(tp);
15409eb0e832SGleb Smirnoff
15412529f56eSJonathan T. Looney /* Copy the relevant information to the log entry. */
15422529f56eSJonathan T. Looney tln = tp->t_lin;
15439eb0e832SGleb Smirnoff KASSERT(tln->tln_inp == inp,
15449eb0e832SGleb Smirnoff ("%s: Mismatched inp (tln->tln_inp=%p, tp inpcb=%p)",
15459eb0e832SGleb Smirnoff __func__, tln->tln_inp, inp));
15462529f56eSJonathan T. Looney tcp_log_move_tp_to_node(tp, tln);
15472529f56eSJonathan T. Looney
15482529f56eSJonathan T. Looney /* Clear information from the PCB. */
15492529f56eSJonathan T. Looney tp->t_lin = NULL;
15502529f56eSJonathan T. Looney tp->t_lib = NULL;
15512529f56eSJonathan T. Looney
15522529f56eSJonathan T. Looney /*
15532529f56eSJonathan T. Looney * Take a reference on the INP. This ensures that the INP
15542529f56eSJonathan T. Looney * remains valid while the node is on the expiry queue. This
15552529f56eSJonathan T. Looney * ensures the INP is valid for other threads that may be
15562529f56eSJonathan T. Looney * racing to lock this node when we move it to the expire
15572529f56eSJonathan T. Looney * queue.
15582529f56eSJonathan T. Looney */
15599eb0e832SGleb Smirnoff in_pcbref(inp);
15602529f56eSJonathan T. Looney
15612529f56eSJonathan T. Looney /*
15622529f56eSJonathan T. Looney * Store the entry on the expiry list. The exact behavior
15632529f56eSJonathan T. Looney * depends on whether we have entries to keep. If so, we
15642529f56eSJonathan T. Looney * put the entry at the tail of the list and expire in
15652529f56eSJonathan T. Looney * TCP_LOG_EXPIRE_TIME. Otherwise, we expire "now" and put
15662529f56eSJonathan T. Looney * the entry at the head of the list. (Handling the cleanup
15672529f56eSJonathan T. Looney * via the expiry timer lets us avoid locking messiness here.)
15682529f56eSJonathan T. Looney */
15692529f56eSJonathan T. Looney tln->tln_expiretime = getsbinuptime();
15702529f56eSJonathan T. Looney TCPLOG_EXPIREQ_LOCK();
15712529f56eSJonathan T. Looney if (tln->tln_count) {
15722529f56eSJonathan T. Looney tln->tln_expiretime += TCP_LOG_EXPIRE_TIME;
15732529f56eSJonathan T. Looney if (STAILQ_EMPTY(&tcp_log_expireq_head) &&
15742529f56eSJonathan T. Looney !callout_active(&tcp_log_expireq_callout)) {
15752529f56eSJonathan T. Looney /*
15762529f56eSJonathan T. Looney * We are adding the first entry and a callout
15772529f56eSJonathan T. Looney * is not currently scheduled; therefore, we
15782529f56eSJonathan T. Looney * need to schedule one.
15792529f56eSJonathan T. Looney */
15802529f56eSJonathan T. Looney callout_reset_sbt(&tcp_log_expireq_callout,
15812529f56eSJonathan T. Looney tln->tln_expiretime, SBT_1S, tcp_log_expire,
15822529f56eSJonathan T. Looney NULL, C_ABSOLUTE);
15832529f56eSJonathan T. Looney }
15842529f56eSJonathan T. Looney STAILQ_INSERT_TAIL(&tcp_log_expireq_head, tln,
15852529f56eSJonathan T. Looney tln_expireq);
15862529f56eSJonathan T. Looney } else {
15872529f56eSJonathan T. Looney callouttime = tln->tln_expiretime +
15882529f56eSJonathan T. Looney TCP_LOG_EXPIRE_INTVL;
15892529f56eSJonathan T. Looney tln_first = STAILQ_FIRST(&tcp_log_expireq_head);
15902529f56eSJonathan T. Looney
15912529f56eSJonathan T. Looney if ((tln_first == NULL ||
15922529f56eSJonathan T. Looney callouttime < tln_first->tln_expiretime) &&
15932529f56eSJonathan T. Looney (callout_pending(&tcp_log_expireq_callout) ||
15942529f56eSJonathan T. Looney !callout_active(&tcp_log_expireq_callout))) {
15952529f56eSJonathan T. Looney /*
15962529f56eSJonathan T. Looney * The list is empty, or we want to run the
15972529f56eSJonathan T. Looney * expire code before the first entry's timer
15982529f56eSJonathan T. Looney * fires. Also, we are in a case where a callout
15992529f56eSJonathan T. Looney * is not actively running. We want to reset
16002529f56eSJonathan T. Looney * the callout to occur sooner.
16012529f56eSJonathan T. Looney */
16022529f56eSJonathan T. Looney callout_reset_sbt(&tcp_log_expireq_callout,
16032529f56eSJonathan T. Looney callouttime, SBT_1S, tcp_log_expire, NULL,
16042529f56eSJonathan T. Looney C_ABSOLUTE);
16052529f56eSJonathan T. Looney }
16062529f56eSJonathan T. Looney
16072529f56eSJonathan T. Looney /*
16082529f56eSJonathan T. Looney * Insert to the head, or just after the head, as
16092529f56eSJonathan T. Looney * appropriate. (This might result in small
16102529f56eSJonathan T. Looney * mis-orderings as a bunch of "expire now" entries
16112529f56eSJonathan T. Looney * gather at the start of the list, but that should
16122529f56eSJonathan T. Looney * not produce big problems, since the expire timer
16132529f56eSJonathan T. Looney * will walk through all of them.)
16142529f56eSJonathan T. Looney */
16152529f56eSJonathan T. Looney if (tln_first == NULL ||
16162529f56eSJonathan T. Looney tln->tln_expiretime < tln_first->tln_expiretime)
16172529f56eSJonathan T. Looney STAILQ_INSERT_HEAD(&tcp_log_expireq_head, tln,
16182529f56eSJonathan T. Looney tln_expireq);
16192529f56eSJonathan T. Looney else
16202529f56eSJonathan T. Looney STAILQ_INSERT_AFTER(&tcp_log_expireq_head,
16212529f56eSJonathan T. Looney tln_first, tln, tln_expireq);
16222529f56eSJonathan T. Looney }
16232529f56eSJonathan T. Looney TCPLOG_EXPIREQ_UNLOCK();
16242529f56eSJonathan T. Looney
16252529f56eSJonathan T. Looney /*
16262529f56eSJonathan T. Looney * We are done messing with the tln. After this point, we
16272529f56eSJonathan T. Looney * can't touch it. (Note that the "release" semantics should
16282529f56eSJonathan T. Looney * be included with the TCPLOG_EXPIREQ_UNLOCK() call above.
16292529f56eSJonathan T. Looney * Therefore, they should be unnecessary here. However, it
16302529f56eSJonathan T. Looney * seems like a good idea to include them anyway, since we
16312529f56eSJonathan T. Looney * really are releasing a lock here.)
16322529f56eSJonathan T. Looney */
16332529f56eSJonathan T. Looney atomic_store_rel_int(&tln->tln_closed, 1);
16342529f56eSJonathan T. Looney } else {
16352529f56eSJonathan T. Looney /* Remove log entries. */
16362529f56eSJonathan T. Looney while ((log_entry = STAILQ_FIRST(&tp->t_logs)) != NULL)
16372529f56eSJonathan T. Looney tcp_log_remove_log_head(tp, log_entry);
16382529f56eSJonathan T. Looney KASSERT(tp->t_lognum == 0,
16392529f56eSJonathan T. Looney ("%s: After freeing entries, tp->t_lognum=%d (expected 0)",
16402529f56eSJonathan T. Looney __func__, tp->t_lognum));
16412529f56eSJonathan T. Looney }
16422529f56eSJonathan T. Looney
16432529f56eSJonathan T. Looney /*
16442529f56eSJonathan T. Looney * Change the log state to off (just in case anything tries to sneak
16452529f56eSJonathan T. Looney * in a last-minute log).
16462529f56eSJonathan T. Looney */
164769c7c811SRandall Stewart tp->_t_logstate = TCP_LOG_STATE_OFF;
16482529f56eSJonathan T. Looney }
16492529f56eSJonathan T. Looney
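/*
 * Drop every log entry still queued on the TCPCB and force the logging
 * state to off. Used when the global tcp_disable_all_bb_logs switch is
 * set.
 */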
1650a9a08eceSRandall Stewart static void
1651a9a08eceSRandall Stewart tcp_log_purge_tp_logbuf(struct tcpcb *tp)
1652a9a08eceSRandall Stewart {
1653a9a08eceSRandall Stewart struct tcp_log_mem *log_entry;
1654a9a08eceSRandall Stewart
16559eb0e832SGleb Smirnoff INP_WLOCK_ASSERT(tptoinpcb(tp));
1656a9a08eceSRandall Stewart if (tp->t_lognum == 0)
1657a9a08eceSRandall Stewart return;
1658a9a08eceSRandall Stewart
1659a9a08eceSRandall Stewart while ((log_entry = STAILQ_FIRST(&tp->t_logs)) != NULL)
1660a9a08eceSRandall Stewart tcp_log_remove_log_head(tp, log_entry);
1661a9a08eceSRandall Stewart KASSERT(tp->t_lognum == 0,
1662a9a08eceSRandall Stewart ("%s: After freeing entries, tp->t_lognum=%d (expected 0)",
1663a9a08eceSRandall Stewart __func__, tp->t_lognum));
166469c7c811SRandall Stewart tp->_t_logstate = TCP_LOG_STATE_OFF;
1665a9a08eceSRandall Stewart }
1666a9a08eceSRandall Stewart
16672529f56eSJonathan T. Looney /*
16682529f56eSJonathan T. Looney * This logs an event for a TCP socket. Normally, this is called via
16692529f56eSJonathan T. Looney * TCP_LOG_EVENT or TCP_LOG_EVENT_VERBOSE. See the documentation for
16702529f56eSJonathan T. Looney * TCP_LOG_EVENT().
16712529f56eSJonathan T. Looney */
16722529f56eSJonathan T. Looney
16732529f56eSJonathan T. Looney struct tcp_log_buffer *
167469c7c811SRandall Stewart tcp_log_event(struct tcpcb *tp, struct tcphdr *th, struct sockbuf *rxbuf,
16752529f56eSJonathan T. Looney struct sockbuf *txbuf, uint8_t eventid, int errornum, uint32_t len,
16762529f56eSJonathan T. Looney union tcp_log_stackspecific *stackinfo, int th_hostorder,
16772529f56eSJonathan T. Looney const char *output_caller, const char *func, int line, const struct timeval *itv)
16782529f56eSJonathan T. Looney {
16792529f56eSJonathan T. Looney struct tcp_log_mem *log_entry;
16802529f56eSJonathan T. Looney struct tcp_log_buffer *log_buf;
16812529f56eSJonathan T. Looney int attempt_count = 0;
16822529f56eSJonathan T. Looney struct tcp_log_verbose *log_verbose;
16832529f56eSJonathan T. Looney uint32_t logsn;
16842529f56eSJonathan T. Looney
16852529f56eSJonathan T. Looney KASSERT((func == NULL && line == 0) || (func != NULL && line > 0),
16862529f56eSJonathan T. Looney ("%s called with inconsistent func (%p) and line (%d) arguments",
16872529f56eSJonathan T. Looney __func__, func, line));
16882529f56eSJonathan T. Looney
16899eb0e832SGleb Smirnoff INP_WLOCK_ASSERT(tptoinpcb(tp));
1690a9a08eceSRandall Stewart if (tcp_disable_all_bb_logs) {
1691a9a08eceSRandall Stewart /*
1692a9a08eceSRandall Stewart * The global switch disabling
1693a9a08eceSRandall Stewart * all logging has been thrown.
1694a9a08eceSRandall Stewart * Call the purge function, which
1695a9a08eceSRandall Stewart * frees the queued log entries
1696a9a08eceSRandall Stewart * and turns off logging.
1697a9a08eceSRandall Stewart */
1698a9a08eceSRandall Stewart tcp_log_purge_tp_logbuf(tp);
1699a9a08eceSRandall Stewart return (NULL);
1700a9a08eceSRandall Stewart }
170169c7c811SRandall Stewart KASSERT(tp->_t_logstate == TCP_LOG_STATE_HEAD ||
170269c7c811SRandall Stewart tp->_t_logstate == TCP_LOG_STATE_TAIL ||
170369c7c811SRandall Stewart tp->_t_logstate == TCP_LOG_STATE_CONTINUAL ||
170469c7c811SRandall Stewart tp->_t_logstate == TCP_LOG_STATE_HEAD_AUTO ||
170569c7c811SRandall Stewart tp->_t_logstate == TCP_LOG_VIA_BBPOINTS ||
170669c7c811SRandall Stewart tp->_t_logstate == TCP_LOG_STATE_TAIL_AUTO,
170769c7c811SRandall Stewart ("%s called with unexpected tp->_t_logstate (%d)", __func__,
170869c7c811SRandall Stewart tp->_t_logstate));
17092529f56eSJonathan T. Looney
17102529f56eSJonathan T. Looney /*
17112529f56eSJonathan T. Looney * Get the serial number. We do this early so it will
17122529f56eSJonathan T. Looney * increment even if we end up skipping the log entry for some
17132529f56eSJonathan T. Looney * reason.
17142529f56eSJonathan T. Looney */
17152529f56eSJonathan T. Looney logsn = tp->t_logsn++;
17162529f56eSJonathan T. Looney
17172529f56eSJonathan T. Looney /*
17182529f56eSJonathan T. Looney * Can we get a new log entry? If so, increment the lognum counter
17192529f56eSJonathan T. Looney * here.
17202529f56eSJonathan T. Looney */
17212529f56eSJonathan T. Looney retry:
1722a9a08eceSRandall Stewart if (tp->t_lognum < tp->t_loglimit) {
17232529f56eSJonathan T. Looney if ((log_entry = uma_zalloc(tcp_log_zone, M_NOWAIT)) != NULL)
17242529f56eSJonathan T. Looney tp->t_lognum++;
17252529f56eSJonathan T. Looney } else
17262529f56eSJonathan T. Looney log_entry = NULL;
17272529f56eSJonathan T. Looney
17282529f56eSJonathan T. Looney /* Do we need to try to reuse? */
17292529f56eSJonathan T. Looney if (log_entry == NULL) {
17302529f56eSJonathan T. Looney /*
17312529f56eSJonathan T. Looney * Sacrifice auto-logged sessions without a log ID if
17322529f56eSJonathan T. Looney * tcp_log_auto_all is false. (If they don't have a log
17332529f56eSJonathan T. Looney * ID by now, it is probable that either they won't get one
17342529f56eSJonathan T. Looney * or we are resource-constrained.)
17352529f56eSJonathan T. Looney */
17362529f56eSJonathan T. Looney if (tp->t_lib == NULL && (tp->t_flags2 & TF2_LOG_AUTO) &&
17372529f56eSJonathan T. Looney !tcp_log_auto_all) {
17382529f56eSJonathan T. Looney if (tcp_log_state_change(tp, TCP_LOG_STATE_CLEAR)) {
17392529f56eSJonathan T. Looney #ifdef INVARIANTS
17402529f56eSJonathan T. Looney panic("%s:%d: tcp_log_state_change() failed "
17412529f56eSJonathan T. Looney "to set tp %p to TCP_LOG_STATE_CLEAR",
17422529f56eSJonathan T. Looney __func__, __LINE__, tp);
17432529f56eSJonathan T. Looney #endif
174469c7c811SRandall Stewart tp->_t_logstate = TCP_LOG_STATE_OFF;
17452529f56eSJonathan T. Looney }
17462529f56eSJonathan T. Looney return (NULL);
17472529f56eSJonathan T. Looney }
17482529f56eSJonathan T. Looney /*
17492529f56eSJonathan T. Looney * If we are in TCP_LOG_STATE_HEAD_AUTO state, try to dump
17502529f56eSJonathan T. Looney * the buffers. If successful, deactivate tracing. Otherwise,
17512529f56eSJonathan T. Looney * leave it active so we will retry.
17522529f56eSJonathan T. Looney */
175369c7c811SRandall Stewart if (tp->_t_logstate == TCP_LOG_STATE_HEAD_AUTO &&
17542529f56eSJonathan T. Looney !tcp_log_dump_tp_logbuf(tp, "auto-dumped from head",
17552529f56eSJonathan T. Looney M_NOWAIT, false)) {
175669c7c811SRandall Stewart tp->_t_logstate = TCP_LOG_STATE_OFF;
17572529f56eSJonathan T. Looney return(NULL);
175869c7c811SRandall Stewart } else if ((tp->_t_logstate == TCP_LOG_STATE_CONTINUAL) &&
17592529f56eSJonathan T. Looney !tcp_log_dump_tp_logbuf(tp, "auto-dumped from continual",
17602529f56eSJonathan T. Looney M_NOWAIT, false)) {
17612529f56eSJonathan T. Looney if (attempt_count == 0) {
17622529f56eSJonathan T. Looney attempt_count++;
17632529f56eSJonathan T. Looney goto retry;
17642529f56eSJonathan T. Looney }
17652529f56eSJonathan T. Looney #ifdef TCPLOG_DEBUG_COUNTERS
17662529f56eSJonathan T. Looney counter_u64_add(tcp_log_que_fail4, 1);
17672529f56eSJonathan T. Looney #endif
17682529f56eSJonathan T. Looney return(NULL);
176969c7c811SRandall Stewart
177069c7c811SRandall Stewart } else if ((tp->_t_logstate == TCP_LOG_VIA_BBPOINTS) &&
177169c7c811SRandall Stewart !tcp_log_dump_tp_logbuf(tp, "auto-dumped from bbpoints",
177269c7c811SRandall Stewart M_NOWAIT, false)) {
177369c7c811SRandall Stewart if (attempt_count == 0) {
177469c7c811SRandall Stewart attempt_count++;
177569c7c811SRandall Stewart goto retry;
177669c7c811SRandall Stewart }
177769c7c811SRandall Stewart #ifdef TCPLOG_DEBUG_COUNTERS
177869c7c811SRandall Stewart counter_u64_add(tcp_log_que_fail4, 1);
177969c7c811SRandall Stewart #endif
178069c7c811SRandall Stewart return(NULL);
178169c7c811SRandall Stewart } else if (tp->_t_logstate == TCP_LOG_STATE_HEAD_AUTO)
17822529f56eSJonathan T. Looney return(NULL);
17832529f56eSJonathan T. Looney
17842529f56eSJonathan T. Looney /* If in HEAD state, just deactivate the tracing and return. */
178569c7c811SRandall Stewart if (tp->_t_logstate == TCP_LOG_STATE_HEAD) {
178669c7c811SRandall Stewart tp->_t_logstate = TCP_LOG_STATE_OFF;
17872529f56eSJonathan T. Looney return(NULL);
17882529f56eSJonathan T. Looney }
17892529f56eSJonathan T. Looney /*
17902529f56eSJonathan T. Looney * Get a buffer to reuse. If that fails, just give up.
17912529f56eSJonathan T. Looney * (We can't log anything without a buffer in which to
17922529f56eSJonathan T. Looney * put it.)
17932529f56eSJonathan T. Looney *
17942529f56eSJonathan T. Looney * Note that we don't change the t_lognum counter
17952529f56eSJonathan T. Looney * here. Because we are re-using the buffer, the total
17962529f56eSJonathan T. Looney * number won't change.
17972529f56eSJonathan T. Looney */
17982529f56eSJonathan T. Looney if ((log_entry = STAILQ_FIRST(&tp->t_logs)) == NULL)
17992529f56eSJonathan T. Looney return(NULL);
18002529f56eSJonathan T. Looney STAILQ_REMOVE_HEAD(&tp->t_logs, tlm_queue);
18012529f56eSJonathan T. Looney tcp_log_entry_refcnt_rem(log_entry);
18022529f56eSJonathan T. Looney }
18032529f56eSJonathan T. Looney
18042529f56eSJonathan T. Looney KASSERT(log_entry != NULL,
18052529f56eSJonathan T. Looney ("%s: log_entry unexpectedly NULL", __func__));
18062529f56eSJonathan T. Looney
18072529f56eSJonathan T. Looney /* Extract the log buffer and verbose buffer pointers. */
18082529f56eSJonathan T. Looney log_buf = &log_entry->tlm_buf;
18092529f56eSJonathan T. Looney log_verbose = &log_entry->tlm_v;
18102529f56eSJonathan T. Looney
18112529f56eSJonathan T. Looney /* Basic entries. */
18122529f56eSJonathan T. Looney if (itv == NULL)
181369c7c811SRandall Stewart microuptime(&log_buf->tlb_tv);
18142529f56eSJonathan T. Looney else
18152529f56eSJonathan T. Looney memcpy(&log_buf->tlb_tv, itv, sizeof(struct timeval));
18162529f56eSJonathan T. Looney log_buf->tlb_ticks = ticks;
18172529f56eSJonathan T. Looney log_buf->tlb_sn = logsn;
18182529f56eSJonathan T. Looney log_buf->tlb_stackid = tp->t_fb->tfb_id;
18192529f56eSJonathan T. Looney log_buf->tlb_eventid = eventid;
18202529f56eSJonathan T. Looney log_buf->tlb_eventflags = 0;
18212529f56eSJonathan T. Looney log_buf->tlb_errno = errornum;
18222529f56eSJonathan T. Looney
18232529f56eSJonathan T. Looney /* Socket buffers */
18242529f56eSJonathan T. Looney if (rxbuf != NULL) {
18252529f56eSJonathan T. Looney log_buf->tlb_eventflags |= TLB_FLAG_RXBUF;
18262529f56eSJonathan T. Looney log_buf->tlb_rxbuf.tls_sb_acc = rxbuf->sb_acc;
18272529f56eSJonathan T. Looney log_buf->tlb_rxbuf.tls_sb_ccc = rxbuf->sb_ccc;
18282529f56eSJonathan T. Looney log_buf->tlb_rxbuf.tls_sb_spare = 0;
182969c7c811SRandall Stewart } else {
183069c7c811SRandall Stewart log_buf->tlb_rxbuf.tls_sb_acc = 0;
183169c7c811SRandall Stewart log_buf->tlb_rxbuf.tls_sb_ccc = 0;
18322529f56eSJonathan T. Looney }
18332529f56eSJonathan T. Looney if (txbuf != NULL) {
18342529f56eSJonathan T. Looney log_buf->tlb_eventflags |= TLB_FLAG_TXBUF;
18352529f56eSJonathan T. Looney log_buf->tlb_txbuf.tls_sb_acc = txbuf->sb_acc;
18362529f56eSJonathan T. Looney log_buf->tlb_txbuf.tls_sb_ccc = txbuf->sb_ccc;
18372529f56eSJonathan T. Looney log_buf->tlb_txbuf.tls_sb_spare = 0;
183869c7c811SRandall Stewart } else {
183969c7c811SRandall Stewart log_buf->tlb_txbuf.tls_sb_acc = 0;
184069c7c811SRandall Stewart log_buf->tlb_txbuf.tls_sb_ccc = 0;
18412529f56eSJonathan T. Looney }
18422529f56eSJonathan T. Looney /* Copy values from tp to the log entry. */
1843*46fc1274SMichael Tuexen log_buf->tlb_state = tp->t_state;
1844*46fc1274SMichael Tuexen log_buf->tlb_starttime = tp->t_starttime;
1845*46fc1274SMichael Tuexen log_buf->tlb_iss = tp->iss;
1846*46fc1274SMichael Tuexen log_buf->tlb_flags = tp->t_flags;
1847*46fc1274SMichael Tuexen log_buf->tlb_snd_una = tp->snd_una;
1848*46fc1274SMichael Tuexen log_buf->tlb_snd_max = tp->snd_max;
1849*46fc1274SMichael Tuexen log_buf->tlb_snd_cwnd = tp->snd_cwnd;
1850*46fc1274SMichael Tuexen log_buf->tlb_snd_nxt = tp->snd_nxt;
1851*46fc1274SMichael Tuexen log_buf->tlb_snd_recover = tp->snd_recover;
1852*46fc1274SMichael Tuexen log_buf->tlb_snd_wnd = tp->snd_wnd;
1853*46fc1274SMichael Tuexen log_buf->tlb_snd_ssthresh = tp->snd_ssthresh;
1854*46fc1274SMichael Tuexen log_buf->tlb_srtt = tp->t_srtt;
1855*46fc1274SMichael Tuexen log_buf->tlb_rttvar = tp->t_rttvar;
1856*46fc1274SMichael Tuexen log_buf->tlb_rcv_up = tp->rcv_up;
1857*46fc1274SMichael Tuexen log_buf->tlb_rcv_adv = tp->rcv_adv;
1858*46fc1274SMichael Tuexen log_buf->tlb_flags2 = tp->t_flags2;
1859*46fc1274SMichael Tuexen log_buf->tlb_rcv_nxt = tp->rcv_nxt;
1860*46fc1274SMichael Tuexen log_buf->tlb_rcv_wnd = tp->rcv_wnd;
1861*46fc1274SMichael Tuexen log_buf->tlb_dupacks = tp->t_dupacks;
1862*46fc1274SMichael Tuexen log_buf->tlb_segqlen = tp->t_segqlen;
1863*46fc1274SMichael Tuexen log_buf->tlb_snd_numholes = tp->snd_numholes;
186494acddd2SMichael Tuexen log_buf->tlb_flex1 = 0;
186594acddd2SMichael Tuexen log_buf->tlb_flex2 = 0;
1866*46fc1274SMichael Tuexen log_buf->tlb_fbyte_in = tp->t_fbyte_in;
1867*46fc1274SMichael Tuexen log_buf->tlb_fbyte_out = tp->t_fbyte_out;
1868*46fc1274SMichael Tuexen log_buf->tlb_snd_scale = tp->snd_scale;
1869*46fc1274SMichael Tuexen log_buf->tlb_rcv_scale = tp->rcv_scale;
187094acddd2SMichael Tuexen log_buf->_pad[0] = 0;
187194acddd2SMichael Tuexen log_buf->_pad[1] = 0;
187294acddd2SMichael Tuexen log_buf->_pad[2] = 0;
18732529f56eSJonathan T. Looney /* Copy stack-specific info. */
18742529f56eSJonathan T. Looney if (stackinfo != NULL) {
18752529f56eSJonathan T. Looney memcpy(&log_buf->tlb_stackinfo, stackinfo,
18762529f56eSJonathan T. Looney sizeof(log_buf->tlb_stackinfo));
18772529f56eSJonathan T. Looney log_buf->tlb_eventflags |= TLB_FLAG_STACKINFO;
18782529f56eSJonathan T. Looney }
18792529f56eSJonathan T. Looney
18802529f56eSJonathan T. Looney /* The packet */
18812529f56eSJonathan T. Looney log_buf->tlb_len = len;
18822529f56eSJonathan T. Looney if (th) {
18832529f56eSJonathan T. Looney int optlen;
18842529f56eSJonathan T. Looney
18852529f56eSJonathan T. Looney log_buf->tlb_eventflags |= TLB_FLAG_HDR;
18862529f56eSJonathan T. Looney log_buf->tlb_th = *th;
18872529f56eSJonathan T. Looney if (th_hostorder)
18882529f56eSJonathan T. Looney tcp_fields_to_net(&log_buf->tlb_th);
18892529f56eSJonathan T. Looney optlen = (th->th_off << 2) - sizeof (struct tcphdr);
18902529f56eSJonathan T. Looney if (optlen > 0)
18912529f56eSJonathan T. Looney memcpy(log_buf->tlb_opts, th + 1, optlen);
189269c7c811SRandall Stewart } else {
189369c7c811SRandall Stewart memset(&log_buf->tlb_th, 0, sizeof(*th));
18942529f56eSJonathan T. Looney }
18952529f56eSJonathan T. Looney
18962529f56eSJonathan T. Looney /* Verbose information */
18972529f56eSJonathan T. Looney if (func != NULL) {
18982529f56eSJonathan T. Looney log_buf->tlb_eventflags |= TLB_FLAG_VERBOSE;
18992529f56eSJonathan T. Looney if (output_caller != NULL)
19002529f56eSJonathan T. Looney strlcpy(log_verbose->tlv_snd_frm, output_caller,
19012529f56eSJonathan T. Looney TCP_FUNC_LEN);
19022529f56eSJonathan T. Looney else
19032529f56eSJonathan T. Looney *log_verbose->tlv_snd_frm = 0;
19042529f56eSJonathan T. Looney strlcpy(log_verbose->tlv_trace_func, func, TCP_FUNC_LEN);
19052529f56eSJonathan T. Looney log_verbose->tlv_trace_line = line;
19062529f56eSJonathan T. Looney }
19072529f56eSJonathan T. Looney
19082529f56eSJonathan T. Looney /* Insert the new log at the tail. */
19092529f56eSJonathan T. Looney STAILQ_INSERT_TAIL(&tp->t_logs, log_entry, tlm_queue);
19102529f56eSJonathan T. Looney tcp_log_entry_refcnt_add(log_entry);
19112529f56eSJonathan T. Looney return (log_buf);
19122529f56eSJonathan T. Looney }
19132529f56eSJonathan T. Looney
19142529f56eSJonathan T. Looney /*
19152529f56eSJonathan T. Looney * Change the logging state for a TCPCB. Returns 0 on success or an
19162529f56eSJonathan T. Looney * error code on failure.
19172529f56eSJonathan T. Looney */
19182529f56eSJonathan T. Looney int
19192529f56eSJonathan T. Looney tcp_log_state_change(struct tcpcb *tp, int state)
19202529f56eSJonathan T. Looney {
19212529f56eSJonathan T. Looney struct tcp_log_mem *log_entry;
192269c7c811SRandall Stewart int rv;
19232529f56eSJonathan T. Looney
19249eb0e832SGleb Smirnoff INP_WLOCK_ASSERT(tptoinpcb(tp));
192569c7c811SRandall Stewart rv = 0;
19262529f56eSJonathan T. Looney switch(state) {
19272529f56eSJonathan T. Looney case TCP_LOG_STATE_CLEAR:
19282529f56eSJonathan T. Looney while ((log_entry = STAILQ_FIRST(&tp->t_logs)) != NULL)
19292529f56eSJonathan T. Looney tcp_log_remove_log_head(tp, log_entry);
19307ea8d027SRichard Scheffenegger /* FALLTHROUGH */
19312529f56eSJonathan T. Looney
19322529f56eSJonathan T. Looney case TCP_LOG_STATE_OFF:
193369c7c811SRandall Stewart tp->_t_logstate = TCP_LOG_STATE_OFF;
19342529f56eSJonathan T. Looney break;
19352529f56eSJonathan T. Looney
19362529f56eSJonathan T. Looney case TCP_LOG_STATE_TAIL:
19372529f56eSJonathan T. Looney case TCP_LOG_STATE_HEAD:
19382529f56eSJonathan T. Looney case TCP_LOG_STATE_CONTINUAL:
193969c7c811SRandall Stewart case TCP_LOG_VIA_BBPOINTS:
19402529f56eSJonathan T. Looney case TCP_LOG_STATE_HEAD_AUTO:
19412529f56eSJonathan T. Looney case TCP_LOG_STATE_TAIL_AUTO:
194269c7c811SRandall Stewart /*
194369c7c811SRandall Stewart * When the RATIO_OFF state is set for the bucket, the log ID
194469c7c811SRandall Stewart * this tp is associated with has been probabilistically opted
194569c7c811SRandall Stewart * out of logging per tcp_log_apply_ratio().
194669c7c811SRandall Stewart */
194769c7c811SRandall Stewart if (tp->t_lib == NULL ||
194869c7c811SRandall Stewart tp->t_lib->tlb_logstate != TCP_LOG_STATE_RATIO_OFF) {
194969c7c811SRandall Stewart tp->_t_logstate = state;
195069c7c811SRandall Stewart } else {
195169c7c811SRandall Stewart rv = ECANCELED;
195269c7c811SRandall Stewart tp->_t_logstate = TCP_LOG_STATE_OFF;
195369c7c811SRandall Stewart }
19542529f56eSJonathan T. Looney break;
19552529f56eSJonathan T. Looney
19562529f56eSJonathan T. Looney default:
19572529f56eSJonathan T. Looney return (EINVAL);
19582529f56eSJonathan T. Looney }
1959a9a08eceSRandall Stewart if (tcp_disable_all_bb_logs) {
1960a9a08eceSRandall Stewart /* We are prohibited from doing any logging */
196169c7c811SRandall Stewart tp->_t_logstate = TCP_LOG_STATE_OFF;
196269c7c811SRandall Stewart rv = EBUSY;
1963a9a08eceSRandall Stewart }
19642529f56eSJonathan T. Looney tp->t_flags2 &= ~(TF2_LOG_AUTO);
19652529f56eSJonathan T. Looney
196669c7c811SRandall Stewart return (rv);
19672529f56eSJonathan T. Looney }
19682529f56eSJonathan T. Looney
19692529f56eSJonathan T. Looney /* If tcp_drain() is called, flush half the log entries. */
19702529f56eSJonathan T. Looney void
19712529f56eSJonathan T. Looney tcp_log_drain(struct tcpcb *tp)
19722529f56eSJonathan T. Looney {
19732529f56eSJonathan T. Looney struct tcp_log_mem *log_entry, *next;
19742529f56eSJonathan T. Looney int target, skip;
19752529f56eSJonathan T. Looney
19769eb0e832SGleb Smirnoff INP_WLOCK_ASSERT(tptoinpcb(tp));
19772529f56eSJonathan T. Looney if ((target = tp->t_lognum / 2) == 0)
19782529f56eSJonathan T. Looney return;
19792529f56eSJonathan T. Looney
19802529f56eSJonathan T. Looney /*
198169c7c811SRandall Stewart * XXXRRS: At this point, I don't think it is wise that
198269c7c811SRandall Stewart * we do this. All that a drain call means is that
198369c7c811SRandall Stewart * we are hitting one of the system mbuf limits. BB
198469c7c811SRandall Stewart * logging, or freeing of them, will not create any
198569c7c811SRandall Stewart * more mbufs and really has nothing to do with
198669c7c811SRandall Stewart * the system running out of mbufs. For now I
198769c7c811SRandall Stewart * am changing this to free any "AUTO" by dumping
198869c7c811SRandall Stewart * them out. But this should either be changed
198969c7c811SRandall Stewart * so that it gets called when we hit the BB limit
199069c7c811SRandall Stewart * or it should just not get called (one of the two)
199169c7c811SRandall Stewart * since I don't think the mbuf <-> BB log cleanup
199269c7c811SRandall Stewart * is the right thing to do here.
199369c7c811SRandall Stewart */
199469c7c811SRandall Stewart /*
19952529f56eSJonathan T. Looney * If we are logging the "head" packets, we want to discard
19962529f56eSJonathan T. Looney * from the tail of the queue. Otherwise, we want to discard
19972529f56eSJonathan T. Looney * from the head.
19982529f56eSJonathan T. Looney */
199969c7c811SRandall Stewart if (tp->_t_logstate == TCP_LOG_STATE_HEAD) {
20002529f56eSJonathan T. Looney skip = tp->t_lognum - target;
20012529f56eSJonathan T. Looney STAILQ_FOREACH(log_entry, &tp->t_logs, tlm_queue)
20022529f56eSJonathan T. Looney if (!--skip)
20032529f56eSJonathan T. Looney break;
20042529f56eSJonathan T. Looney KASSERT(log_entry != NULL,
20052529f56eSJonathan T. Looney ("%s: skipped through all entries!", __func__));
20062529f56eSJonathan T. Looney if (log_entry == NULL)
20072529f56eSJonathan T. Looney return;
20082529f56eSJonathan T. Looney while ((next = STAILQ_NEXT(log_entry, tlm_queue)) != NULL) {
20092529f56eSJonathan T. Looney STAILQ_REMOVE_AFTER(&tp->t_logs, log_entry, tlm_queue);
20102529f56eSJonathan T. Looney tcp_log_entry_refcnt_rem(next);
20112529f56eSJonathan T. Looney tcp_log_remove_log_cleanup(tp, next);
20122529f56eSJonathan T. Looney #ifdef INVARIANTS
20132529f56eSJonathan T. Looney target--;
20142529f56eSJonathan T. Looney #endif
20152529f56eSJonathan T. Looney }
20162529f56eSJonathan T. Looney KASSERT(target == 0,
20172529f56eSJonathan T. Looney ("%s: After removing from tail, target was %d", __func__,
20182529f56eSJonathan T. Looney target));
201969c7c811SRandall Stewart } else if (tp->_t_logstate == TCP_LOG_STATE_HEAD_AUTO) {
202069c7c811SRandall Stewart (void)tcp_log_dump_tp_logbuf(tp, "auto-dumped from head at drain",
202169c7c811SRandall Stewart M_NOWAIT, false);
202269c7c811SRandall Stewart } else if (tp->_t_logstate == TCP_LOG_STATE_TAIL_AUTO) {
202369c7c811SRandall Stewart (void)tcp_log_dump_tp_logbuf(tp, "auto-dumped from tail at drain",
202469c7c811SRandall Stewart M_NOWAIT, false);
202569c7c811SRandall Stewart } else if (tp->_t_logstate == TCP_LOG_VIA_BBPOINTS) {
202669c7c811SRandall Stewart (void)tcp_log_dump_tp_logbuf(tp, "auto-dumped from bbpoints",
202769c7c811SRandall Stewart M_NOWAIT, false);
202869c7c811SRandall Stewart } else if (tp->_t_logstate == TCP_LOG_STATE_CONTINUAL) {
20292529f56eSJonathan T. Looney (void)tcp_log_dump_tp_logbuf(tp, "auto-dumped from continual",
20302529f56eSJonathan T. Looney M_NOWAIT, false);
20312529f56eSJonathan T. Looney } else {
20322529f56eSJonathan T. Looney while ((log_entry = STAILQ_FIRST(&tp->t_logs)) != NULL &&
20332529f56eSJonathan T. Looney target--)
20342529f56eSJonathan T. Looney tcp_log_remove_log_head(tp, log_entry);
20352529f56eSJonathan T. Looney KASSERT(target <= 0,
20362529f56eSJonathan T. Looney ("%s: After removing from head, target was %d", __func__,
20372529f56eSJonathan T. Looney target));
20382529f56eSJonathan T. Looney KASSERT(tp->t_lognum > 0,
20392529f56eSJonathan T. Looney ("%s: After removing from head, tp->t_lognum was %d",
20402529f56eSJonathan T. Looney __func__, tp->t_lognum));
20412529f56eSJonathan T. Looney KASSERT(log_entry != NULL,
20422529f56eSJonathan T. Looney ("%s: After removing from head, the tailq was empty",
20432529f56eSJonathan T. Looney __func__));
20442529f56eSJonathan T. Looney }
20452529f56eSJonathan T. Looney }
20462529f56eSJonathan T. Looney
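/*
 * Copy a block of log data to its destination. If the sockopt has an
 * associated thread, the destination is a userspace address and we must
 * use copyout(); otherwise, the destination is a kernel buffer and a
 * plain bcopy() suffices.
 */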
20472529f56eSJonathan T. Looney static inline int
20482529f56eSJonathan T. Looney tcp_log_copyout(struct sockopt *sopt, void *src, void *dst, size_t len)
20492529f56eSJonathan T. Looney {
20502529f56eSJonathan T. Looney
20512529f56eSJonathan T. Looney if (sopt->sopt_td != NULL)
20522529f56eSJonathan T. Looney return (copyout(src, dst, len));
20532529f56eSJonathan T. Looney bcopy(src, dst, len);
20542529f56eSJonathan T. Looney return (0);
20552529f56eSJonathan T. Looney }
20562529f56eSJonathan T. Looney
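/*
 * Serialize up to 'count' log entries from the given list into the buffer
 * described by sopt, using tcp_log_copyout() for each piece. Entries
 * without a stored header have the header portion zero-filled, and entries
 * with verbose data have it appended. On return, *end points just past the
 * last byte written.
 */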
20572529f56eSJonathan T. Looney static int
20582529f56eSJonathan T. Looney tcp_log_logs_to_buf(struct sockopt *sopt, struct tcp_log_stailq *log_tailqp,
20592529f56eSJonathan T. Looney struct tcp_log_buffer **end, int count)
20602529f56eSJonathan T. Looney {
20612529f56eSJonathan T. Looney struct tcp_log_buffer *out_entry;
20622529f56eSJonathan T. Looney struct tcp_log_mem *log_entry;
20632529f56eSJonathan T. Looney size_t entrysize;
20642529f56eSJonathan T. Looney int error;
20652529f56eSJonathan T. Looney #ifdef INVARIANTS
20662529f56eSJonathan T. Looney int orig_count = count;
20672529f56eSJonathan T. Looney #endif
20682529f56eSJonathan T. Looney
20692529f56eSJonathan T. Looney /* Copy the data out. */
20702529f56eSJonathan T. Looney error = 0;
20712529f56eSJonathan T. Looney out_entry = (struct tcp_log_buffer *) sopt->sopt_val;
20722529f56eSJonathan T. Looney STAILQ_FOREACH(log_entry, log_tailqp, tlm_queue) {
20732529f56eSJonathan T. Looney count--;
20742529f56eSJonathan T. Looney KASSERT(count >= 0,
20752529f56eSJonathan T. Looney ("%s:%d: Exceeded expected count (%d) processing list %p",
20762529f56eSJonathan T. Looney __func__, __LINE__, orig_count, log_tailqp));
20772529f56eSJonathan T. Looney
20782529f56eSJonathan T. Looney #ifdef TCPLOG_DEBUG_COUNTERS
20792529f56eSJonathan T. Looney counter_u64_add(tcp_log_que_copyout, 1);
20802529f56eSJonathan T. Looney #endif
20812529f56eSJonathan T. Looney
20822529f56eSJonathan T. Looney /*
20832529f56eSJonathan T. Looney * Skip copying out the header if it isn't present.
20842529f56eSJonathan T. Looney * Instead, copy out zeros (to ensure we don't leak info).
20852529f56eSJonathan T. Looney * TODO: Make sure we truly do zero everything we don't
20862529f56eSJonathan T. Looney * explicitly set.
20872529f56eSJonathan T. Looney */
20882529f56eSJonathan T. Looney if (log_entry->tlm_buf.tlb_eventflags & TLB_FLAG_HDR)
20892529f56eSJonathan T. Looney entrysize = sizeof(struct tcp_log_buffer);
20902529f56eSJonathan T. Looney else
20912529f56eSJonathan T. Looney entrysize = offsetof(struct tcp_log_buffer, tlb_th);
20922529f56eSJonathan T. Looney error = tcp_log_copyout(sopt, &log_entry->tlm_buf, out_entry,
20932529f56eSJonathan T. Looney entrysize);
20942529f56eSJonathan T. Looney if (error)
20952529f56eSJonathan T. Looney break;
20962529f56eSJonathan T. Looney if (!(log_entry->tlm_buf.tlb_eventflags & TLB_FLAG_HDR)) {
20972529f56eSJonathan T. Looney error = tcp_log_copyout(sopt, zerobuf,
20982529f56eSJonathan T. Looney ((uint8_t *)out_entry) + entrysize,
20992529f56eSJonathan T. Looney sizeof(struct tcp_log_buffer) - entrysize);
21002529f56eSJonathan T. Looney }
21012529f56eSJonathan T. Looney
21022529f56eSJonathan T. Looney /*
21032529f56eSJonathan T. Looney * Copy out the verbose bit, if needed. Either way,
21042529f56eSJonathan T. Looney * increment the output pointer the correct amount.
21052529f56eSJonathan T. Looney */
21062529f56eSJonathan T. Looney if (log_entry->tlm_buf.tlb_eventflags & TLB_FLAG_VERBOSE) {
21072529f56eSJonathan T. Looney error = tcp_log_copyout(sopt, &log_entry->tlm_v,
21082529f56eSJonathan T. Looney out_entry->tlb_verbose,
21092529f56eSJonathan T. Looney sizeof(struct tcp_log_verbose));
21102529f56eSJonathan T. Looney if (error)
21112529f56eSJonathan T. Looney break;
21122529f56eSJonathan T. Looney out_entry = (struct tcp_log_buffer *)
21132529f56eSJonathan T. Looney (((uint8_t *) (out_entry + 1)) +
21142529f56eSJonathan T. Looney sizeof(struct tcp_log_verbose));
21152529f56eSJonathan T. Looney } else
21162529f56eSJonathan T. Looney out_entry++;
21172529f56eSJonathan T. Looney }
21182529f56eSJonathan T. Looney *end = out_entry;
21192529f56eSJonathan T. Looney KASSERT(error || count == 0,
21202529f56eSJonathan T. Looney ("%s:%d: Less than expected count (%d) processing list %p"
21212529f56eSJonathan T. Looney " (%d remain)", __func__, __LINE__, orig_count,
21222529f56eSJonathan T. Looney log_tailqp, count));
21232529f56eSJonathan T. Looney
21242529f56eSJonathan T. Looney return (error);
21252529f56eSJonathan T. Looney }
21262529f56eSJonathan T. Looney
21272529f56eSJonathan T. Looney /*
21282529f56eSJonathan T. Looney * Copy out the buffer. Note that we do incremental copying, so
21292529f56eSJonathan T. Looney * sooptcopyout() won't work. However, the goal is to produce the same
21302529f56eSJonathan T. Looney * end result as if we copied in the entire user buffer, updated it,
21312529f56eSJonathan T. Looney * and then used sooptcopyout() to copy it out.
21322529f56eSJonathan T. Looney *
21332529f56eSJonathan T. Looney * NOTE: This should be called with a write lock on the PCB; however,
21342529f56eSJonathan T. Looney * the function will drop it after it extracts the data from the TCPCB.
21352529f56eSJonathan T. Looney */
21362529f56eSJonathan T. Looney int
21372529f56eSJonathan T. Looney tcp_log_getlogbuf(struct sockopt *sopt, struct tcpcb *tp)
21382529f56eSJonathan T. Looney {
21392529f56eSJonathan T. Looney struct tcp_log_stailq log_tailq;
21402529f56eSJonathan T. Looney struct tcp_log_mem *log_entry, *log_next;
21412529f56eSJonathan T. Looney struct tcp_log_buffer *out_entry;
21429eb0e832SGleb Smirnoff struct inpcb *inp = tptoinpcb(tp);
21432529f56eSJonathan T. Looney size_t outsize, entrysize;
21442529f56eSJonathan T. Looney int error, outnum;
21452529f56eSJonathan T. Looney
21469eb0e832SGleb Smirnoff INP_WLOCK_ASSERT(inp);
21472529f56eSJonathan T. Looney
21482529f56eSJonathan T. Looney /*
21492529f56eSJonathan T. Looney * Determine which log entries will fit in the buffer. As an
21502529f56eSJonathan T. Looney * optimization, skip this if all the entries will clearly fit
21512529f56eSJonathan T. Looney * in the buffer. (However, get an exact size if we are using
21522529f56eSJonathan T. Looney * INVARIANTS.)
21532529f56eSJonathan T. Looney */
21542529f56eSJonathan T. Looney #ifndef INVARIANTS
21552529f56eSJonathan T. Looney if (sopt->sopt_valsize / (sizeof(struct tcp_log_buffer) +
21562529f56eSJonathan T. Looney sizeof(struct tcp_log_verbose)) >= tp->t_lognum) {
21572529f56eSJonathan T. Looney log_entry = STAILQ_LAST(&tp->t_logs, tcp_log_mem, tlm_queue);
21582529f56eSJonathan T. Looney log_next = NULL;
21592529f56eSJonathan T. Looney outsize = 0;
21602529f56eSJonathan T. Looney outnum = tp->t_lognum;
21612529f56eSJonathan T. Looney } else {
21622529f56eSJonathan T. Looney #endif
21632529f56eSJonathan T. Looney outsize = outnum = 0;
21642529f56eSJonathan T. Looney log_entry = NULL;
21652529f56eSJonathan T. Looney STAILQ_FOREACH(log_next, &tp->t_logs, tlm_queue) {
21662529f56eSJonathan T. Looney entrysize = sizeof(struct tcp_log_buffer);
21672529f56eSJonathan T. Looney if (log_next->tlm_buf.tlb_eventflags &
21682529f56eSJonathan T. Looney TLB_FLAG_VERBOSE)
21692529f56eSJonathan T. Looney entrysize += sizeof(struct tcp_log_verbose);
21702529f56eSJonathan T. Looney if ((sopt->sopt_valsize - outsize) < entrysize)
21712529f56eSJonathan T. Looney break;
21722529f56eSJonathan T. Looney outsize += entrysize;
21732529f56eSJonathan T. Looney outnum++;
21742529f56eSJonathan T. Looney log_entry = log_next;
21752529f56eSJonathan T. Looney }
21762529f56eSJonathan T. Looney KASSERT(outsize <= sopt->sopt_valsize,
21772529f56eSJonathan T. Looney ("%s: calculated output size (%zu) greater than available "
21782529f56eSJonathan T. Looney "space (%zu)", __func__, outsize, sopt->sopt_valsize));
21792529f56eSJonathan T. Looney #ifndef INVARIANTS
21802529f56eSJonathan T. Looney }
21812529f56eSJonathan T. Looney #endif
21822529f56eSJonathan T. Looney
21832529f56eSJonathan T. Looney /*
21842529f56eSJonathan T. Looney * Copy traditional sooptcopyout() behavior: if sopt->sopt_val
21852529f56eSJonathan T. Looney * is NULL, silently skip the copy. However, in this case, we
21862529f56eSJonathan T. Looney * will leave the list alone and return. Functionally, this
21872529f56eSJonathan T. Looney * gives userspace a way to poll for an approximate buffer
21882529f56eSJonathan T. Looney * size they will need to get the log entries.
21892529f56eSJonathan T. Looney */
21902529f56eSJonathan T. Looney if (sopt->sopt_val == NULL) {
21912529f56eSJonathan T. Looney INP_WUNLOCK(inp);
21922529f56eSJonathan T. Looney if (outsize == 0) {
21932529f56eSJonathan T. Looney outsize = outnum * (sizeof(struct tcp_log_buffer) +
21942529f56eSJonathan T. Looney sizeof(struct tcp_log_verbose));
21952529f56eSJonathan T. Looney }
21962529f56eSJonathan T. Looney if (sopt->sopt_valsize > outsize)
21972529f56eSJonathan T. Looney sopt->sopt_valsize = outsize;
21982529f56eSJonathan T. Looney return (0);
21992529f56eSJonathan T. Looney }
22002529f56eSJonathan T. Looney
22012529f56eSJonathan T. Looney /*
22022529f56eSJonathan T. Looney * Break apart the list. We'll save the ones we want to copy
22032529f56eSJonathan T. Looney * out locally and remove them from the TCPCB list. We can
22042529f56eSJonathan T. Looney * then drop the INPCB lock while we do the copyout.
22052529f56eSJonathan T. Looney *
22062529f56eSJonathan T. Looney * There are roughly three cases:
22072529f56eSJonathan T. Looney * 1. There was nothing to copy out. That's easy: drop the
22082529f56eSJonathan T. Looney * lock and return.
22092529f56eSJonathan T. Looney * 2. We are copying out the entire list. Again, that's easy:
22102529f56eSJonathan T. Looney * move the whole list.
22112529f56eSJonathan T. Looney * 3. We are copying out a partial list. That's harder. We
22122529f56eSJonathan T. Looney * need to update the list book-keeping entries.
22132529f56eSJonathan T. Looney */
22142529f56eSJonathan T. Looney if (log_entry != NULL && log_next == NULL) {
22152529f56eSJonathan T. Looney /* Move entire list. */
22162529f56eSJonathan T. Looney KASSERT(outnum == tp->t_lognum,
22172529f56eSJonathan T. Looney ("%s:%d: outnum (%d) should match tp->t_lognum (%d)",
22182529f56eSJonathan T. Looney __func__, __LINE__, outnum, tp->t_lognum));
22192529f56eSJonathan T. Looney log_tailq = tp->t_logs;
22202529f56eSJonathan T. Looney tp->t_lognum = 0;
22212529f56eSJonathan T. Looney STAILQ_INIT(&tp->t_logs);
22222529f56eSJonathan T. Looney } else if (log_entry != NULL) {
22232529f56eSJonathan T. Looney /* Move partial list. */
22242529f56eSJonathan T. Looney KASSERT(outnum < tp->t_lognum,
22252529f56eSJonathan T. Looney ("%s:%d: outnum (%d) not less than tp->t_lognum (%d)",
22262529f56eSJonathan T. Looney __func__, __LINE__, outnum, tp->t_lognum));
22272529f56eSJonathan T. Looney STAILQ_FIRST(&log_tailq) = STAILQ_FIRST(&tp->t_logs);
22282529f56eSJonathan T. Looney STAILQ_FIRST(&tp->t_logs) = STAILQ_NEXT(log_entry, tlm_queue);
22292529f56eSJonathan T. Looney KASSERT(STAILQ_NEXT(log_entry, tlm_queue) != NULL,
22302529f56eSJonathan T. Looney ("%s:%d: tp->t_logs is unexpectedly shorter than expected "
22312529f56eSJonathan T. Looney "(tp: %p, log_tailq: %p, outnum: %d, tp->t_lognum: %d)",
22322529f56eSJonathan T. Looney __func__, __LINE__, tp, &log_tailq, outnum, tp->t_lognum));
22332529f56eSJonathan T. Looney STAILQ_NEXT(log_entry, tlm_queue) = NULL;
22342529f56eSJonathan T. Looney log_tailq.stqh_last = &STAILQ_NEXT(log_entry, tlm_queue);
22352529f56eSJonathan T. Looney tp->t_lognum -= outnum;
22362529f56eSJonathan T. Looney } else
22372529f56eSJonathan T. Looney STAILQ_INIT(&log_tailq);
22382529f56eSJonathan T. Looney
22392529f56eSJonathan T. Looney /* Drop the PCB lock. */
22402529f56eSJonathan T. Looney INP_WUNLOCK(inp);
22412529f56eSJonathan T. Looney
22422529f56eSJonathan T. Looney /* Copy the data out. */
22432529f56eSJonathan T. Looney error = tcp_log_logs_to_buf(sopt, &log_tailq, &out_entry, outnum);
22442529f56eSJonathan T. Looney
22452529f56eSJonathan T. Looney if (error) {
22462529f56eSJonathan T. Looney /* Restore list */
22472529f56eSJonathan T. Looney INP_WLOCK(inp);
224853af6903SGleb Smirnoff if ((inp->inp_flags & INP_DROPPED) == 0) {
22492529f56eSJonathan T. Looney tp = intotcpcb(inp);
22502529f56eSJonathan T. Looney
22512529f56eSJonathan T. Looney /* Merge the two lists. */
22522529f56eSJonathan T. Looney STAILQ_CONCAT(&log_tailq, &tp->t_logs);
22532529f56eSJonathan T. Looney tp->t_logs = log_tailq;
22542529f56eSJonathan T. Looney tp->t_lognum += outnum;
22552529f56eSJonathan T. Looney }
22562529f56eSJonathan T. Looney INP_WUNLOCK(inp);
22572529f56eSJonathan T. Looney } else {
22582529f56eSJonathan T. Looney /* Sanity check entries */
22592529f56eSJonathan T. Looney KASSERT(((caddr_t)out_entry - (caddr_t)sopt->sopt_val) ==
22602529f56eSJonathan T. Looney outsize, ("%s: Actual output size (%zu) != "
22612529f56eSJonathan T. Looney "calculated output size (%zu)", __func__,
22622529f56eSJonathan T. Looney (size_t)((caddr_t)out_entry - (caddr_t)sopt->sopt_val),
22632529f56eSJonathan T. Looney outsize));
22642529f56eSJonathan T. Looney
22652529f56eSJonathan T. Looney /* Free the entries we just copied out. */
22662529f56eSJonathan T. Looney STAILQ_FOREACH_SAFE(log_entry, &log_tailq, tlm_queue, log_next) {
22672529f56eSJonathan T. Looney tcp_log_entry_refcnt_rem(log_entry);
22682529f56eSJonathan T. Looney uma_zfree(tcp_log_zone, log_entry);
22692529f56eSJonathan T. Looney }
22702529f56eSJonathan T. Looney }
22712529f56eSJonathan T. Looney
22722529f56eSJonathan T. Looney sopt->sopt_valsize = (size_t)((caddr_t)out_entry -
22732529f56eSJonathan T. Looney (caddr_t)sopt->sopt_val);
22742529f56eSJonathan T. Looney return (error);
22752529f56eSJonathan T. Looney }
22762529f56eSJonathan T. Looney
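/*
 * Destructor for a log device queue entry: free the log entries it still
 * holds, the serialized output buffer (if one was allocated), and finally
 * the queue entry itself.
 */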
22772529f56eSJonathan T. Looney static void
22782529f56eSJonathan T. Looney tcp_log_free_queue(struct tcp_log_dev_queue *param)
22792529f56eSJonathan T. Looney {
22802529f56eSJonathan T. Looney struct tcp_log_dev_log_queue *entry;
22812529f56eSJonathan T. Looney
22822529f56eSJonathan T. Looney KASSERT(param != NULL, ("%s: called with NULL param", __func__));
22832529f56eSJonathan T. Looney if (param == NULL)
22842529f56eSJonathan T. Looney return;
22852529f56eSJonathan T. Looney
22862529f56eSJonathan T. Looney entry = (struct tcp_log_dev_log_queue *)param;
22872529f56eSJonathan T. Looney
22882529f56eSJonathan T. Looney /* Free the entries. */
22892529f56eSJonathan T. Looney tcp_log_free_entries(&entry->tldl_entries, &entry->tldl_count);
22902529f56eSJonathan T. Looney
22912529f56eSJonathan T. Looney /* Free the buffer, if it is allocated. */
22922529f56eSJonathan T. Looney if (entry->tldl_common.tldq_buf != NULL)
22932529f56eSJonathan T. Looney free(entry->tldl_common.tldq_buf, M_TCPLOGDEV);
22942529f56eSJonathan T. Looney
22952529f56eSJonathan T. Looney /* Free the queue entry. */
22962529f56eSJonathan T. Looney free(entry, M_TCPLOGDEV);
22972529f56eSJonathan T. Looney }
22982529f56eSJonathan T. Looney
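/*
 * Transform a queued set of log entries into a single contiguous buffer
 * for the log device: allocate a worst-case sized buffer, serialize the
 * entries after a struct tcp_log_header, and fill in the header fields
 * (version, type, length, endpoints, ID, tag, and reason). The individual
 * log entries are freed once they have been serialized.
 */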
22992529f56eSJonathan T. Looney static struct tcp_log_common_header *
23002529f56eSJonathan T. Looney tcp_log_expandlogbuf(struct tcp_log_dev_queue *param)
23012529f56eSJonathan T. Looney {
23022529f56eSJonathan T. Looney struct tcp_log_dev_log_queue *entry;
23032529f56eSJonathan T. Looney struct tcp_log_header *hdr;
23042529f56eSJonathan T. Looney uint8_t *end;
23052529f56eSJonathan T. Looney struct sockopt sopt;
23062529f56eSJonathan T. Looney int error;
23072529f56eSJonathan T. Looney
23082529f56eSJonathan T. Looney entry = (struct tcp_log_dev_log_queue *)param;
23092529f56eSJonathan T. Looney
23102529f56eSJonathan T. Looney /* Take a worst-case guess at space needs. */
23112529f56eSJonathan T. Looney sopt.sopt_valsize = sizeof(struct tcp_log_header) +
23122529f56eSJonathan T. Looney entry->tldl_count * (sizeof(struct tcp_log_buffer) +
23132529f56eSJonathan T. Looney sizeof(struct tcp_log_verbose));
23142529f56eSJonathan T. Looney hdr = malloc(sopt.sopt_valsize, M_TCPLOGDEV, M_NOWAIT);
23152529f56eSJonathan T. Looney if (hdr == NULL) {
23162529f56eSJonathan T. Looney #ifdef TCPLOG_DEBUG_COUNTERS
23172529f56eSJonathan T. Looney counter_u64_add(tcp_log_que_fail5, entry->tldl_count);
23182529f56eSJonathan T. Looney #endif
23192529f56eSJonathan T. Looney return (NULL);
23202529f56eSJonathan T. Looney }
23212529f56eSJonathan T. Looney sopt.sopt_val = hdr + 1;
23222529f56eSJonathan T. Looney sopt.sopt_valsize -= sizeof(struct tcp_log_header);
23232529f56eSJonathan T. Looney sopt.sopt_td = NULL;
23242529f56eSJonathan T. Looney
23252529f56eSJonathan T. Looney error = tcp_log_logs_to_buf(&sopt, &entry->tldl_entries,
23262529f56eSJonathan T. Looney (struct tcp_log_buffer **)&end, entry->tldl_count);
23272529f56eSJonathan T. Looney if (error) {
23282529f56eSJonathan T. Looney free(hdr, M_TCPLOGDEV);
23292529f56eSJonathan T. Looney return (NULL);
23302529f56eSJonathan T. Looney }
23312529f56eSJonathan T. Looney
23322529f56eSJonathan T. Looney /* Free the entries. */
23332529f56eSJonathan T. Looney tcp_log_free_entries(&entry->tldl_entries, &entry->tldl_count);
23342529f56eSJonathan T. Looney entry->tldl_count = 0;
23352529f56eSJonathan T. Looney
23362529f56eSJonathan T. Looney memset(hdr, 0, sizeof(struct tcp_log_header));
23372529f56eSJonathan T. Looney hdr->tlh_version = TCP_LOG_BUF_VER;
23382529f56eSJonathan T. Looney hdr->tlh_type = TCP_LOG_DEV_TYPE_BBR;
23392529f56eSJonathan T. Looney hdr->tlh_length = end - (uint8_t *)hdr;
23402529f56eSJonathan T. Looney hdr->tlh_ie = entry->tldl_ie;
23412529f56eSJonathan T. Looney hdr->tlh_af = entry->tldl_af;
23422529f56eSJonathan T. Looney getboottime(&hdr->tlh_offset);
23432529f56eSJonathan T. Looney strlcpy(hdr->tlh_id, entry->tldl_id, TCP_LOG_ID_LEN);
2344a9a08eceSRandall Stewart strlcpy(hdr->tlh_tag, entry->tldl_tag, TCP_LOG_TAG_LEN);
23452529f56eSJonathan T. Looney strlcpy(hdr->tlh_reason, entry->tldl_reason, TCP_LOG_REASON_LEN);
23462529f56eSJonathan T. Looney return ((struct tcp_log_common_header *)hdr);
23472529f56eSJonathan T. Looney }
23482529f56eSJonathan T. Looney
23492529f56eSJonathan T. Looney /*
23502529f56eSJonathan T. Looney * Queue the tcpcb's log buffer for transmission via the log buffer facility.
23512529f56eSJonathan T. Looney *
23522529f56eSJonathan T. Looney * NOTE: This should be called with a write lock on the PCB.
23532529f56eSJonathan T. Looney *
23542529f56eSJonathan T. Looney * how should be M_WAITOK or M_NOWAIT. If M_WAITOK, the function will drop
23552529f56eSJonathan T. Looney * and reacquire the INP lock if it needs to do so.
23562529f56eSJonathan T. Looney *
23572529f56eSJonathan T. Looney * If force is false, this will only dump auto-logged sessions if
23582529f56eSJonathan T. Looney * tcp_log_auto_all is true or if there is a log ID defined for the session.
23592529f56eSJonathan T. Looney */
23602529f56eSJonathan T. Looney int
23612529f56eSJonathan T. Looney tcp_log_dump_tp_logbuf(struct tcpcb *tp, char *reason, int how, bool force)
23622529f56eSJonathan T. Looney {
23632529f56eSJonathan T. Looney struct tcp_log_dev_log_queue *entry;
23649eb0e832SGleb Smirnoff struct inpcb *inp = tptoinpcb(tp);
23652529f56eSJonathan T. Looney #ifdef TCPLOG_DEBUG_COUNTERS
23662529f56eSJonathan T. Looney int num_entries;
23672529f56eSJonathan T. Looney #endif
23682529f56eSJonathan T. Looney
23692529f56eSJonathan T. Looney INP_WLOCK_ASSERT(inp);
23702529f56eSJonathan T. Looney
23712529f56eSJonathan T. Looney /* If there are no log entries, there is nothing to do. */
23722529f56eSJonathan T. Looney if (tp->t_lognum == 0)
23732529f56eSJonathan T. Looney return (0);
23742529f56eSJonathan T. Looney
23752529f56eSJonathan T. Looney /* Check for a log ID. */
23762529f56eSJonathan T. Looney if (tp->t_lib == NULL && (tp->t_flags2 & TF2_LOG_AUTO) &&
23772529f56eSJonathan T. Looney !tcp_log_auto_all && !force) {
23782529f56eSJonathan T. Looney struct tcp_log_mem *log_entry;
23792529f56eSJonathan T. Looney
23802529f56eSJonathan T. Looney /*
23812529f56eSJonathan T. Looney * We needed a log ID and none was found. Free the log entries
23822529f56eSJonathan T. Looney * and return success. Also, cancel further logging. If the
23832529f56eSJonathan T. Looney * session doesn't have a log ID by now, we'll assume it isn't
23842529f56eSJonathan T. Looney * going to get one.
23852529f56eSJonathan T. Looney */
23862529f56eSJonathan T. Looney while ((log_entry = STAILQ_FIRST(&tp->t_logs)) != NULL)
23872529f56eSJonathan T. Looney tcp_log_remove_log_head(tp, log_entry);
23882529f56eSJonathan T. Looney KASSERT(tp->t_lognum == 0,
23892529f56eSJonathan T. Looney ("%s: After freeing entries, tp->t_lognum=%d (expected 0)",
23902529f56eSJonathan T. Looney __func__, tp->t_lognum));
239169c7c811SRandall Stewart tp->_t_logstate = TCP_LOG_STATE_OFF;
23922529f56eSJonathan T. Looney return (0);
23932529f56eSJonathan T. Looney }
23942529f56eSJonathan T. Looney
23952529f56eSJonathan T. Looney /*
23962529f56eSJonathan T. Looney * Allocate memory. If we must wait, we'll need to drop the locks
23972529f56eSJonathan T. Looney * and reacquire them (and do all the related business that goes
23982529f56eSJonathan T. Looney * along with that).
23992529f56eSJonathan T. Looney */
24002529f56eSJonathan T. Looney entry = malloc(sizeof(struct tcp_log_dev_log_queue), M_TCPLOGDEV,
24012529f56eSJonathan T. Looney M_NOWAIT);
24022529f56eSJonathan T. Looney if (entry == NULL && (how & M_NOWAIT)) {
24032529f56eSJonathan T. Looney #ifdef TCPLOG_DEBUG_COUNTERS
24042529f56eSJonathan T. Looney counter_u64_add(tcp_log_que_fail3, 1);
24052529f56eSJonathan T. Looney #endif
24062529f56eSJonathan T. Looney return (ENOBUFS);
24072529f56eSJonathan T. Looney }
24082529f56eSJonathan T. Looney if (entry == NULL) {
24092529f56eSJonathan T. Looney INP_WUNLOCK(inp);
24102529f56eSJonathan T. Looney entry = malloc(sizeof(struct tcp_log_dev_log_queue),
24112529f56eSJonathan T. Looney M_TCPLOGDEV, M_WAITOK);
24122529f56eSJonathan T. Looney INP_WLOCK(inp);
24132529f56eSJonathan T. Looney /*
24142529f56eSJonathan T. Looney * Note that this check is slightly overly-restrictive in
24152529f56eSJonathan T. Looney * that the TCB can survive this event.
24162529f56eSJonathan T. Looney * However, there is currently not a good way to ensure
24172529f56eSJonathan T. Looney * that is the case. So, if we hit this M_WAIT path, we
24182529f56eSJonathan T. Looney * may end up dropping some entries. That seems like a
24192529f56eSJonathan T. Looney * small price to pay for safety.
24202529f56eSJonathan T. Looney */
242153af6903SGleb Smirnoff if (inp->inp_flags & INP_DROPPED) {
24222529f56eSJonathan T. Looney free(entry, M_TCPLOGDEV);
24232529f56eSJonathan T. Looney #ifdef TCPLOG_DEBUG_COUNTERS
24242529f56eSJonathan T. Looney counter_u64_add(tcp_log_que_fail2, 1);
24252529f56eSJonathan T. Looney #endif
24262529f56eSJonathan T. Looney return (ECONNRESET);
24272529f56eSJonathan T. Looney }
24282529f56eSJonathan T. Looney tp = intotcpcb(inp);
24292529f56eSJonathan T. Looney if (tp->t_lognum == 0) {
24302529f56eSJonathan T. Looney free(entry, M_TCPLOGDEV);
24312529f56eSJonathan T. Looney return (0);
24322529f56eSJonathan T. Looney }
24332529f56eSJonathan T. Looney }
24342529f56eSJonathan T. Looney
24352529f56eSJonathan T. Looney /* Fill in the unique parts of the queue entry. */
2436a9a08eceSRandall Stewart if (tp->t_lib != NULL) {
24372529f56eSJonathan T. Looney strlcpy(entry->tldl_id, tp->t_lib->tlb_id, TCP_LOG_ID_LEN);
2438a9a08eceSRandall Stewart strlcpy(entry->tldl_tag, tp->t_lib->tlb_tag, TCP_LOG_TAG_LEN);
2439a9a08eceSRandall Stewart } else {
24402529f56eSJonathan T. Looney strlcpy(entry->tldl_id, "UNKNOWN", TCP_LOG_ID_LEN);
2441a9a08eceSRandall Stewart strlcpy(entry->tldl_tag, "UNKNOWN", TCP_LOG_TAG_LEN);
2442a9a08eceSRandall Stewart }
24432529f56eSJonathan T. Looney if (reason != NULL)
24442529f56eSJonathan T. Looney strlcpy(entry->tldl_reason, reason, TCP_LOG_REASON_LEN);
24452529f56eSJonathan T. Looney else
2446ed505f89SMichael Tuexen strlcpy(entry->tldl_reason, "UNKNOWN", TCP_LOG_REASON_LEN);
24472529f56eSJonathan T. Looney entry->tldl_ie = inp->inp_inc.inc_ie;
24482529f56eSJonathan T. Looney if (inp->inp_inc.inc_flags & INC_ISIPV6)
24492529f56eSJonathan T. Looney entry->tldl_af = AF_INET6;
24502529f56eSJonathan T. Looney else
24512529f56eSJonathan T. Looney entry->tldl_af = AF_INET;
24522529f56eSJonathan T. Looney entry->tldl_entries = tp->t_logs;
24532529f56eSJonathan T. Looney entry->tldl_count = tp->t_lognum;
24542529f56eSJonathan T. Looney
24552529f56eSJonathan T. Looney /* Fill in the common parts of the queue entry. */
24562529f56eSJonathan T. Looney entry->tldl_common.tldq_buf = NULL;
24572529f56eSJonathan T. Looney entry->tldl_common.tldq_xform = tcp_log_expandlogbuf;
24582529f56eSJonathan T. Looney entry->tldl_common.tldq_dtor = tcp_log_free_queue;
24592529f56eSJonathan T. Looney
24602529f56eSJonathan T. Looney /* Clear the log data from the TCPCB. */
24612529f56eSJonathan T. Looney #ifdef TCPLOG_DEBUG_COUNTERS
24622529f56eSJonathan T. Looney num_entries = tp->t_lognum;
24632529f56eSJonathan T. Looney #endif
24642529f56eSJonathan T. Looney tp->t_lognum = 0;
24652529f56eSJonathan T. Looney STAILQ_INIT(&tp->t_logs);
24662529f56eSJonathan T. Looney
24672529f56eSJonathan T. Looney /* Add the entry. If no one is listening, free the entry. */
24682529f56eSJonathan T. Looney if (tcp_log_dev_add_log((struct tcp_log_dev_queue *)entry)) {
24692529f56eSJonathan T. Looney tcp_log_free_queue((struct tcp_log_dev_queue *)entry);
24702529f56eSJonathan T. Looney #ifdef TCPLOG_DEBUG_COUNTERS
24712529f56eSJonathan T. Looney counter_u64_add(tcp_log_que_fail1, num_entries);
24722529f56eSJonathan T. Looney } else {
24732529f56eSJonathan T. Looney counter_u64_add(tcp_log_queued, num_entries);
24742529f56eSJonathan T. Looney #endif
24752529f56eSJonathan T. Looney }
24762529f56eSJonathan T. Looney return (0);
24772529f56eSJonathan T. Looney }
24782529f56eSJonathan T. Looney
24792529f56eSJonathan T. Looney /*
24802529f56eSJonathan T. Looney * Queue the log_id_node's log buffers for transmission via the log buffer
24812529f56eSJonathan T. Looney * facility.
24822529f56eSJonathan T. Looney *
24832529f56eSJonathan T. Looney * NOTE: This should be called with the bucket locked and referenced.
24842529f56eSJonathan T. Looney *
24852529f56eSJonathan T. Looney * how should be M_WAITOK or M_NOWAIT. If M_WAITOK, the function will drop
24862529f56eSJonathan T. Looney * and reacquire the bucket lock if it needs to do so. (The caller must
24872529f56eSJonathan T. Looney * ensure that the tln is no longer on any lists so no one else will mess
24882529f56eSJonathan T. Looney * with this while the lock is dropped!)
24892529f56eSJonathan T. Looney */
24902529f56eSJonathan T. Looney static int
24912529f56eSJonathan T. Looney tcp_log_dump_node_logbuf(struct tcp_log_id_node *tln, char *reason, int how)
24922529f56eSJonathan T. Looney {
24932529f56eSJonathan T. Looney struct tcp_log_dev_log_queue *entry;
24942529f56eSJonathan T. Looney struct tcp_log_id_bucket *tlb;
24952529f56eSJonathan T. Looney
24962529f56eSJonathan T. Looney tlb = tln->tln_bucket;
24972529f56eSJonathan T. Looney TCPID_BUCKET_LOCK_ASSERT(tlb);
24982529f56eSJonathan T. Looney KASSERT(tlb->tlb_refcnt > 0,
24992529f56eSJonathan T. Looney ("%s:%d: Called with unreferenced bucket (tln=%p, tlb=%p)",
25002529f56eSJonathan T. Looney __func__, __LINE__, tln, tlb));
25012529f56eSJonathan T. Looney KASSERT(tln->tln_closed,
25022529f56eSJonathan T. Looney ("%s:%d: Called for node with tln_closed==false (tln=%p)",
25032529f56eSJonathan T. Looney __func__, __LINE__, tln));
25042529f56eSJonathan T. Looney
25052529f56eSJonathan T. Looney /* If there are no log entries, there is nothing to do. */
25062529f56eSJonathan T. Looney if (tln->tln_count == 0)
25072529f56eSJonathan T. Looney return (0);
25082529f56eSJonathan T. Looney
25092529f56eSJonathan T. Looney /*
25102529f56eSJonathan T. Looney * Allocate memory. If we must wait, we'll need to drop the locks
25112529f56eSJonathan T. Looney * and reacquire them (and do all the related business that goes
25122529f56eSJonathan T. Looney * along with that).
25132529f56eSJonathan T. Looney */
25142529f56eSJonathan T. Looney entry = malloc(sizeof(struct tcp_log_dev_log_queue), M_TCPLOGDEV,
25152529f56eSJonathan T. Looney M_NOWAIT);
25162529f56eSJonathan T. Looney if (entry == NULL && (how & M_NOWAIT))
25172529f56eSJonathan T. Looney return (ENOBUFS);
25182529f56eSJonathan T. Looney if (entry == NULL) {
25192529f56eSJonathan T. Looney TCPID_BUCKET_UNLOCK(tlb);
25202529f56eSJonathan T. Looney entry = malloc(sizeof(struct tcp_log_dev_log_queue),
25212529f56eSJonathan T. Looney M_TCPLOGDEV, M_WAITOK);
25222529f56eSJonathan T. Looney TCPID_BUCKET_LOCK(tlb);
25232529f56eSJonathan T. Looney }
25242529f56eSJonathan T. Looney
25252529f56eSJonathan T. Looney /* Fill in the common parts of the queue entry. */
25262529f56eSJonathan T. Looney entry->tldl_common.tldq_buf = NULL;
25272529f56eSJonathan T. Looney entry->tldl_common.tldq_xform = tcp_log_expandlogbuf;
25282529f56eSJonathan T. Looney entry->tldl_common.tldq_dtor = tcp_log_free_queue;
25292529f56eSJonathan T. Looney
25302529f56eSJonathan T. Looney /* Fill in the unique parts of the queue entry. */
25312529f56eSJonathan T. Looney strlcpy(entry->tldl_id, tlb->tlb_id, TCP_LOG_ID_LEN);
2532a9a08eceSRandall Stewart strlcpy(entry->tldl_tag, tlb->tlb_tag, TCP_LOG_TAG_LEN);
25332529f56eSJonathan T. Looney if (reason != NULL)
25342529f56eSJonathan T. Looney strlcpy(entry->tldl_reason, reason, TCP_LOG_REASON_LEN);
25352529f56eSJonathan T. Looney else
2536ed505f89SMichael Tuexen strlcpy(entry->tldl_reason, "UNKNOWN", TCP_LOG_REASON_LEN);
25372529f56eSJonathan T. Looney entry->tldl_ie = tln->tln_ie;
25382529f56eSJonathan T. Looney entry->tldl_entries = tln->tln_entries;
25392529f56eSJonathan T. Looney entry->tldl_count = tln->tln_count;
25402529f56eSJonathan T. Looney entry->tldl_af = tln->tln_af;
25412529f56eSJonathan T. Looney
25422529f56eSJonathan T. Looney /* Add the entry. If no one is listening, free the entry. */
25432529f56eSJonathan T. Looney if (tcp_log_dev_add_log((struct tcp_log_dev_queue *)entry))
25442529f56eSJonathan T. Looney tcp_log_free_queue((struct tcp_log_dev_queue *)entry);
25452529f56eSJonathan T. Looney
25462529f56eSJonathan T. Looney return (0);
25472529f56eSJonathan T. Looney }
25482529f56eSJonathan T. Looney
25492529f56eSJonathan T. Looney /*
25502529f56eSJonathan T. Looney * Queue the log buffers for all sessions in a bucket for transmission via
25512529f56eSJonathan T. Looney * the log buffer facility.
25522529f56eSJonathan T. Looney *
25532529f56eSJonathan T. Looney * NOTE: This should be called with a locked bucket; however, the function
25542529f56eSJonathan T. Looney * will drop the lock.
25552529f56eSJonathan T. Looney */
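/* Number of nodes we can stash on the stack if we must drop locks. */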
25562529f56eSJonathan T. Looney #define LOCAL_SAVE 10
25572529f56eSJonathan T. Looney static void
25582529f56eSJonathan T. Looney tcp_log_dumpbucketlogs(struct tcp_log_id_bucket *tlb, char *reason)
25592529f56eSJonathan T. Looney {
25602529f56eSJonathan T. Looney struct tcp_log_id_node local_entries[LOCAL_SAVE];
25612529f56eSJonathan T. Looney struct inpcb *inp;
25622529f56eSJonathan T. Looney struct tcpcb *tp;
25632529f56eSJonathan T. Looney struct tcp_log_id_node *cur_tln, *prev_tln, *tmp_tln;
25642529f56eSJonathan T. Looney int i, num_local_entries, tree_locked;
25652529f56eSJonathan T. Looney bool expireq_locked;
25662529f56eSJonathan T. Looney
25672529f56eSJonathan T. Looney TCPID_BUCKET_LOCK_ASSERT(tlb);
25682529f56eSJonathan T. Looney
25692529f56eSJonathan T. Looney /*
25702529f56eSJonathan T. Looney * Take a reference on the bucket to keep it from disappearing until
25712529f56eSJonathan T. Looney * we are done.
25722529f56eSJonathan T. Looney */
25732529f56eSJonathan T. Looney TCPID_BUCKET_REF(tlb);
25742529f56eSJonathan T. Looney
25752529f56eSJonathan T. Looney /*
25762529f56eSJonathan T. Looney * We'll try to create these without dropping locks. However, we
25772529f56eSJonathan T. Looney * might very well need to drop locks to get memory. If that's the
25782529f56eSJonathan T. Looney * case, we'll save up to 10 on the stack, and sacrifice the rest.
25792529f56eSJonathan T. Looney * (Otherwise, we need to worry about finding our place again in a
25802529f56eSJonathan T. Looney * potentially changed list. It just doesn't seem worth the trouble
25812529f56eSJonathan T. Looney * to do that.)
25822529f56eSJonathan T. Looney */
25832529f56eSJonathan T. Looney expireq_locked = false;
25842529f56eSJonathan T. Looney num_local_entries = 0;
25852529f56eSJonathan T. Looney prev_tln = NULL;
25862529f56eSJonathan T. Looney tree_locked = TREE_UNLOCKED;
25872529f56eSJonathan T. Looney SLIST_FOREACH_SAFE(cur_tln, &tlb->tlb_head, tln_list, tmp_tln) {
25882529f56eSJonathan T. Looney /*
25892529f56eSJonathan T. Looney * If this isn't associated with a TCPCB, we can pull it off
25902529f56eSJonathan T. Looney * the list now. We need to be careful that the expire timer
25912529f56eSJonathan T. Looney * hasn't already taken ownership (tln_expiretime == SBT_MAX).
25922529f56eSJonathan T. Looney * If so, we let the expire timer code free the data.
25932529f56eSJonathan T. Looney */
25942529f56eSJonathan T. Looney if (cur_tln->tln_closed) {
25952529f56eSJonathan T. Looney no_inp:
25962529f56eSJonathan T. Looney /*
25972529f56eSJonathan T. Looney * Get the expireq lock so we can get a consistent
25982529f56eSJonathan T. Looney * read of tln_expiretime and so we can remove this
25992529f56eSJonathan T. Looney * from the expireq.
26002529f56eSJonathan T. Looney */
26012529f56eSJonathan T. Looney if (!expireq_locked) {
26022529f56eSJonathan T. Looney TCPLOG_EXPIREQ_LOCK();
26032529f56eSJonathan T. Looney expireq_locked = true;
26042529f56eSJonathan T. Looney }
26052529f56eSJonathan T. Looney
26062529f56eSJonathan T. Looney /*
26072529f56eSJonathan T. Looney * We ignore entries with tln_expiretime == SBT_MAX.
26082529f56eSJonathan T. Looney * The expire timer code already owns those.
26092529f56eSJonathan T. Looney */
26102529f56eSJonathan T. Looney KASSERT(cur_tln->tln_expiretime > (sbintime_t) 0,
26112529f56eSJonathan T. Looney ("%s:%d: node on the expire queue without positive "
26122529f56eSJonathan T. Looney "expire time", __func__, __LINE__));
26132529f56eSJonathan T. Looney if (cur_tln->tln_expiretime == SBT_MAX) {
26142529f56eSJonathan T. Looney prev_tln = cur_tln;
26152529f56eSJonathan T. Looney continue;
26162529f56eSJonathan T. Looney }
26172529f56eSJonathan T. Looney
26182529f56eSJonathan T. Looney /* Remove the entry from the expireq. */
26192529f56eSJonathan T. Looney STAILQ_REMOVE(&tcp_log_expireq_head, cur_tln,
26202529f56eSJonathan T. Looney tcp_log_id_node, tln_expireq);
26212529f56eSJonathan T. Looney
26222529f56eSJonathan T. Looney /* Remove the entry from the bucket. */
26232529f56eSJonathan T. Looney if (prev_tln != NULL)
26242529f56eSJonathan T. Looney SLIST_REMOVE_AFTER(prev_tln, tln_list);
26252529f56eSJonathan T. Looney else
26262529f56eSJonathan T. Looney SLIST_REMOVE_HEAD(&tlb->tlb_head, tln_list);
26272529f56eSJonathan T. Looney
26282529f56eSJonathan T. Looney /*
26292529f56eSJonathan T. Looney * Drop the INP and bucket reference counts. Due to
26302529f56eSJonathan T. Looney * lock-ordering rules, we need to drop the expire
26312529f56eSJonathan T. Looney * queue lock.
26322529f56eSJonathan T. Looney */
26332529f56eSJonathan T. Looney TCPLOG_EXPIREQ_UNLOCK();
26342529f56eSJonathan T. Looney expireq_locked = false;
26352529f56eSJonathan T. Looney
26362529f56eSJonathan T. Looney /* Drop the INP reference. */
26372529f56eSJonathan T. Looney INP_WLOCK(cur_tln->tln_inp);
26382529f56eSJonathan T. Looney if (!in_pcbrele_wlocked(cur_tln->tln_inp))
26392529f56eSJonathan T. Looney INP_WUNLOCK(cur_tln->tln_inp);
26402529f56eSJonathan T. Looney
26412529f56eSJonathan T. Looney if (tcp_log_unref_bucket(tlb, &tree_locked, NULL)) {
26422529f56eSJonathan T. Looney #ifdef INVARIANTS
26432529f56eSJonathan T. Looney panic("%s: Bucket refcount unexpectedly 0.",
26442529f56eSJonathan T. Looney __func__);
26452529f56eSJonathan T. Looney #endif
26462529f56eSJonathan T. Looney /*
26472529f56eSJonathan T. Looney * Recover as best we can: free the entry we
26482529f56eSJonathan T. Looney * own.
26492529f56eSJonathan T. Looney */
26502529f56eSJonathan T. Looney tcp_log_free_entries(&cur_tln->tln_entries,
26512529f56eSJonathan T. Looney &cur_tln->tln_count);
26528c47d8f5SAlan Somers uma_zfree(tcp_log_id_node_zone, cur_tln);
26532529f56eSJonathan T. Looney goto done;
26542529f56eSJonathan T. Looney }
26552529f56eSJonathan T. Looney
26562529f56eSJonathan T. Looney if (tcp_log_dump_node_logbuf(cur_tln, reason,
26572529f56eSJonathan T. Looney M_NOWAIT)) {
26582529f56eSJonathan T. Looney /*
26592529f56eSJonathan T. Looney * If we have space, save the entries locally.
26602529f56eSJonathan T. Looney * Otherwise, free them.
26612529f56eSJonathan T. Looney */
26622529f56eSJonathan T. Looney if (num_local_entries < LOCAL_SAVE) {
26632529f56eSJonathan T. Looney local_entries[num_local_entries] =
26642529f56eSJonathan T. Looney *cur_tln;
26652529f56eSJonathan T. Looney num_local_entries++;
26662529f56eSJonathan T. Looney } else {
26672529f56eSJonathan T. Looney tcp_log_free_entries(
26682529f56eSJonathan T. Looney &cur_tln->tln_entries,
26692529f56eSJonathan T. Looney &cur_tln->tln_count);
26702529f56eSJonathan T. Looney }
26712529f56eSJonathan T. Looney }
26722529f56eSJonathan T. Looney
26732529f56eSJonathan T. Looney /* No matter what, we are done with the node now. */
26748c47d8f5SAlan Somers uma_zfree(tcp_log_id_node_zone, cur_tln);
26752529f56eSJonathan T. Looney
26762529f56eSJonathan T. Looney /*
26772529f56eSJonathan T. Looney * Because we removed this entry from the list, prev_tln
26782529f56eSJonathan T. Looney * (which tracks the previous entry still on the tlb
26792529f56eSJonathan T. Looney * list) remains unchanged.
26802529f56eSJonathan T. Looney */
26812529f56eSJonathan T. Looney continue;
26822529f56eSJonathan T. Looney }
26832529f56eSJonathan T. Looney
26842529f56eSJonathan T. Looney /*
26852529f56eSJonathan T. Looney * If we get to this point, the session data is still held in
26862529f56eSJonathan T. Looney * the TCPCB. So, we need to pull the data out of that.
26872529f56eSJonathan T. Looney *
26882529f56eSJonathan T. Looney * We will need to drop the expireq lock so we can lock the INP.
26892529f56eSJonathan T. Looney * We can then try to extract the data the "easy" way. If that
26902529f56eSJonathan T. Looney * fails, we'll save the log entries for later.
26912529f56eSJonathan T. Looney */
26922529f56eSJonathan T. Looney if (expireq_locked) {
26932529f56eSJonathan T. Looney TCPLOG_EXPIREQ_UNLOCK();
26942529f56eSJonathan T. Looney expireq_locked = false;
26952529f56eSJonathan T. Looney }
26962529f56eSJonathan T. Looney
26972529f56eSJonathan T. Looney /* Lock the INP and then re-check the state. */
26982529f56eSJonathan T. Looney inp = cur_tln->tln_inp;
26992529f56eSJonathan T. Looney INP_WLOCK(inp);
27002529f56eSJonathan T. Looney /*
27012529f56eSJonathan T. Looney * If we caught this while it was transitioning, the data
27022529f56eSJonathan T. Looney * might have moved from the TCPCB to the tln (signified by
27032529f56eSJonathan T. Looney * setting tln_closed to true). If so, treat this like an
27042529f56eSJonathan T. Looney * inactive connection.
27052529f56eSJonathan T. Looney */
27062529f56eSJonathan T. Looney if (cur_tln->tln_closed) {
27072529f56eSJonathan T. Looney /*
27082529f56eSJonathan T. Looney * It looks like we may have caught this connection
27092529f56eSJonathan T. Looney * while it was transitioning from active to inactive.
27102529f56eSJonathan T. Looney * Treat this like an inactive connection.
27112529f56eSJonathan T. Looney */
27122529f56eSJonathan T. Looney INP_WUNLOCK(inp);
27132529f56eSJonathan T. Looney goto no_inp;
27142529f56eSJonathan T. Looney }
27152529f56eSJonathan T. Looney
27162529f56eSJonathan T. Looney /*
27172529f56eSJonathan T. Looney * Try to dump the data from the tp without dropping the lock.
27182529f56eSJonathan T. Looney * If this fails, try to save off the data locally.
27192529f56eSJonathan T. Looney */
27202529f56eSJonathan T. Looney tp = cur_tln->tln_tp;
27212529f56eSJonathan T. Looney if (tcp_log_dump_tp_logbuf(tp, reason, M_NOWAIT, true) &&
27222529f56eSJonathan T. Looney num_local_entries < LOCAL_SAVE) {
27232529f56eSJonathan T. Looney tcp_log_move_tp_to_node(tp,
27242529f56eSJonathan T. Looney &local_entries[num_local_entries]);
27252529f56eSJonathan T. Looney local_entries[num_local_entries].tln_closed = 1;
27262529f56eSJonathan T. Looney KASSERT(local_entries[num_local_entries].tln_bucket ==
27272529f56eSJonathan T. Looney tlb, ("%s: %d: bucket mismatch for node %p",
27282529f56eSJonathan T. Looney __func__, __LINE__, cur_tln));
27292529f56eSJonathan T. Looney num_local_entries++;
27302529f56eSJonathan T. Looney }
27312529f56eSJonathan T. Looney
27322529f56eSJonathan T. Looney INP_WUNLOCK(inp);
27332529f56eSJonathan T. Looney
27342529f56eSJonathan T. Looney /*
27352529f56eSJonathan T. Looney * We are going to leave the current tln on the list. It will
27362529f56eSJonathan T. Looney * become the previous tln.
27372529f56eSJonathan T. Looney */
27382529f56eSJonathan T. Looney prev_tln = cur_tln;
27392529f56eSJonathan T. Looney }
27402529f56eSJonathan T. Looney
27412529f56eSJonathan T. Looney /* Drop our locks, if any. */
27422529f56eSJonathan T. Looney KASSERT(tree_locked == TREE_UNLOCKED,
27432529f56eSJonathan T. Looney ("%s: %d: tree unexpectedly locked", __func__, __LINE__));
27442529f56eSJonathan T. Looney switch (tree_locked) {
27452529f56eSJonathan T. Looney case TREE_WLOCKED:
27462529f56eSJonathan T. Looney TCPID_TREE_WUNLOCK();
27472529f56eSJonathan T. Looney tree_locked = TREE_UNLOCKED;
27482529f56eSJonathan T. Looney break;
27492529f56eSJonathan T. Looney case TREE_RLOCKED:
27502529f56eSJonathan T. Looney TCPID_TREE_RUNLOCK();
27512529f56eSJonathan T. Looney tree_locked = TREE_UNLOCKED;
27522529f56eSJonathan T. Looney break;
27532529f56eSJonathan T. Looney }
27542529f56eSJonathan T. Looney if (expireq_locked) {
27552529f56eSJonathan T. Looney TCPLOG_EXPIREQ_UNLOCK();
27562529f56eSJonathan T. Looney expireq_locked = false;
27572529f56eSJonathan T. Looney }
27582529f56eSJonathan T. Looney
27592529f56eSJonathan T. Looney /*
27602529f56eSJonathan T. Looney * Try again for any saved entries. tcp_log_dump_node_logbuf() is
27612529f56eSJonathan T. Looney * guaranteed to free the log entries within the node. And, since
27622529f56eSJonathan T. Looney * the node itself is on our stack, we don't need to free it.
27632529f56eSJonathan T. Looney */
27642529f56eSJonathan T. Looney for (i = 0; i < num_local_entries; i++)
27652529f56eSJonathan T. Looney tcp_log_dump_node_logbuf(&local_entries[i], reason, M_WAITOK);
27662529f56eSJonathan T. Looney
27672529f56eSJonathan T. Looney /* Drop our reference. */
27682529f56eSJonathan T. Looney if (!tcp_log_unref_bucket(tlb, &tree_locked, NULL))
27692529f56eSJonathan T. Looney TCPID_BUCKET_UNLOCK(tlb);
27702529f56eSJonathan T. Looney
27712529f56eSJonathan T. Looney done:
27722529f56eSJonathan T. Looney /* Drop our locks, if any. */
27732529f56eSJonathan T. Looney switch (tree_locked) {
27742529f56eSJonathan T. Looney case TREE_WLOCKED:
27752529f56eSJonathan T. Looney TCPID_TREE_WUNLOCK();
27762529f56eSJonathan T. Looney break;
27772529f56eSJonathan T. Looney case TREE_RLOCKED:
27782529f56eSJonathan T. Looney TCPID_TREE_RUNLOCK();
27792529f56eSJonathan T. Looney break;
27802529f56eSJonathan T. Looney }
27812529f56eSJonathan T. Looney if (expireq_locked)
27822529f56eSJonathan T. Looney TCPLOG_EXPIREQ_UNLOCK();
27832529f56eSJonathan T. Looney }
27842529f56eSJonathan T. Looney #undef LOCAL_SAVE
27852529f56eSJonathan T. Looney
27862529f56eSJonathan T. Looney /*
27872529f56eSJonathan T. Looney * Queue the log buffers for all sessions in a bucket for transmission via
27882529f56eSJonathan T. Looney * the log buffer facility.
27892529f56eSJonathan T. Looney *
27902529f56eSJonathan T. Looney * NOTE: This should be called with a locked INP; however, the function
27912529f56eSJonathan T. Looney * will drop the lock.
27922529f56eSJonathan T. Looney */
27932529f56eSJonathan T. Looney void
27942529f56eSJonathan T. Looney tcp_log_dump_tp_bucket_logbufs(struct tcpcb *tp, char *reason)
27952529f56eSJonathan T. Looney {
27969eb0e832SGleb Smirnoff struct inpcb *inp = tptoinpcb(tp);
27972529f56eSJonathan T. Looney struct tcp_log_id_bucket *tlb;
27982529f56eSJonathan T. Looney int tree_locked;
27992529f56eSJonathan T. Looney
28002529f56eSJonathan T. Looney /* Figure out our bucket and lock it. */
28019eb0e832SGleb Smirnoff INP_WLOCK_ASSERT(inp);
28022529f56eSJonathan T. Looney tlb = tp->t_lib;
28032529f56eSJonathan T. Looney if (tlb == NULL) {
28042529f56eSJonathan T. Looney /*
28052529f56eSJonathan T. Looney * No bucket; treat this like a request to dump a single
28062529f56eSJonathan T. Looney * session's traces.
28072529f56eSJonathan T. Looney */
28082529f56eSJonathan T. Looney (void)tcp_log_dump_tp_logbuf(tp, reason, M_WAITOK, true);
28099eb0e832SGleb Smirnoff INP_WUNLOCK(inp);
28102529f56eSJonathan T. Looney return;
28112529f56eSJonathan T. Looney }
28122529f56eSJonathan T. Looney TCPID_BUCKET_REF(tlb);
28139eb0e832SGleb Smirnoff INP_WUNLOCK(inp);
28142529f56eSJonathan T. Looney TCPID_BUCKET_LOCK(tlb);
28152529f56eSJonathan T. Looney
28162529f56eSJonathan T. Looney /* If we are the last reference, we have nothing more to do here. */
28172529f56eSJonathan T. Looney tree_locked = TREE_UNLOCKED;
28182529f56eSJonathan T. Looney if (tcp_log_unref_bucket(tlb, &tree_locked, NULL)) {
28192529f56eSJonathan T. Looney switch (tree_locked) {
28202529f56eSJonathan T. Looney case TREE_WLOCKED:
28212529f56eSJonathan T. Looney TCPID_TREE_WUNLOCK();
28222529f56eSJonathan T. Looney break;
28232529f56eSJonathan T. Looney case TREE_RLOCKED:
28242529f56eSJonathan T. Looney TCPID_TREE_RUNLOCK();
28252529f56eSJonathan T. Looney break;
28262529f56eSJonathan T. Looney }
28272529f56eSJonathan T. Looney return;
28282529f56eSJonathan T. Looney }
28292529f56eSJonathan T. Looney
28302529f56eSJonathan T. Looney /* Turn this over to tcp_log_dumpbucketlogs() to finish the work. */
28312529f56eSJonathan T. Looney tcp_log_dumpbucketlogs(tlb, reason);
28322529f56eSJonathan T. Looney }
28332529f56eSJonathan T. Looney
28342529f56eSJonathan T. Looney /*
28352529f56eSJonathan T. Looney * Mark the end of a flow with the current stack. A stack can add
28362529f56eSJonathan T. Looney * stack-specific info to this trace event by overriding this
28372529f56eSJonathan T. Looney * function (see bbr_log_flowend() for example).
28382529f56eSJonathan T. Looney */
28392529f56eSJonathan T. Looney void
28402529f56eSJonathan T. Looney tcp_log_flowend(struct tcpcb *tp)
28412529f56eSJonathan T. Looney {
284269c7c811SRandall Stewart if (tp->_t_logstate != TCP_LOG_STATE_OFF) {
28439eb0e832SGleb Smirnoff struct socket *so = tptosocket(tp);
28442529f56eSJonathan T. Looney TCP_LOG_EVENT(tp, NULL, &so->so_rcv, &so->so_snd,
28452529f56eSJonathan T. Looney TCP_LOG_FLOWEND, 0, 0, NULL, false);
28462529f56eSJonathan T. Looney }
28472529f56eSJonathan T. Looney }
284869c7c811SRandall Stewart
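/*
 * Record a sendfile() call against this socket as a TCP_LOG_SENDFILE
 * event (when logging is enabled) and, when request tracking is compiled
 * in, check whether the byte range matches one of the tracked closed-end
 * requests.
 */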
284969c7c811SRandall Stewart void
285069c7c811SRandall Stewart tcp_log_sendfile(struct socket *so, off_t offset, size_t nbytes, int flags)
285169c7c811SRandall Stewart {
285269c7c811SRandall Stewart struct inpcb *inp;
285369c7c811SRandall Stewart struct tcpcb *tp;
285473ee5756SRandall Stewart #ifdef TCP_REQUEST_TRK
285557a3a161SRandall Stewart struct tcp_sendfile_track *ent;
285673ee5756SRandall Stewart int i, fnd;
285773ee5756SRandall Stewart #endif
285869c7c811SRandall Stewart
285969c7c811SRandall Stewart inp = sotoinpcb(so);
286069c7c811SRandall Stewart KASSERT(inp != NULL, ("tcp_log_sendfile: inp == NULL"));
286169c7c811SRandall Stewart
286269c7c811SRandall Stewart /* quick check to see if logging is enabled for this connection */
286369c7c811SRandall Stewart tp = intotcpcb(inp);
286469c7c811SRandall Stewart if ((inp->inp_flags & INP_DROPPED) ||
286569c7c811SRandall Stewart (tp->_t_logstate == TCP_LOG_STATE_OFF)) {
286669c7c811SRandall Stewart return;
286769c7c811SRandall Stewart }
286869c7c811SRandall Stewart
286969c7c811SRandall Stewart INP_WLOCK(inp);
287069c7c811SRandall Stewart /* double check log state now that we have the lock */
287169c7c811SRandall Stewart if (inp->inp_flags & INP_DROPPED)
287269c7c811SRandall Stewart goto done;
287369c7c811SRandall Stewart if (tp->_t_logstate != TCP_LOG_STATE_OFF) {
287469c7c811SRandall Stewart struct timeval tv;
287569c7c811SRandall Stewart tcp_log_eventspecific_t log;
287669c7c811SRandall Stewart
28773bd1e85fSPeter Lei memset(&log, 0, sizeof(log));
287869c7c811SRandall Stewart microuptime(&tv);
287969c7c811SRandall Stewart log.u_sf.offset = offset;
288069c7c811SRandall Stewart log.u_sf.length = nbytes;
288169c7c811SRandall Stewart log.u_sf.flags = flags;
288269c7c811SRandall Stewart
288369c7c811SRandall Stewart TCP_LOG_EVENTP(tp, NULL,
288469c7c811SRandall Stewart &tptosocket(tp)->so_rcv,
288569c7c811SRandall Stewart &tptosocket(tp)->so_snd,
288669c7c811SRandall Stewart TCP_LOG_SENDFILE, 0, 0, &log, false, &tv);
288769c7c811SRandall Stewart }
288873ee5756SRandall Stewart #ifdef TCP_REQUEST_TRK
288957a3a161SRandall Stewart if (tp->t_tcpreq_req == 0) {
289073ee5756SRandall Stewart /* No requests to track */
289173ee5756SRandall Stewart goto done;
289273ee5756SRandall Stewart }
289373ee5756SRandall Stewart fnd = 0;
289457a3a161SRandall Stewart if (tp->t_tcpreq_closed == 0) {
289573ee5756SRandall Stewart /* No closed-end requests to track */
289673ee5756SRandall Stewart goto skip_closed_req;
289773ee5756SRandall Stewart }
289857a3a161SRandall Stewart for (i = 0; i < MAX_TCP_TRK_REQ; i++) {
289973ee5756SRandall Stewart /* Let's see if this one matches */
290057a3a161SRandall Stewart ent = &tp->t_tcpreq_info[i];
290157a3a161SRandall Stewart if (ent->flags == TCP_TRK_TRACK_FLG_EMPTY) {
290273ee5756SRandall Stewart /* Not used */
290373ee5756SRandall Stewart continue;
290473ee5756SRandall Stewart }
290557a3a161SRandall Stewart if (ent->flags & TCP_TRK_TRACK_FLG_OPEN) {
290673ee5756SRandall Stewart /* This pass does not consider open requests */
290773ee5756SRandall Stewart continue;
290873ee5756SRandall Stewart }
290957a3a161SRandall Stewart if (ent->flags & TCP_TRK_TRACK_FLG_COMP) {
291073ee5756SRandall Stewart /* Don't look at what we have completed */
291173ee5756SRandall Stewart continue;
291273ee5756SRandall Stewart }
291373ee5756SRandall Stewart /* If we reach here it's an allocated closed-end request */
291473ee5756SRandall Stewart if ((ent->start == offset) ||
291573ee5756SRandall Stewart ((offset > ent->start) && (offset < ent->end))) {
291673ee5756SRandall Stewart /* The offset falls within this request */
291773ee5756SRandall Stewart fnd = 1;
291873ee5756SRandall Stewart }
291973ee5756SRandall Stewart if (fnd) {
292073ee5756SRandall Stewart /*
292173ee5756SRandall Stewart * Found the matching request; mark its sequence info valid.
292273ee5756SRandall Stewart */
292357a3a161SRandall Stewart ent->flags |= TCP_TRK_TRACK_FLG_SEQV;
292473ee5756SRandall Stewart /*
292573ee5756SRandall Stewart * When an entry completes we can take (snd_una + sb_ccc) and know where
292673ee5756SRandall Stewart * the end of the range really is. This works because two requests must
292773ee5756SRandall Stewart * be sequential and sendfile is now complete for *this* request. We must
292873ee5756SRandall Stewart * use sb_ccc (not sb_cc) since the data may still be in-flight in TLS.
292973ee5756SRandall Stewart *
293073ee5756SRandall Stewart * We cautiously move end_seq forward only if our calculation shows that
293173ee5756SRandall Stewart * it advanced (just in case sendfile calls in here from the wrong place).
293273ee5756SRandall Stewart * Once we go COMP we stop coming here and should hopefully be left with
293373ee5756SRandall Stewart * the correct end_seq.
293473ee5756SRandall Stewart */
293573ee5756SRandall Stewart if (SEQ_GT((tp->snd_una + so->so_snd.sb_ccc), ent->end_seq))
293673ee5756SRandall Stewart ent->end_seq = tp->snd_una + so->so_snd.sb_ccc;
293773ee5756SRandall Stewart if ((offset + nbytes) >= ent->end) {
293857a3a161SRandall Stewart ent->flags |= TCP_TRK_TRACK_FLG_COMP;
293957a3a161SRandall Stewart tcp_req_log_req_info(tp, ent, i, TCP_TRK_REQ_LOG_COMPLETE, offset, nbytes);
294073ee5756SRandall Stewart } else {
294157a3a161SRandall Stewart tcp_req_log_req_info(tp, ent, i, TCP_TRK_REQ_LOG_MOREYET, offset, nbytes);
294273ee5756SRandall Stewart }
294373ee5756SRandall Stewart /* We assume that sendfile never sends overlapping requests */
294473ee5756SRandall Stewart goto done;
294573ee5756SRandall Stewart }
294673ee5756SRandall Stewart }
294773ee5756SRandall Stewart skip_closed_req:
294873ee5756SRandall Stewart if (!fnd) {
294973ee5756SRandall Stewart /* OK, now let's look for open requests */
295057a3a161SRandall Stewart for (i = 0; i < MAX_TCP_TRK_REQ; i++) {
295157a3a161SRandall Stewart ent = &tp->t_tcpreq_info[i];
295257a3a161SRandall Stewart if (ent->flags == TCP_TRK_TRACK_FLG_EMPTY) {
295373ee5756SRandall Stewart /* Not used */
295473ee5756SRandall Stewart continue;
295573ee5756SRandall Stewart }
295657a3a161SRandall Stewart if ((ent->flags & TCP_TRK_TRACK_FLG_OPEN) == 0)
295773ee5756SRandall Stewart continue;
295873ee5756SRandall Stewart /* If we reach here it's an allocated open request */
295973ee5756SRandall Stewart if (ent->start == offset) {
296073ee5756SRandall Stewart /* It begins this request */
296173ee5756SRandall Stewart ent->start_seq = tp->snd_una +
296273ee5756SRandall Stewart tptosocket(tp)->so_snd.sb_ccc;
296357a3a161SRandall Stewart ent->flags |= TCP_TRK_TRACK_FLG_SEQV;
296473ee5756SRandall Stewart break;
296573ee5756SRandall Stewart } else if (offset > ent->start) {
296657a3a161SRandall Stewart ent->flags |= TCP_TRK_TRACK_FLG_SEQV;
296773ee5756SRandall Stewart break;
296873ee5756SRandall Stewart }
296973ee5756SRandall Stewart }
297073ee5756SRandall Stewart }
297173ee5756SRandall Stewart #endif
297269c7c811SRandall Stewart done:
297369c7c811SRandall Stewart INP_WUNLOCK(inp);
297469c7c811SRandall Stewart }
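/*
 * Worked example of the closed-end matching above (illustrative numbers
 * only): a tracked request covering bytes [start = 1000, end = 5000)
 * matches a sendfile call with offset == 1000, or any offset strictly
 * between 1000 and 5000.  If offset + nbytes >= 5000 the entry is marked
 * TCP_TRK_TRACK_FLG_COMP and logged as complete; otherwise
 * TCP_TRK_REQ_LOG_MOREYET is logged and a later call may complete it.
 * In both cases end_seq only advances to (snd_una + sb_ccc) when that
 * value is greater, in sequence space, than the end_seq already recorded.
 */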
2975