/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2016-2018 Netflix, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/arb.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/qmath.h>
#include <sys/queue.h>
#include <sys/refcount.h>
#include <sys/rwlock.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/tree.h>
#include <sys/stats.h> /* Must come after qmath.h and tree.h */
#include <sys/counter.h>

#include <dev/tcp_log/tcp_log_dev.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_log_buf.h>

/* Default expiry time */
#define	TCP_LOG_EXPIRE_TIME	((sbintime_t)60 * SBT_1S)

/* Max interval at which to run the expiry timer */
#define	TCP_LOG_EXPIRE_INTVL	((sbintime_t)5 * SBT_1S)

bool	tcp_log_verbose;
static uma_zone_t tcp_log_id_bucket_zone, tcp_log_id_node_zone, tcp_log_zone;
static int	tcp_log_session_limit = TCP_LOG_BUF_DEFAULT_SESSION_LIMIT;
static uint32_t	tcp_log_version = TCP_LOG_BUF_VER;
RB_HEAD(tcp_log_id_tree, tcp_log_id_bucket);
static struct tcp_log_id_tree tcp_log_id_head;
static STAILQ_HEAD(, tcp_log_id_node) tcp_log_expireq_head =
    STAILQ_HEAD_INITIALIZER(tcp_log_expireq_head);
static struct mtx tcp_log_expireq_mtx;
static struct callout tcp_log_expireq_callout;
static u_long tcp_log_auto_ratio = 0;
static volatile u_long tcp_log_auto_ratio_cur = 0;
static uint32_t tcp_log_auto_mode = TCP_LOG_STATE_TAIL;
static bool tcp_log_auto_all = false;
static uint32_t tcp_disable_all_bb_logs = 0;

RB_PROTOTYPE_STATIC(tcp_log_id_tree, tcp_log_id_bucket, tlb_rb, tcp_log_id_cmp)

SYSCTL_NODE(_net_inet_tcp, OID_AUTO, bb, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "TCP Black Box controls");

SYSCTL_BOOL(_net_inet_tcp_bb, OID_AUTO, log_verbose, CTLFLAG_RW, &tcp_log_verbose,
    0, "Force verbose logging for TCP traces");

SYSCTL_INT(_net_inet_tcp_bb, OID_AUTO, log_session_limit,
    CTLFLAG_RW, &tcp_log_session_limit, 0,
    "Maximum number of events maintained for each TCP session");

SYSCTL_UMA_MAX(_net_inet_tcp_bb, OID_AUTO, log_global_limit, CTLFLAG_RW,
    &tcp_log_zone, "Maximum number of events maintained for all TCP sessions");

SYSCTL_UMA_CUR(_net_inet_tcp_bb, OID_AUTO, log_global_entries, CTLFLAG_RD,
    &tcp_log_zone, "Current number of events maintained for all TCP sessions");

SYSCTL_UMA_MAX(_net_inet_tcp_bb, OID_AUTO, log_id_limit, CTLFLAG_RW,
    &tcp_log_id_bucket_zone, "Maximum number of log IDs");

SYSCTL_UMA_CUR(_net_inet_tcp_bb, OID_AUTO, log_id_entries, CTLFLAG_RD,
    &tcp_log_id_bucket_zone, "Current number of log IDs");

SYSCTL_UMA_MAX(_net_inet_tcp_bb, OID_AUTO, log_id_tcpcb_limit, CTLFLAG_RW,
    &tcp_log_id_node_zone, "Maximum number of tcpcbs with log IDs");

SYSCTL_UMA_CUR(_net_inet_tcp_bb, OID_AUTO, log_id_tcpcb_entries, CTLFLAG_RD,
    &tcp_log_id_node_zone, "Current number of tcpcbs with log IDs");

SYSCTL_U32(_net_inet_tcp_bb, OID_AUTO, log_version, CTLFLAG_RD, &tcp_log_version,
    0, "Version of log formats exported");

SYSCTL_U32(_net_inet_tcp_bb, OID_AUTO, disable_all, CTLFLAG_RW,
    &tcp_disable_all_bb_logs, 0,
    "Disable all BB logging for all connections");

SYSCTL_ULONG(_net_inet_tcp_bb, OID_AUTO, log_auto_ratio, CTLFLAG_RW,
    &tcp_log_auto_ratio, 0, "Do auto capturing for 1 out of N sessions");

SYSCTL_U32(_net_inet_tcp_bb, OID_AUTO, log_auto_mode, CTLFLAG_RW,
    &tcp_log_auto_mode, 0,
    "Logging mode for auto-selected sessions (default is TCP_LOG_STATE_TAIL)");

SYSCTL_BOOL(_net_inet_tcp_bb, OID_AUTO, log_auto_all, CTLFLAG_RW,
    &tcp_log_auto_all, 0,
    "Auto-select from all sessions (rather than just those with IDs)");

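/*
 * Taken together, these knobs drive auto-capture. A hypothetical example
 * (not a default configuration): setting net.inet.tcp.bb.log_auto_ratio=100
 * and net.inet.tcp.bb.log_auto_all=1, with log_auto_mode left at its
 * default of TCP_LOG_STATE_TAIL, samples roughly 1 in 100 of all sessions
 * and logs each selected session in tail mode; see tcp_log_selectauto()
 * below for the selection logic.
 */
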
#ifdef TCPLOG_DEBUG_COUNTERS
counter_u64_t tcp_log_queued;
counter_u64_t tcp_log_que_fail1;
counter_u64_t tcp_log_que_fail2;
counter_u64_t tcp_log_que_fail3;
counter_u64_t tcp_log_que_fail4;
counter_u64_t tcp_log_que_fail5;
counter_u64_t tcp_log_que_copyout;
counter_u64_t tcp_log_que_read;
counter_u64_t tcp_log_que_freed;

SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, queued, CTLFLAG_RD,
    &tcp_log_queued, "Number of entries queued");
SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, fail1, CTLFLAG_RD,
    &tcp_log_que_fail1, "Number of entries queued but fail 1");
SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, fail2, CTLFLAG_RD,
    &tcp_log_que_fail2, "Number of entries queued but fail 2");
SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, fail3, CTLFLAG_RD,
    &tcp_log_que_fail3, "Number of entries queued but fail 3");
SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, fail4, CTLFLAG_RD,
    &tcp_log_que_fail4, "Number of entries queued but fail 4");
SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, fail5, CTLFLAG_RD,
    &tcp_log_que_fail5, "Number of entries queued but fail 5");
SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, copyout, CTLFLAG_RD,
    &tcp_log_que_copyout, "Number of entries copied out");
SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, read, CTLFLAG_RD,
    &tcp_log_que_read, "Number of entries read from the queue");
SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, freed, CTLFLAG_RD,
    &tcp_log_que_freed, "Number of entries freed after reading");
#endif

#ifdef INVARIANTS
#define	TCPLOG_DEBUG_RINGBUF
#endif
/* Number of requests to consider a PCB ID "active". */
#define	ACTIVE_REQUEST_COUNT	10

/* Statistic tracking for "active" PCB IDs. */
static counter_u64_t tcp_log_pcb_ids_cur;
static counter_u64_t tcp_log_pcb_ids_tot;

SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, pcb_ids_cur, CTLFLAG_RD,
    &tcp_log_pcb_ids_cur, "Number of pcb IDs allocated in the system");
SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, pcb_ids_tot, CTLFLAG_RD,
    &tcp_log_pcb_ids_tot, "Total number of pcb IDs that have been allocated");

struct tcp_log_mem
{
	STAILQ_ENTRY(tcp_log_mem) tlm_queue;
	struct tcp_log_buffer	tlm_buf;
	struct tcp_log_verbose	tlm_v;
#ifdef TCPLOG_DEBUG_RINGBUF
	volatile int		tlm_refcnt;
#endif
};

/* 60 bytes for the header, + 16 bytes for padding */
static uint8_t	zerobuf[76];

/*
 * Lock order:
 * 1. TCPID_TREE
 * 2. TCPID_BUCKET
 * 3. INP
 *
 * Rules:
 * A. You need a lock on the Tree to add/remove buckets.
 * B. You need a lock on the bucket to add/remove nodes from the bucket.
 * C. To change information in a node, you need the INP lock if the tln_closed
 *    field is false. Otherwise, you need the bucket lock. (Note that the
 *    tln_closed field can change at any point, so you need to recheck the
 *    entry after acquiring the INP lock.)
 * D. To remove a node from the bucket, you must have that entry locked,
 *    according to the criteria of Rule C. Also, the node must not be on
 *    the expiry queue.
 * E. The exception to C is the expiry queue fields, which are locked by
 *    the TCPLOG_EXPIREQ lock.
 *
 * Buckets have a reference count. Each node is a reference. Further,
 * other callers may add reference counts to keep a bucket from disappearing.
 * You can add a reference as long as you own a lock sufficient to keep the
 * bucket from disappearing. For example, a common use is:
 *   a. Have a locked INP, but need to lock the TCPID_BUCKET.
 *   b. Add a refcount on the bucket. (Safe because the INP lock prevents
 *      the TCPID_BUCKET from going away.)
 *   c. Drop the INP lock.
 *   d. Acquire a lock on the TCPID_BUCKET.
 *   e. Acquire a lock on the INP.
 *   f. Drop the refcount on the bucket.
 *      (At this point, the bucket may disappear.)
 *
 * Expire queue lock:
 * You can acquire this with either the bucket or INP lock. Don't reverse it.
 * When the expire code has committed to freeing a node, it resets the expiry
 * time to SBT_MAX. That is the signal to everyone else that they should
 * leave that node alone.
 */
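
/*
 * An illustrative sketch (kept under #if 0, so it is never compiled) of
 * the reference dance in steps (a)-(f) above, for a hypothetical caller
 * that holds only the INP write lock but needs the bucket lock. The real
 * pattern, including the tree-lock bookkeeping that
 * tcp_log_unref_bucket() may require, appears in tcp_log_set_tag() below.
 */
#if 0
static void
example_bucket_lock_dance(struct inpcb *inp, struct tcp_log_id_bucket *tlb)
{
	int tree_locked = TREE_UNLOCKED;

	INP_WLOCK_ASSERT(inp);		/* (a) Start with a locked INP. */
	TCPID_BUCKET_REF(tlb);		/* (b) Pin the bucket. */
	INP_WUNLOCK(inp);		/* (c) Drop the INP lock. */
	TCPID_BUCKET_LOCK(tlb);		/* (d) Lock the bucket. */
	INP_WLOCK(inp);			/* (e) Relock the INP. */
	/* (f) Drop the reference; the bucket may now disappear. */
	if (!tcp_log_unref_bucket(tlb, &tree_locked, inp))
		TCPID_BUCKET_UNLOCK(tlb);
	/* The caller must now release any tree lock, per tree_locked. */
}
#endif
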
static struct rwlock tcp_id_tree_lock;
#define	TCPID_TREE_WLOCK()		rw_wlock(&tcp_id_tree_lock)
#define	TCPID_TREE_RLOCK()		rw_rlock(&tcp_id_tree_lock)
#define	TCPID_TREE_UPGRADE()		rw_try_upgrade(&tcp_id_tree_lock)
#define	TCPID_TREE_WUNLOCK()		rw_wunlock(&tcp_id_tree_lock)
#define	TCPID_TREE_RUNLOCK()		rw_runlock(&tcp_id_tree_lock)
#define	TCPID_TREE_WLOCK_ASSERT()	rw_assert(&tcp_id_tree_lock, RA_WLOCKED)
#define	TCPID_TREE_RLOCK_ASSERT()	rw_assert(&tcp_id_tree_lock, RA_RLOCKED)
#define	TCPID_TREE_UNLOCK_ASSERT()	rw_assert(&tcp_id_tree_lock, RA_UNLOCKED)

#define	TCPID_BUCKET_LOCK_INIT(tlb)	mtx_init(&((tlb)->tlb_mtx), "tcp log id bucket", NULL, MTX_DEF)
#define	TCPID_BUCKET_LOCK_DESTROY(tlb)	mtx_destroy(&((tlb)->tlb_mtx))
#define	TCPID_BUCKET_LOCK(tlb)		mtx_lock(&((tlb)->tlb_mtx))
#define	TCPID_BUCKET_UNLOCK(tlb)	mtx_unlock(&((tlb)->tlb_mtx))
#define	TCPID_BUCKET_LOCK_ASSERT(tlb)	mtx_assert(&((tlb)->tlb_mtx), MA_OWNED)
#define	TCPID_BUCKET_UNLOCK_ASSERT(tlb) mtx_assert(&((tlb)->tlb_mtx), MA_NOTOWNED)

#define	TCPID_BUCKET_REF(tlb)		refcount_acquire(&((tlb)->tlb_refcnt))
#define	TCPID_BUCKET_UNREF(tlb)		refcount_release(&((tlb)->tlb_refcnt))

#define	TCPLOG_EXPIREQ_LOCK()		mtx_lock(&tcp_log_expireq_mtx)
#define	TCPLOG_EXPIREQ_UNLOCK()		mtx_unlock(&tcp_log_expireq_mtx)

SLIST_HEAD(tcp_log_id_head, tcp_log_id_node);

struct tcp_log_id_bucket
{
	/*
	 * tlb_id must be first. This lets us use strcmp on
	 * (struct tcp_log_id_bucket *) and (char *) interchangeably.
	 */
	char				tlb_id[TCP_LOG_ID_LEN];
	char				tlb_tag[TCP_LOG_TAG_LEN];
	RB_ENTRY(tcp_log_id_bucket)	tlb_rb;
	struct tcp_log_id_head		tlb_head;
	struct mtx			tlb_mtx;
	volatile u_int			tlb_refcnt;
	volatile u_int			tlb_reqcnt;
	uint32_t			tlb_loglimit;
	uint8_t				tlb_logstate;
};

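/*
 * Because tlb_id is the first member, a lookup can cast the bare ID
 * string to a bucket when searching the tree, as tcp_log_set_id() does
 * below:
 *
 *	tmp_tlb = RB_FIND(tcp_log_id_tree, &tcp_log_id_head,
 *	    (struct tcp_log_id_bucket *) id);
 */
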
struct tcp_log_id_node
{
	SLIST_ENTRY(tcp_log_id_node) tln_list;
	STAILQ_ENTRY(tcp_log_id_node) tln_expireq; /* Locked by the expireq lock */
	sbintime_t		tln_expiretime;	/* Locked by the expireq lock */

	/*
	 * If INP is NULL, that means the connection has closed. We've
	 * saved the connection endpoint information and the log entries
	 * in the tln_ie and tln_entries members. We've also saved a pointer
	 * to the enclosing bucket here. If INP is not NULL, the information is
	 * in the PCB and not here.
	 */
	struct inpcb		*tln_inp;
	struct tcpcb		*tln_tp;
	struct tcp_log_id_bucket *tln_bucket;
	struct in_endpoints	tln_ie;
	struct tcp_log_stailq	tln_entries;
	int			tln_count;
	volatile int		tln_closed;
	uint8_t			tln_af;
};

enum tree_lock_state {
	TREE_UNLOCKED = 0,
	TREE_RLOCKED,
	TREE_WLOCKED,
};

/* Do we want to select this session for auto-logging? */
static __inline bool
tcp_log_selectauto(void)
{

	/*
	 * If we are doing auto-capturing, figure out whether we will capture
	 * this session.
	 */
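	/*
	 * E.g., with tcp_log_auto_ratio == 100, atomic_fetchadd_long()
	 * returns the pre-increment counter value, so the sessions that
	 * observe 0, 100, 200, ... are selected: roughly 1 in 100 over
	 * time.
	 */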
	if (tcp_log_auto_ratio &&
	    (tcp_disable_all_bb_logs == 0) &&
	    (atomic_fetchadd_long(&tcp_log_auto_ratio_cur, 1) %
	    tcp_log_auto_ratio) == 0)
		return (true);
	return (false);
}

static __inline int
tcp_log_id_cmp(struct tcp_log_id_bucket *a, struct tcp_log_id_bucket *b)
{
	KASSERT(a != NULL, ("tcp_log_id_cmp: argument a is unexpectedly NULL"));
	KASSERT(b != NULL, ("tcp_log_id_cmp: argument b is unexpectedly NULL"));
	return (strncmp(a->tlb_id, b->tlb_id, TCP_LOG_ID_LEN));
}

RB_GENERATE_STATIC(tcp_log_id_tree, tcp_log_id_bucket, tlb_rb, tcp_log_id_cmp)

static __inline void
tcp_log_id_validate_tree_lock(int tree_locked)
{

#ifdef INVARIANTS
	switch (tree_locked) {
	case TREE_WLOCKED:
		TCPID_TREE_WLOCK_ASSERT();
		break;
	case TREE_RLOCKED:
		TCPID_TREE_RLOCK_ASSERT();
		break;
	case TREE_UNLOCKED:
		TCPID_TREE_UNLOCK_ASSERT();
		break;
	default:
		kassert_panic("%s:%d: unknown tree lock state", __func__,
		    __LINE__);
	}
#endif
}

static __inline void
tcp_log_remove_bucket(struct tcp_log_id_bucket *tlb)
{

	TCPID_TREE_WLOCK_ASSERT();
	KASSERT(SLIST_EMPTY(&tlb->tlb_head),
	    ("%s: Attempt to remove non-empty bucket", __func__));
	if (RB_REMOVE(tcp_log_id_tree, &tcp_log_id_head, tlb) == NULL) {
#ifdef INVARIANTS
		kassert_panic("%s:%d: error removing element from tree",
			    __func__, __LINE__);
#endif
	}
	TCPID_BUCKET_LOCK_DESTROY(tlb);
	counter_u64_add(tcp_log_pcb_ids_cur, (int64_t)-1);
	uma_zfree(tcp_log_id_bucket_zone, tlb);
}

/*
 * Call with a referenced and locked bucket.
 * Will return true if the bucket was freed; otherwise, false.
 * tlb: The bucket to unreference.
 * tree_locked: A pointer to the state of the tree lock. If the tree lock
 *    state changes, the function will update it.
 * inp: If not NULL and the function needs to drop the inp lock to relock the
 *    tree, it will do so. (The caller must ensure inp will not become invalid,
 *    probably by holding a reference to it.)
 */
static bool
tcp_log_unref_bucket(struct tcp_log_id_bucket *tlb, int *tree_locked,
    struct inpcb *inp)
{

	KASSERT(tlb != NULL, ("%s: called with NULL tlb", __func__));
	KASSERT(tree_locked != NULL, ("%s: called with NULL tree_locked",
	    __func__));

	tcp_log_id_validate_tree_lock(*tree_locked);

	/*
	 * Did we hold the last reference on the tlb? If so, we may need
	 * to free it. (Note that we can realistically only execute the
	 * loop twice: once without a write lock and once with a write
	 * lock.)
	 */
	while (TCPID_BUCKET_UNREF(tlb)) {
		/*
		 * We need a write lock on the tree to free this.
		 * If we can upgrade the tree lock, this is "easy". If we
		 * can't upgrade the tree lock, we need to do this the
		 * "hard" way: unwind all our locks and relock everything.
		 * In the meantime, anything could have changed. We even
		 * need to validate that we still need to free the bucket.
		 */
		if (*tree_locked == TREE_RLOCKED && TCPID_TREE_UPGRADE())
			*tree_locked = TREE_WLOCKED;
		else if (*tree_locked != TREE_WLOCKED) {
			TCPID_BUCKET_REF(tlb);
			if (inp != NULL)
				INP_WUNLOCK(inp);
			TCPID_BUCKET_UNLOCK(tlb);
			if (*tree_locked == TREE_RLOCKED)
				TCPID_TREE_RUNLOCK();
			TCPID_TREE_WLOCK();
			*tree_locked = TREE_WLOCKED;
			TCPID_BUCKET_LOCK(tlb);
			if (inp != NULL)
				INP_WLOCK(inp);
			continue;
		}

		/*
		 * We have an empty bucket and a write lock on the tree.
		 * Remove the empty bucket.
		 */
		tcp_log_remove_bucket(tlb);
		return (true);
	}
	return (false);
}

/*
 * Call with a locked bucket. This function will release the lock on the
 * bucket before returning.
 *
 * The caller is responsible for freeing the tp->t_lin/tln node!
 *
 * Note: one of tp or both tlb and tln must be supplied.
 *
 * inp: A pointer to the inp. If the function needs to drop the inp lock to
 *    acquire the tree write lock, it will do so. (The caller must ensure inp
 *    will not become invalid, probably by holding a reference to it.)
 * tp: A pointer to the tcpcb. (optional; if specified, tlb and tln are ignored)
 * tlb: A pointer to the bucket. (optional; ignored if tp is specified)
 * tln: A pointer to the node. (optional; ignored if tp is specified)
 * tree_locked: A pointer to the state of the tree lock. If the tree lock
 *    state changes, the function will update it.
 *
 * Will return true if the tree lock state changed (which implies that any
 * supplied INP lock was dropped and reacquired along the way); otherwise,
 * false.
 */
static bool
tcp_log_remove_id_node(struct inpcb *inp, struct tcpcb *tp,
    struct tcp_log_id_bucket *tlb, struct tcp_log_id_node *tln,
    int *tree_locked)
{
	int orig_tree_locked;

	KASSERT(tp != NULL || (tlb != NULL && tln != NULL),
	    ("%s: called with tp=%p, tlb=%p, tln=%p", __func__,
	    tp, tlb, tln));
	KASSERT(tree_locked != NULL, ("%s: called with NULL tree_locked",
	    __func__));

	if (tp != NULL) {
		tlb = tp->t_lib;
		tln = tp->t_lin;
		KASSERT(tlb != NULL, ("%s: unexpectedly NULL tlb", __func__));
		KASSERT(tln != NULL, ("%s: unexpectedly NULL tln", __func__));
	}

	tcp_log_id_validate_tree_lock(*tree_locked);
	TCPID_BUCKET_LOCK_ASSERT(tlb);

	/*
	 * Remove the node, clear the log bucket and node from the TCPCB, and
	 * decrement the bucket refcount. In the process, if this is the
	 * last reference, the bucket will be freed.
	 */
	SLIST_REMOVE(&tlb->tlb_head, tln, tcp_log_id_node, tln_list);
	if (tp != NULL) {
		tp->t_lib = NULL;
		tp->t_lin = NULL;
	}
	orig_tree_locked = *tree_locked;
	if (!tcp_log_unref_bucket(tlb, tree_locked, inp))
		TCPID_BUCKET_UNLOCK(tlb);
	return (*tree_locked != orig_tree_locked);
}

#define	RECHECK_INP_CLEAN(cleanup)	do {			\
	if (inp->inp_flags & INP_DROPPED) {			\
		rv = ECONNRESET;				\
		cleanup;					\
		goto done;					\
	}							\
	tp = intotcpcb(inp);					\
} while (0)

#define	RECHECK_INP()	RECHECK_INP_CLEAN(/* noop */)
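
/*
 * Typical usage (see tcp_log_set_id() below): after dropping and
 * re-acquiring the INP lock, bail out with ECONNRESET if the connection
 * was dropped in the meantime, and refresh the cached tcpcb pointer
 * otherwise:
 *
 *	INP_WLOCK(inp);
 *	RECHECK_INP();
 */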

static void
tcp_log_grow_tlb(char *tlb_id, struct tcpcb *tp)
{

	INP_WLOCK_ASSERT(tptoinpcb(tp));

#ifdef STATS
	if (V_tcp_perconn_stats_enable == 2 && tp->t_stats == NULL)
		(void)tcp_stats_sample_rollthedice(tp, tlb_id, strlen(tlb_id));
#endif
}

static void
tcp_log_increment_reqcnt(struct tcp_log_id_bucket *tlb)
{

	atomic_fetchadd_int(&tlb->tlb_reqcnt, 1);
}

/*
 * Associate the specified tag with a particular TCP log ID.
 * Called with INPCB locked. Returns with it unlocked.
 * Returns 0 on success or EOPNOTSUPP if the connection has no TCP log ID.
 */
int
tcp_log_set_tag(struct tcpcb *tp, char *tag)
{
	struct inpcb *inp = tptoinpcb(tp);
	struct tcp_log_id_bucket *tlb;
	int tree_locked;

	INP_WLOCK_ASSERT(inp);

	tree_locked = TREE_UNLOCKED;
	tlb = tp->t_lib;
	if (tlb == NULL) {
		INP_WUNLOCK(inp);
		return (EOPNOTSUPP);
	}

	TCPID_BUCKET_REF(tlb);
	INP_WUNLOCK(inp);
	TCPID_BUCKET_LOCK(tlb);
	strlcpy(tlb->tlb_tag, tag, TCP_LOG_TAG_LEN);
	if (!tcp_log_unref_bucket(tlb, &tree_locked, NULL))
		TCPID_BUCKET_UNLOCK(tlb);

	if (tree_locked == TREE_WLOCKED) {
		TCPID_TREE_WLOCK_ASSERT();
		TCPID_TREE_WUNLOCK();
	} else if (tree_locked == TREE_RLOCKED) {
		TCPID_TREE_RLOCK_ASSERT();
		TCPID_TREE_RUNLOCK();
	} else
		TCPID_TREE_UNLOCK_ASSERT();

	return (0);
}

/*
 * Set the TCP log ID for a TCPCB.
 * Called with INPCB locked. Returns with it unlocked.
 */
int
tcp_log_set_id(struct tcpcb *tp, char *id)
{
	struct tcp_log_id_bucket *tlb, *tmp_tlb;
	struct tcp_log_id_node *tln;
	struct inpcb *inp = tptoinpcb(tp);
	int tree_locked, rv;
	bool bucket_locked;

	tlb = NULL;
	tln = NULL;
	tree_locked = TREE_UNLOCKED;
	bucket_locked = false;

restart:
	INP_WLOCK_ASSERT(inp);

	/* See if the ID is unchanged. */
	if ((tp->t_lib != NULL && !strcmp(tp->t_lib->tlb_id, id)) ||
	    (tp->t_lib == NULL && *id == 0)) {
		if (tp->t_lib != NULL) {
			tcp_log_increment_reqcnt(tp->t_lib);
			if ((tp->t_lib->tlb_logstate) &&
			    (tp->t_log_state_set == 0)) {
				/* Clone in any logging */

				tp->t_logstate = tp->t_lib->tlb_logstate;
			}
			if ((tp->t_lib->tlb_loglimit) &&
			    (tp->t_log_state_set == 0)) {
				/* We also have a limit set */

				tp->t_loglimit = tp->t_lib->tlb_loglimit;
			}
		}
		rv = 0;
		goto done;
	}

	/*
	 * If the TCPCB had a previous ID, we need to extricate it from
	 * the previous list.
	 *
	 * Drop the TCPCB lock and lock the tree and the bucket.
	 * Because this is called in the socket context, we (theoretically)
	 * don't need to worry about the INPCB completely going away
	 * while we are gone.
	 */
	if (tp->t_lib != NULL) {
		tlb = tp->t_lib;
		TCPID_BUCKET_REF(tlb);
		INP_WUNLOCK(inp);

		if (tree_locked == TREE_UNLOCKED) {
			TCPID_TREE_RLOCK();
			tree_locked = TREE_RLOCKED;
		}
		TCPID_BUCKET_LOCK(tlb);
		bucket_locked = true;
		INP_WLOCK(inp);

		/*
		 * Unreference the bucket. If our bucket went away, it is no
		 * longer locked or valid.
		 */
		if (tcp_log_unref_bucket(tlb, &tree_locked, inp)) {
			bucket_locked = false;
			tlb = NULL;
		}

		/* Validate the INP. */
		RECHECK_INP();

		/*
		 * Evaluate whether the bucket changed while we were unlocked.
		 *
		 * Possible scenarios here:
		 * 1. Bucket is unchanged and the same one we started with.
		 * 2. The TCPCB no longer has a bucket and our bucket was
		 *    freed.
		 * 3. The TCPCB has a new bucket, whether or not ours was
		 *    freed.
		 * 4. The TCPCB no longer has a bucket and our bucket was
		 *    not freed.
		 *
		 * In cases 2-4, we will start over. In case 1, we will
		 * proceed with removing the TCPCB from the bucket.
		 */
		if (tlb == NULL || tp->t_lib != tlb) {
			KASSERT(bucket_locked || tlb == NULL,
			    ("%s: bucket_locked (%d) and tlb (%p) are "
			    "inconsistent", __func__, bucket_locked, tlb));

			if (bucket_locked) {
				TCPID_BUCKET_UNLOCK(tlb);
				bucket_locked = false;
				tlb = NULL;
			}
			goto restart;
		}

		/*
		 * Store the (struct tcp_log_id_node) for reuse. Then, remove
		 * it from the bucket. In the process, we may end up relocking.
		 * If so, we need to validate that the INP is still valid, and
		 * that the TCPCB entries match what we expect.
		 *
		 * We will clear tlb and change the bucket_locked state just
		 * before calling tcp_log_remove_id_node(), since that function
		 * will unlock the bucket.
		 */
		if (tln != NULL)
			uma_zfree(tcp_log_id_node_zone, tln);
		tln = tp->t_lin;
		tlb = NULL;
		bucket_locked = false;
		if (tcp_log_remove_id_node(inp, tp, NULL, NULL, &tree_locked)) {
			RECHECK_INP();

			/*
			 * If the TCPCB moved to a new bucket while we had
			 * dropped the lock, restart.
			 */
			if (tp->t_lib != NULL || tp->t_lin != NULL)
				goto restart;
		}

		/*
		 * Yay! We successfully removed the TCPCB from its old
		 * bucket. Phew!
		 *
		 * On to bigger and better things...
		 */
	}

	/* At this point, the TCPCB should not be in any bucket. */
	KASSERT(tp->t_lib == NULL, ("%s: tp->t_lib is not NULL", __func__));

	/*
	 * If the new ID is not empty, we need to now assign this TCPCB to a
	 * new bucket.
	 */
	if (*id) {
		/* Get a new tln, if we don't already have one to reuse. */
		if (tln == NULL) {
			tln = uma_zalloc(tcp_log_id_node_zone,
				M_NOWAIT | M_ZERO);
			if (tln == NULL) {
				rv = ENOBUFS;
				goto done;
			}
			tln->tln_inp = inp;
			tln->tln_tp = tp;
		}

		/*
		 * Drop the INP lock for a bit. We don't need it, and dropping
		 * it prevents lock order reversals.
		 */
		INP_WUNLOCK(inp);

		/* Make sure we have at least a read lock on the tree. */
		tcp_log_id_validate_tree_lock(tree_locked);
		if (tree_locked == TREE_UNLOCKED) {
			TCPID_TREE_RLOCK();
			tree_locked = TREE_RLOCKED;
		}

refind:
		/*
		 * Remember that we constructed (struct tcp_log_id_bucket)
		 * with tlb_id as its first member, so we can safely cast
		 * the id to it for the purposes of finding.
		 */
		KASSERT(tlb == NULL, ("%s:%d tlb unexpectedly non-NULL",
		    __func__, __LINE__));
		tmp_tlb = RB_FIND(tcp_log_id_tree, &tcp_log_id_head,
		    (struct tcp_log_id_bucket *) id);

		/*
		 * If we didn't find a matching bucket, we need to add a new
		 * one. This requires a write lock. But, of course, we will
		 * need to recheck some things when we re-acquire the lock.
		 */
		if (tmp_tlb == NULL && tree_locked != TREE_WLOCKED) {
			tree_locked = TREE_WLOCKED;
			if (!TCPID_TREE_UPGRADE()) {
				TCPID_TREE_RUNLOCK();
				TCPID_TREE_WLOCK();

				/*
				 * The tree may have changed while we were
				 * unlocked.
				 */
				goto refind;
			}
		}

		/* If we need to add a new bucket, do it now. */
		if (tmp_tlb == NULL) {
			/* Allocate new bucket. */
			tlb = uma_zalloc(tcp_log_id_bucket_zone, M_NOWAIT);
			if (tlb == NULL) {
				rv = ENOBUFS;
				goto done_noinp;
			}
			counter_u64_add(tcp_log_pcb_ids_cur, 1);
			counter_u64_add(tcp_log_pcb_ids_tot, 1);

			if ((tcp_log_auto_all == false) &&
			    tcp_log_auto_mode &&
			    tcp_log_selectauto()) {
				/* Save off the log state */
				tlb->tlb_logstate = tcp_log_auto_mode;
			} else
				tlb->tlb_logstate = TCP_LOG_STATE_OFF;
			tlb->tlb_loglimit = 0;
			tlb->tlb_tag[0] = '\0'; /* Default to an empty tag. */

			/*
			 * Copy the ID to the bucket.
			 * NB: Don't use strlcpy() unless you are sure
			 * we've always validated NUL termination.
			 *
			 * TODO: When I'm done writing this, see if we
			 * have correctly validated NUL termination and
			 * can use strlcpy(). :-)
			 */
			strncpy(tlb->tlb_id, id, TCP_LOG_ID_LEN - 1);
			tlb->tlb_id[TCP_LOG_ID_LEN - 1] = '\0';

			/*
			 * Take the refcount for the first node and go ahead
			 * and lock this. Note that we zero the tlb_mtx
			 * structure, since 0xdeadc0de flips the right bits
			 * for the code to think that this mutex has already
			 * been initialized. :-(
			 */
			SLIST_INIT(&tlb->tlb_head);
			refcount_init(&tlb->tlb_refcnt, 1);
			tlb->tlb_reqcnt = 1;
			memset(&tlb->tlb_mtx, 0, sizeof(struct mtx));
			TCPID_BUCKET_LOCK_INIT(tlb);
			TCPID_BUCKET_LOCK(tlb);
			bucket_locked = true;

#define	FREE_NEW_TLB()	do {				\
	TCPID_BUCKET_LOCK_DESTROY(tlb);			\
	uma_zfree(tcp_log_id_bucket_zone, tlb);		\
	counter_u64_add(tcp_log_pcb_ids_cur, (int64_t)-1);	\
	counter_u64_add(tcp_log_pcb_ids_tot, (int64_t)-1);	\
	bucket_locked = false;				\
	tlb = NULL;					\
} while (0)
			/*
			 * Relock the INP and make sure we are still
			 * unassigned.
			 */
			INP_WLOCK(inp);
			RECHECK_INP_CLEAN(FREE_NEW_TLB());
			if (tp->t_lib != NULL) {
				FREE_NEW_TLB();
				goto restart;
			}

			/* Add the new bucket to the tree. */
			tmp_tlb = RB_INSERT(tcp_log_id_tree, &tcp_log_id_head,
			    tlb);
			KASSERT(tmp_tlb == NULL,
			    ("%s: Unexpected conflicting bucket (%p) while "
			    "adding new bucket (%p)", __func__, tmp_tlb, tlb));

			/*
			 * If we found a conflicting bucket, free the new
			 * one we made and fall through to use the existing
			 * bucket.
			 */
			if (tmp_tlb != NULL) {
				FREE_NEW_TLB();
				INP_WUNLOCK(inp);
			}
#undef	FREE_NEW_TLB
		}

		/* If we found an existing bucket, use it. */
		if (tmp_tlb != NULL) {
			tlb = tmp_tlb;
			TCPID_BUCKET_LOCK(tlb);
			bucket_locked = true;

			/*
			 * Relock the INP and make sure we are still
			 * unassigned.
			 */
			INP_UNLOCK_ASSERT(inp);
			INP_WLOCK(inp);
			RECHECK_INP();
			if (tp->t_lib != NULL) {
				TCPID_BUCKET_UNLOCK(tlb);
				bucket_locked = false;
				tlb = NULL;
				goto restart;
			}

			/* Take a reference on the bucket. */
			TCPID_BUCKET_REF(tlb);

			/* Record the request. */
			tcp_log_increment_reqcnt(tlb);
		}

		tcp_log_grow_tlb(tlb->tlb_id, tp);

		/* Add the new node to the list. */
		SLIST_INSERT_HEAD(&tlb->tlb_head, tln, tln_list);
		tp->t_lib = tlb;
		tp->t_lin = tln;
		if (tp->t_lib->tlb_logstate) {
			/* Clone in any logging */

			tp->t_logstate = tp->t_lib->tlb_logstate;
		}
		if (tp->t_lib->tlb_loglimit) {
			/* The loglimit too */

			tp->t_loglimit = tp->t_lib->tlb_loglimit;
		}
		tln = NULL;
	}

	rv = 0;

done:
	/* Unlock things, as needed, and return. */
	INP_WUNLOCK(inp);
done_noinp:
	INP_UNLOCK_ASSERT(inp);
	if (bucket_locked) {
		TCPID_BUCKET_LOCK_ASSERT(tlb);
		TCPID_BUCKET_UNLOCK(tlb);
	} else if (tlb != NULL)
		TCPID_BUCKET_UNLOCK_ASSERT(tlb);
	if (tree_locked == TREE_WLOCKED) {
		TCPID_TREE_WLOCK_ASSERT();
		TCPID_TREE_WUNLOCK();
	} else if (tree_locked == TREE_RLOCKED) {
		TCPID_TREE_RLOCK_ASSERT();
		TCPID_TREE_RUNLOCK();
	} else
		TCPID_TREE_UNLOCK_ASSERT();
	if (tln != NULL)
		uma_zfree(tcp_log_id_node_zone, tln);
	return (rv);
}

/*
 * Get the TCP log ID for a TCPCB.
 * Called with INPCB locked.
 * 'buf' must point to a buffer that is at least TCP_LOG_ID_LEN bytes long.
 * Returns number of bytes copied.
 */
size_t
tcp_log_get_id(struct tcpcb *tp, char *buf)
{
	size_t len;

	INP_LOCK_ASSERT(tptoinpcb(tp));
	if (tp->t_lib != NULL) {
		len = strlcpy(buf, tp->t_lib->tlb_id, TCP_LOG_ID_LEN);
		KASSERT(len < TCP_LOG_ID_LEN,
		    ("%s:%d: tp->t_lib->tlb_id too long (%zu)",
		    __func__, __LINE__, len));
	} else {
		*buf = '\0';
		len = 0;
	}
	return (len);
}

/*
 * Get the tag associated with the TCPCB's log ID.
 * Called with INPCB locked. Returns with it unlocked.
 * 'buf' must point to a buffer that is at least TCP_LOG_TAG_LEN bytes long.
 * Returns number of bytes copied.
 */
size_t
tcp_log_get_tag(struct tcpcb *tp, char *buf)
{
	struct inpcb *inp = tptoinpcb(tp);
	struct tcp_log_id_bucket *tlb;
	size_t len;
	int tree_locked;

	INP_WLOCK_ASSERT(inp);

	tree_locked = TREE_UNLOCKED;
	tlb = tp->t_lib;

	if (tlb != NULL) {
		TCPID_BUCKET_REF(tlb);
		INP_WUNLOCK(inp);
		TCPID_BUCKET_LOCK(tlb);
		len = strlcpy(buf, tlb->tlb_tag, TCP_LOG_TAG_LEN);
		KASSERT(len < TCP_LOG_TAG_LEN,
		    ("%s:%d: tp->t_lib->tlb_tag too long (%zu)",
		    __func__, __LINE__, len));
		if (!tcp_log_unref_bucket(tlb, &tree_locked, NULL))
			TCPID_BUCKET_UNLOCK(tlb);

		if (tree_locked == TREE_WLOCKED) {
			TCPID_TREE_WLOCK_ASSERT();
			TCPID_TREE_WUNLOCK();
		} else if (tree_locked == TREE_RLOCKED) {
			TCPID_TREE_RLOCK_ASSERT();
			TCPID_TREE_RUNLOCK();
		} else
			TCPID_TREE_UNLOCK_ASSERT();
	} else {
		INP_WUNLOCK(inp);
		*buf = '\0';
		len = 0;
	}

	return (len);
}

/*
 * Get number of connections with the same log ID.
 * Log ID is taken from given TCPCB.
 * Called with INPCB locked.
 */
u_int
tcp_log_get_id_cnt(struct tcpcb *tp)
{

	INP_WLOCK_ASSERT(tptoinpcb(tp));
	return ((tp->t_lib == NULL) ? 0 : tp->t_lib->tlb_refcnt);
}

#ifdef TCPLOG_DEBUG_RINGBUF
/*
 * Functions/macros to increment/decrement reference count for a log
 * entry. This should catch when we do a double-free/double-remove or
 * a double-add.
 */
static inline void
_tcp_log_entry_refcnt_add(struct tcp_log_mem *log_entry, const char *func,
    int line)
{
	int refcnt;

	refcnt = atomic_fetchadd_int(&log_entry->tlm_refcnt, 1);
	if (refcnt != 0)
		panic("%s:%d: log_entry(%p)->tlm_refcnt is %d (expected 0)",
		    func, line, log_entry, refcnt);
}
#define	tcp_log_entry_refcnt_add(l)	\
    _tcp_log_entry_refcnt_add((l), __func__, __LINE__)

static inline void
_tcp_log_entry_refcnt_rem(struct tcp_log_mem *log_entry, const char *func,
    int line)
{
	int refcnt;

	refcnt = atomic_fetchadd_int(&log_entry->tlm_refcnt, -1);
	if (refcnt != 1)
		panic("%s:%d: log_entry(%p)->tlm_refcnt is %d (expected 1)",
		    func, line, log_entry, refcnt);
}
#define	tcp_log_entry_refcnt_rem(l)	\
    _tcp_log_entry_refcnt_rem((l), __func__, __LINE__)

#else /* !TCPLOG_DEBUG_RINGBUF */

#define	tcp_log_entry_refcnt_add(l)
#define	tcp_log_entry_refcnt_rem(l)

#endif

/*
 * Cleanup after removing a log entry, but only decrement the count if we
 * are running INVARIANTS.
 */
static inline void
tcp_log_free_log_common(struct tcp_log_mem *log_entry, int *count __unused)
{

	uma_zfree(tcp_log_zone, log_entry);
#ifdef INVARIANTS
	(*count)--;
	KASSERT(*count >= 0,
	    ("%s: count unexpectedly negative", __func__));
#endif
}

static void
tcp_log_free_entries(struct tcp_log_stailq *head, int *count)
{
	struct tcp_log_mem *log_entry;

	/* Free the entries. */
	while ((log_entry = STAILQ_FIRST(head)) != NULL) {
		STAILQ_REMOVE_HEAD(head, tlm_queue);
		tcp_log_entry_refcnt_rem(log_entry);
		tcp_log_free_log_common(log_entry, count);
	}
}

/* Cleanup after removing a log entry. */
static inline void
tcp_log_remove_log_cleanup(struct tcpcb *tp, struct tcp_log_mem *log_entry)
{
	uma_zfree(tcp_log_zone, log_entry);
	tp->t_lognum--;
	KASSERT(tp->t_lognum >= 0,
	    ("%s: tp->t_lognum unexpectedly negative", __func__));
}

/* Remove a log entry from the head of a list. */
static inline void
tcp_log_remove_log_head(struct tcpcb *tp, struct tcp_log_mem *log_entry)
{

	KASSERT(log_entry == STAILQ_FIRST(&tp->t_logs),
	    ("%s: attempt to remove non-HEAD log entry", __func__));
	STAILQ_REMOVE_HEAD(&tp->t_logs, tlm_queue);
	tcp_log_entry_refcnt_rem(log_entry);
	tcp_log_remove_log_cleanup(tp, log_entry);
}

#ifdef TCPLOG_DEBUG_RINGBUF
/*
 * Initialize the log entry's reference count, which we want to
 * survive allocations.
 */
static int
tcp_log_zone_init(void *mem, int size, int flags __unused)
{
	struct tcp_log_mem *tlm;

	KASSERT(size >= sizeof(struct tcp_log_mem),
	    ("%s: unexpectedly short (%d) allocation", __func__, size));
	tlm = (struct tcp_log_mem *)mem;
	tlm->tlm_refcnt = 0;
	return (0);
}

/*
 * Double check that the refcnt is zero on allocation and return.
 */
static int
tcp_log_zone_ctor(void *mem, int size, void *args __unused, int flags __unused)
{
	struct tcp_log_mem *tlm;

	KASSERT(size >= sizeof(struct tcp_log_mem),
	    ("%s: unexpectedly short (%d) allocation", __func__, size));
	tlm = (struct tcp_log_mem *)mem;
	if (tlm->tlm_refcnt != 0)
		panic("%s:%d: tlm(%p)->tlm_refcnt is %d (expected 0)",
		    __func__, __LINE__, tlm, tlm->tlm_refcnt);
	return (0);
}

static void
tcp_log_zone_dtor(void *mem, int size, void *args __unused)
{
	struct tcp_log_mem *tlm;

	KASSERT(size >= sizeof(struct tcp_log_mem),
	    ("%s: unexpectedly short (%d) allocation", __func__, size));
	tlm = (struct tcp_log_mem *)mem;
	if (tlm->tlm_refcnt != 0)
		panic("%s:%d: tlm(%p)->tlm_refcnt is %d (expected 0)",
		    __func__, __LINE__, tlm, tlm->tlm_refcnt);
}
#endif /* TCPLOG_DEBUG_RINGBUF */

/* Do global initialization. */
void
tcp_log_init(void)
{

	tcp_log_zone = uma_zcreate("tcp_log", sizeof(struct tcp_log_mem),
#ifdef TCPLOG_DEBUG_RINGBUF
	    tcp_log_zone_ctor, tcp_log_zone_dtor, tcp_log_zone_init,
#else
	    NULL, NULL, NULL,
#endif
	    NULL, UMA_ALIGN_PTR, 0);
	(void)uma_zone_set_max(tcp_log_zone, TCP_LOG_BUF_DEFAULT_GLOBAL_LIMIT);
	tcp_log_id_bucket_zone = uma_zcreate("tcp_log_id_bucket",
	    sizeof(struct tcp_log_id_bucket), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);
	tcp_log_id_node_zone = uma_zcreate("tcp_log_id_node",
	    sizeof(struct tcp_log_id_node), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);
#ifdef TCPLOG_DEBUG_COUNTERS
	tcp_log_queued = counter_u64_alloc(M_WAITOK);
	tcp_log_que_fail1 = counter_u64_alloc(M_WAITOK);
	tcp_log_que_fail2 = counter_u64_alloc(M_WAITOK);
	tcp_log_que_fail3 = counter_u64_alloc(M_WAITOK);
	tcp_log_que_fail4 = counter_u64_alloc(M_WAITOK);
	tcp_log_que_fail5 = counter_u64_alloc(M_WAITOK);
	tcp_log_que_copyout = counter_u64_alloc(M_WAITOK);
	tcp_log_que_read = counter_u64_alloc(M_WAITOK);
	tcp_log_que_freed = counter_u64_alloc(M_WAITOK);
#endif
	tcp_log_pcb_ids_cur = counter_u64_alloc(M_WAITOK);
	tcp_log_pcb_ids_tot = counter_u64_alloc(M_WAITOK);

	rw_init_flags(&tcp_id_tree_lock, "TCP ID tree", RW_NEW);
	mtx_init(&tcp_log_expireq_mtx, "TCP log expireq", NULL, MTX_DEF);
	callout_init(&tcp_log_expireq_callout, 1);
}

/* Do per-TCPCB initialization. */
void
tcp_log_tcpcbinit(struct tcpcb *tp)
{

	/* A new TCPCB should start out zero-initialized. */
	STAILQ_INIT(&tp->t_logs);

	/*
	 * If we are doing auto-capturing, figure out whether we will capture
	 * this session.
	 */
	tp->t_loglimit = tcp_log_session_limit;
	if ((tcp_log_auto_all == true) &&
	    tcp_log_auto_mode &&
	    tcp_log_selectauto()) {
		tp->t_logstate = tcp_log_auto_mode;
		tp->t_flags2 |= TF2_LOG_AUTO;
	}
}

/* Expire entries and remove them from the expiry queue. */
static void
tcp_log_expire(void *unused __unused)
{
	struct tcp_log_id_bucket *tlb;
	struct tcp_log_id_node *tln;
	sbintime_t expiry_limit;
	int tree_locked;

	TCPLOG_EXPIREQ_LOCK();
	if (callout_pending(&tcp_log_expireq_callout)) {
		/* Callout was reset. */
		TCPLOG_EXPIREQ_UNLOCK();
		return;
	}

	/*
	 * Process entries until we reach one that expires too far in the
	 * future. Look one second in the future.
	 */
	expiry_limit = getsbinuptime() + SBT_1S;
	tree_locked = TREE_UNLOCKED;

	while ((tln = STAILQ_FIRST(&tcp_log_expireq_head)) != NULL &&
	    tln->tln_expiretime <= expiry_limit) {
		if (!callout_active(&tcp_log_expireq_callout)) {
			/*
			 * Callout was stopped. I guess we should
			 * just quit at this point.
			 */
			TCPLOG_EXPIREQ_UNLOCK();
			return;
		}

		/*
		 * Remove the node from the head of the list and unlock
		 * the list. Change the expiry time to SBT_MAX as a signal
		 * to other threads that we now own this.
		 */
		STAILQ_REMOVE_HEAD(&tcp_log_expireq_head, tln_expireq);
		tln->tln_expiretime = SBT_MAX;
		TCPLOG_EXPIREQ_UNLOCK();

		/*
		 * Remove the node from the bucket.
		 */
		tlb = tln->tln_bucket;
		TCPID_BUCKET_LOCK(tlb);
		if (tcp_log_remove_id_node(NULL, NULL, tlb, tln, &tree_locked)) {
			tcp_log_id_validate_tree_lock(tree_locked);
			if (tree_locked == TREE_WLOCKED)
				TCPID_TREE_WUNLOCK();
			else
				TCPID_TREE_RUNLOCK();
			tree_locked = TREE_UNLOCKED;
		}

		/* Drop the INP reference. */
		INP_WLOCK(tln->tln_inp);
		if (!in_pcbrele_wlocked(tln->tln_inp))
			INP_WUNLOCK(tln->tln_inp);

		/* Free the log records. */
		tcp_log_free_entries(&tln->tln_entries, &tln->tln_count);

		/* Free the node. */
		uma_zfree(tcp_log_id_node_zone, tln);

		/* Relock the expiry queue. */
		TCPLOG_EXPIREQ_LOCK();
	}

	/*
	 * We've expired all the entries we can. Do we need to reschedule
	 * ourselves?
	 */
	callout_deactivate(&tcp_log_expireq_callout);
	if (tln != NULL) {
		/*
		 * Get max(now + TCP_LOG_EXPIRE_INTVL, tln->tln_expiretime) and
		 * set the next callout to that. (This helps ensure we generally
		 * run the callout no more often than desired.)
		 */
		expiry_limit = getsbinuptime() + TCP_LOG_EXPIRE_INTVL;
		if (expiry_limit < tln->tln_expiretime)
			expiry_limit = tln->tln_expiretime;
		callout_reset_sbt(&tcp_log_expireq_callout, expiry_limit,
		    SBT_1S, tcp_log_expire, NULL, C_ABSOLUTE);
	}

	/* We're done. */
	TCPLOG_EXPIREQ_UNLOCK();
	return;
}

/*
 * Move log data from the TCPCB to a new node. This will reset the TCPCB log
 * entries and log count; however, it will not touch other things from the
 * TCPCB (e.g. t_lin, t_lib).
 *
 * NOTE: Must hold a lock on the INP.
 */
static void
tcp_log_move_tp_to_node(struct tcpcb *tp, struct tcp_log_id_node *tln)
{
	struct inpcb *inp = tptoinpcb(tp);

	INP_WLOCK_ASSERT(inp);

	tln->tln_ie = inp->inp_inc.inc_ie;
	if (inp->inp_inc.inc_flags & INC_ISIPV6)
		tln->tln_af = AF_INET6;
	else
		tln->tln_af = AF_INET;
	tln->tln_entries = tp->t_logs;
	tln->tln_count = tp->t_lognum;
	tln->tln_bucket = tp->t_lib;

	/* Clear information from the PCB. */
	STAILQ_INIT(&tp->t_logs);
	tp->t_lognum = 0;
}

/* Do per-TCPCB cleanup */
void
tcp_log_tcpcbfini(struct tcpcb *tp)
{
	struct tcp_log_id_node *tln, *tln_first;
	struct tcp_log_mem *log_entry;
	sbintime_t callouttime;

	INP_WLOCK_ASSERT(tptoinpcb(tp));

	TCP_LOG_EVENT(tp, NULL, NULL, NULL, TCP_LOG_CONNEND, 0, 0, NULL, false);

	/*
	 * If we were gathering packets to be automatically dumped, try to do
	 * it now. If this succeeds, the log information in the TCPCB will be
	 * cleared. Otherwise, we'll handle the log information as we do
	 * for other states.
	 */
	switch (tp->t_logstate) {
	case TCP_LOG_STATE_HEAD_AUTO:
		(void)tcp_log_dump_tp_logbuf(tp, "auto-dumped from head",
		    M_NOWAIT, false);
		break;
	case TCP_LOG_STATE_TAIL_AUTO:
		(void)tcp_log_dump_tp_logbuf(tp, "auto-dumped from tail",
		    M_NOWAIT, false);
		break;
	case TCP_LOG_STATE_CONTINUAL:
		(void)tcp_log_dump_tp_logbuf(tp, "auto-dumped from continual",
		    M_NOWAIT, false);
		break;
	}

	/*
	 * There are two ways we could keep logs: per-socket or per-ID. If
	 * we are tracking logs with an ID, then the logs survive the
	 * destruction of the TCPCB.
	 *
	 * If the TCPCB is associated with an ID node, move the logs from the
	 * TCPCB to the ID node. In theory, this is safe, for reasons which I
	 * will now explain for my own benefit when I next need to figure out
	 * this code. :-)
	 *
	 * We own the INP lock. Therefore, no one else can change the contents
	 * of this node (Rule C). Further, no one can remove this node from
	 * the bucket while we hold the lock (Rule D). Basically, no one can
	 * mess with this node. That leaves two states in which we could be:
	 *
	 * 1. Another thread is currently waiting to acquire the INP lock, with
	 *    plans to do something with this node. When we drop the INP lock,
	 *    they will have a chance to do that. They will recheck the
	 *    tln_closed field (see note to Rule C) and then acquire the
	 *    bucket lock before proceeding further.
	 *
	 * 2. Another thread will try to acquire a lock at some point in the
	 *    future. If they try to acquire a lock before we set the
	 *    tln_closed field, they will follow state #1. If they try to
	 *    acquire a lock after we set the tln_closed field, they will be
	 *    able to make changes to the node, at will, following Rule C.
	 *
	 * Therefore, we currently own this node and can make any changes
	 * we want. But, as soon as we set the tln_closed field to true, we
	 * have effectively dropped our lock on the node. (For this reason, we
	 * also need to make sure our writes are ordered correctly. An atomic
	 * operation with "release" semantics should be sufficient.)
	 */

	if (tp->t_lin != NULL) {
		struct inpcb *inp = tptoinpcb(tp);

		/* Copy the relevant information to the log entry. */
		tln = tp->t_lin;
		KASSERT(tln->tln_inp == inp,
		    ("%s: Mismatched inp (tln->tln_inp=%p, tp inpcb=%p)",
		    __func__, tln->tln_inp, inp));
		tcp_log_move_tp_to_node(tp, tln);

		/* Clear information from the PCB. */
		tp->t_lin = NULL;
		tp->t_lib = NULL;

		/*
		 * Take a reference on the INP. This ensures that the INP
		 * remains valid while the node is on the expiry queue. This
		 * ensures the INP is valid for other threads that may be
		 * racing to lock this node when we move it to the expire
		 * queue.
		 */
		in_pcbref(inp);

		/*
		 * Store the entry on the expiry list. The exact behavior
		 * depends on whether we have entries to keep. If so, we
		 * put the entry at the tail of the list and expire in
		 * TCP_LOG_EXPIRE_TIME. Otherwise, we expire "now" and put
		 * the entry at the head of the list. (Handling the cleanup
		 * via the expiry timer lets us avoid locking messy-ness here.)
		 */
		tln->tln_expiretime = getsbinuptime();
		TCPLOG_EXPIREQ_LOCK();
		if (tln->tln_count) {
			tln->tln_expiretime += TCP_LOG_EXPIRE_TIME;
			if (STAILQ_EMPTY(&tcp_log_expireq_head) &&
			    !callout_active(&tcp_log_expireq_callout)) {
				/*
				 * We are adding the first entry and a callout
				 * is not currently scheduled; therefore, we
				 * need to schedule one.
				 */
				callout_reset_sbt(&tcp_log_expireq_callout,
				    tln->tln_expiretime, SBT_1S, tcp_log_expire,
				    NULL, C_ABSOLUTE);
			}
			STAILQ_INSERT_TAIL(&tcp_log_expireq_head, tln,
			    tln_expireq);
		} else {
			callouttime = tln->tln_expiretime +
			    TCP_LOG_EXPIRE_INTVL;
			tln_first = STAILQ_FIRST(&tcp_log_expireq_head);

			if ((tln_first == NULL ||
			    callouttime < tln_first->tln_expiretime) &&
			    (callout_pending(&tcp_log_expireq_callout) ||
			    !callout_active(&tcp_log_expireq_callout))) {
				/*
				 * The list is empty, or we want to run the
				 * expire code before the first entry's timer
				 * fires. Also, we are in a case where a callout
				 * is not actively running. We want to reset
				 * the callout to occur sooner.
				 */
				callout_reset_sbt(&tcp_log_expireq_callout,
				    callouttime, SBT_1S, tcp_log_expire, NULL,
				    C_ABSOLUTE);
			}

			/*
			 * Insert to the head, or just after the head, as
			 * appropriate. (This might result in small
			 * mis-orderings as a bunch of "expire now" entries
			 * gather at the start of the list, but that should
			 * not produce big problems, since the expire timer
			 * will walk through all of them.)
			 */
			if (tln_first == NULL ||
			    tln->tln_expiretime < tln_first->tln_expiretime)
				STAILQ_INSERT_HEAD(&tcp_log_expireq_head, tln,
				    tln_expireq);
			else
				STAILQ_INSERT_AFTER(&tcp_log_expireq_head,
				    tln_first, tln, tln_expireq);
		}
		TCPLOG_EXPIREQ_UNLOCK();

		/*
		 * We are done messing with the tln. After this point, we
		 * can't touch it. (Note that the "release" semantics should
		 * be included with the TCPLOG_EXPIREQ_UNLOCK() call above.
		 * Therefore, they should be unnecessary here. However, it
		 * seems like a good idea to include them anyway, since we
		 * really are releasing a lock here.)
		 */
		atomic_store_rel_int(&tln->tln_closed, 1);
	} else {
		/* Remove log entries. */
		while ((log_entry = STAILQ_FIRST(&tp->t_logs)) != NULL)
			tcp_log_remove_log_head(tp, log_entry);
		KASSERT(tp->t_lognum == 0,
		    ("%s: After freeing entries, tp->t_lognum=%d (expected 0)",
			__func__, tp->t_lognum));
	}

	/*
	 * Change the log state to off (just in case anything tries to sneak
	 * in a last-minute log).
	 */
	tp->t_logstate = TCP_LOG_STATE_OFF;
}

static void
tcp_log_purge_tp_logbuf(struct tcpcb *tp)
{
	struct tcp_log_mem *log_entry;

	INP_WLOCK_ASSERT(tptoinpcb(tp));
	if (tp->t_lognum == 0)
		return;

	while ((log_entry = STAILQ_FIRST(&tp->t_logs)) != NULL)
		tcp_log_remove_log_head(tp, log_entry);
	KASSERT(tp->t_lognum == 0,
		("%s: After freeing entries, tp->t_lognum=%d (expected 0)",
		 __func__, tp->t_lognum));
	tp->t_logstate = TCP_LOG_STATE_OFF;
}

/*
 * This logs an event for a TCP socket. Normally, this is called via
 * TCP_LOG_EVENT or TCP_LOG_EVENT_VERBOSE. See the documentation for
 * TCP_LOG_EVENT().
 */
struct tcp_log_buffer *
tcp_log_event_(struct tcpcb *tp, struct tcphdr *th, struct sockbuf *rxbuf,
    struct sockbuf *txbuf, uint8_t eventid, int errornum, uint32_t len,
    union tcp_log_stackspecific *stackinfo, int th_hostorder,
    const char *output_caller, const char *func, int line, const struct timeval *itv)
{
	struct tcp_log_mem *log_entry;
	struct tcp_log_buffer *log_buf;
	int attempt_count = 0;
	struct tcp_log_verbose *log_verbose;
	uint32_t logsn;

	KASSERT((func == NULL && line == 0) || (func != NULL && line > 0),
	    ("%s called with inconsistent func (%p) and line (%d) arguments",
		__func__, func, line));

	INP_WLOCK_ASSERT(tptoinpcb(tp));
	if (tcp_disable_all_bb_logs) {
		/*
		 * The global switch to disable all BB logging has been
		 * thrown. Call the purge function, which frees the logs
		 * and turns off logging for this connection.
		 */
		tcp_log_purge_tp_logbuf(tp);
		return (NULL);
	}
	KASSERT(tp->t_logstate == TCP_LOG_STATE_HEAD ||
	    tp->t_logstate == TCP_LOG_STATE_TAIL ||
	    tp->t_logstate == TCP_LOG_STATE_CONTINUAL ||
	    tp->t_logstate == TCP_LOG_STATE_HEAD_AUTO ||
	    tp->t_logstate == TCP_LOG_STATE_TAIL_AUTO,
	    ("%s called with unexpected tp->t_logstate (%d)", __func__,
		tp->t_logstate));

	/*
	 * Get the serial number. We do this early so it will
	 * increment even if we end up skipping the log entry for some
	 * reason.
	 */
	logsn = tp->t_logsn++;

	/*
	 * Can we get a new log entry? If so, increment the lognum counter
	 * here.
	 */
retry:
	if (tp->t_lognum < tp->t_loglimit) {
		if ((log_entry = uma_zalloc(tcp_log_zone, M_NOWAIT)) != NULL)
			tp->t_lognum++;
	} else
		log_entry = NULL;

	/* Do we need to try to reuse? */
	if (log_entry == NULL) {
		/*
		 * Sacrifice auto-logged sessions without a log ID if
		 * tcp_log_auto_all is false. (If they don't have a log
		 * ID by now, it is probable that either they won't get one
		 * or we are resource-constrained.)
		 */
		if (tp->t_lib == NULL && (tp->t_flags2 & TF2_LOG_AUTO) &&
		    !tcp_log_auto_all) {
			if (tcp_log_state_change(tp, TCP_LOG_STATE_CLEAR)) {
#ifdef INVARIANTS
				panic("%s:%d: tcp_log_state_change() failed "
				    "to set tp %p to TCP_LOG_STATE_CLEAR",
				    __func__, __LINE__, tp);
#endif
				tp->t_logstate = TCP_LOG_STATE_OFF;
			}
			return (NULL);
		}
		/*
		 * If we are in TCP_LOG_STATE_HEAD_AUTO state, try to dump
		 * the buffers. If successful, deactivate tracing. Otherwise,
		 * leave it active so we will retry.
		 */
		if (tp->t_logstate == TCP_LOG_STATE_HEAD_AUTO &&
		    !tcp_log_dump_tp_logbuf(tp, "auto-dumped from head",
		    M_NOWAIT, false)) {
			tp->t_logstate = TCP_LOG_STATE_OFF;
			return (NULL);
		} else if ((tp->t_logstate == TCP_LOG_STATE_CONTINUAL) &&
		    !tcp_log_dump_tp_logbuf(tp, "auto-dumped from continual",
		    M_NOWAIT, false)) {
			if (attempt_count == 0) {
				attempt_count++;
				goto retry;
			}
#ifdef TCPLOG_DEBUG_COUNTERS
			counter_u64_add(tcp_log_que_fail4, 1);
#endif
			return (NULL);
		} else if (tp->t_logstate == TCP_LOG_STATE_HEAD_AUTO)
			return (NULL);

		/* If in HEAD state, just deactivate the tracing and return. */
		if (tp->t_logstate == TCP_LOG_STATE_HEAD) {
			tp->t_logstate = TCP_LOG_STATE_OFF;
			return (NULL);
		}

		/*
		 * Get a buffer to reuse. If that fails, just give up.
		 * (We can't log anything without a buffer in which to
		 * put it.)
		 *
		 * Note that we don't change the t_lognum counter
		 * here. Because we are re-using the buffer, the total
		 * number won't change.
		 */
		if ((log_entry = STAILQ_FIRST(&tp->t_logs)) == NULL)
			return (NULL);
		STAILQ_REMOVE_HEAD(&tp->t_logs, tlm_queue);
		tcp_log_entry_refcnt_rem(log_entry);
	}

	KASSERT(log_entry != NULL,
	    ("%s: log_entry unexpectedly NULL", __func__));

	/* Extract the log buffer and verbose buffer pointers. */
	log_buf = &log_entry->tlm_buf;
	log_verbose = &log_entry->tlm_v;

	/* Basic entries. */
	if (itv == NULL)
		getmicrouptime(&log_buf->tlb_tv);
	else
		memcpy(&log_buf->tlb_tv, itv, sizeof(struct timeval));
	log_buf->tlb_ticks = ticks;
	log_buf->tlb_sn = logsn;
	log_buf->tlb_stackid = tp->t_fb->tfb_id;
	log_buf->tlb_eventid = eventid;
	log_buf->tlb_eventflags = 0;
	log_buf->tlb_errno = errornum;

	/* Socket buffers */
	if (rxbuf != NULL) {
		log_buf->tlb_eventflags |= TLB_FLAG_RXBUF;
		log_buf->tlb_rxbuf.tls_sb_acc = rxbuf->sb_acc;
		log_buf->tlb_rxbuf.tls_sb_ccc = rxbuf->sb_ccc;
		log_buf->tlb_rxbuf.tls_sb_spare = 0;
	}
	if (txbuf != NULL) {
		log_buf->tlb_eventflags |= TLB_FLAG_TXBUF;
		log_buf->tlb_txbuf.tls_sb_acc = txbuf->sb_acc;
		log_buf->tlb_txbuf.tls_sb_ccc = txbuf->sb_ccc;
		log_buf->tlb_txbuf.tls_sb_spare = 0;
	}
	/* Copy values from tp to the log entry. */
#define	COPY_STAT(f)	log_buf->tlb_ ## f = tp->f
#define	COPY_STAT_T(f)	log_buf->tlb_ ## f = tp->t_ ## f
1676 	COPY_STAT_T(state);
1677 	COPY_STAT_T(starttime);
1678 	COPY_STAT(iss);
1679 	COPY_STAT_T(flags);
1680 	COPY_STAT(snd_una);
1681 	COPY_STAT(snd_max);
1682 	COPY_STAT(snd_cwnd);
1683 	COPY_STAT(snd_nxt);
1684 	COPY_STAT(snd_recover);
1685 	COPY_STAT(snd_wnd);
1686 	COPY_STAT(snd_ssthresh);
1687 	COPY_STAT_T(srtt);
1688 	COPY_STAT_T(rttvar);
1689 	COPY_STAT(rcv_up);
1690 	COPY_STAT(rcv_adv);
1691 	COPY_STAT(rcv_nxt);
1692 	COPY_STAT(rcv_wnd);
1693 	COPY_STAT_T(dupacks);
1694 	COPY_STAT_T(segqlen);
1695 	COPY_STAT(snd_numholes);
1696 	COPY_STAT(snd_scale);
1697 	COPY_STAT(rcv_scale);
1698 	COPY_STAT_T(flags2);
1699 	COPY_STAT_T(fbyte_in);
1700 	COPY_STAT_T(fbyte_out);
1701 #undef COPY_STAT
1702 #undef COPY_STAT_T
1703 	log_buf->tlb_flex1 = 0;
1704 	log_buf->tlb_flex2 = 0;
1705 	/* Copy stack-specific info. */
1706 	if (stackinfo != NULL) {
1707 		memcpy(&log_buf->tlb_stackinfo, stackinfo,
1708 		    sizeof(log_buf->tlb_stackinfo));
1709 		log_buf->tlb_eventflags |= TLB_FLAG_STACKINFO;
1710 	}
1711 
1712 	/* The packet */
1713 	log_buf->tlb_len = len;
1714 	if (th) {
1715 		int optlen;
1716 
1717 		log_buf->tlb_eventflags |= TLB_FLAG_HDR;
1718 		log_buf->tlb_th = *th;
1719 		if (th_hostorder)
1720 			tcp_fields_to_net(&log_buf->tlb_th);
1721 		optlen = (th->th_off << 2) - sizeof(struct tcphdr);
1722 		if (optlen > 0)
1723 			memcpy(log_buf->tlb_opts, th + 1, optlen);
1724 	}
1725 
1726 	/* Verbose information */
1727 	if (func != NULL) {
1728 		log_buf->tlb_eventflags |= TLB_FLAG_VERBOSE;
1729 		if (output_caller != NULL)
1730 			strlcpy(log_verbose->tlv_snd_frm, output_caller,
1731 			    TCP_FUNC_LEN);
1732 		else
1733 			*log_verbose->tlv_snd_frm = 0;
1734 		strlcpy(log_verbose->tlv_trace_func, func, TCP_FUNC_LEN);
1735 		log_verbose->tlv_trace_line = line;
1736 	}
1737 
1738 	/* Insert the new log at the tail. */
1739 	STAILQ_INSERT_TAIL(&tp->t_logs, log_entry, tlm_queue);
1740 	tcp_log_entry_refcnt_add(log_entry);
1741 	return (log_buf);
1742 }
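
/*
 * Sketch of a typical caller. Stacks normally record events through
 * the TCP_LOG_EVENT() macro (see tcp_log_buf.h) rather than calling
 * the function above directly; an input path might use something
 * like:
 *
 *	TCP_LOG_EVENT(tp, th, &so->so_rcv, &so->so_snd,
 *	    TCP_LOG_IN, 0, tlen, NULL, true);
 *
 * (Illustrative only; "tlen" stands for the caller's segment
 * length.) The macro also checks the connection's logging state and
 * only supplies the caller details for the verbose entry when
 * tcp_log_verbose is set.
 */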
1743 
1744 /*
1745  * Change the logging state for a TCPCB. Returns 0 on success or an
1746  * error code on failure.
1747  */
1748 int
1749 tcp_log_state_change(struct tcpcb *tp, int state)
1750 {
1751 	struct tcp_log_mem *log_entry;
1752 
1753 	INP_WLOCK_ASSERT(tptoinpcb(tp));
1754 	switch (state) {
1755 	case TCP_LOG_STATE_CLEAR:
1756 		while ((log_entry = STAILQ_FIRST(&tp->t_logs)) != NULL)
1757 			tcp_log_remove_log_head(tp, log_entry);
1758 		/* Fall through */
1759 
1760 	case TCP_LOG_STATE_OFF:
1761 		tp->t_logstate = TCP_LOG_STATE_OFF;
1762 		break;
1763 
1764 	case TCP_LOG_STATE_TAIL:
1765 	case TCP_LOG_STATE_HEAD:
1766 	case TCP_LOG_STATE_CONTINUAL:
1767 	case TCP_LOG_STATE_HEAD_AUTO:
1768 	case TCP_LOG_STATE_TAIL_AUTO:
1769 		tp->t_logstate = state;
1770 		break;
1771 
1772 	default:
1773 		return (EINVAL);
1774 	}
1775 	if (tcp_disable_all_bb_logs) {
1776 		/* BB logging is globally disabled; force logging off. */
1777 		tp->t_logstate = TCP_LOG_STATE_OFF;
1778 	}
1779 	tp->t_flags2 &= ~(TF2_LOG_AUTO);
1780 
1781 	return (0);
1782 }
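
/*
 * Illustrative userspace counterpart (a sketch, assuming the TCP_LOG
 * socket option from <netinet/tcp.h> reaches this function via
 * tcp_ctloutput()):
 *
 *	int state = TCP_LOG_STATE_CONTINUAL;
 *
 *	if (setsockopt(fd, IPPROTO_TCP, TCP_LOG, &state,
 *	    sizeof(state)) == -1)
 *		err(1, "setsockopt(TCP_LOG)");
 *
 * Note that any request is silently downgraded to TCP_LOG_STATE_OFF
 * while tcp_disable_all_bb_logs is set.
 */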
1783 
1784 /* If tcp_drain() is called, flush half the log entries. */
1785 void
1786 tcp_log_drain(struct tcpcb *tp)
1787 {
1788 	struct tcp_log_mem *log_entry, *next;
1789 	int target, skip;
1790 
1791 	INP_WLOCK_ASSERT(tptoinpcb(tp));
1792 	if ((target = tp->t_lognum / 2) == 0)
1793 		return;
1794 
1795 	/*
1796 	 * If we are logging the "head" packets, we want to discard
1797 	 * from the tail of the queue. Otherwise, we want to discard
1798 	 * from the head.
1799 	 */
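	/*
	 * Worked example: with tp->t_lognum == 8, target is 4. In the
	 * head case, skip is also 4, so the loop below stops on the
	 * fourth entry and frees everything after it, keeping the four
	 * oldest entries. Otherwise, four entries are freed from the
	 * head of the queue, keeping the four newest.
	 */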
1800 	if (tp->t_logstate == TCP_LOG_STATE_HEAD ||
1801 	    tp->t_logstate == TCP_LOG_STATE_HEAD_AUTO) {
1802 		skip = tp->t_lognum - target;
1803 		STAILQ_FOREACH(log_entry, &tp->t_logs, tlm_queue)
1804 			if (!--skip)
1805 				break;
1806 		KASSERT(log_entry != NULL,
1807 		    ("%s: skipped through all entries!", __func__));
1808 		if (log_entry == NULL)
1809 			return;
1810 		while ((next = STAILQ_NEXT(log_entry, tlm_queue)) != NULL) {
1811 			STAILQ_REMOVE_AFTER(&tp->t_logs, log_entry, tlm_queue);
1812 			tcp_log_entry_refcnt_rem(next);
1813 			tcp_log_remove_log_cleanup(tp, next);
1814 #ifdef INVARIANTS
1815 			target--;
1816 #endif
1817 		}
1818 		KASSERT(target == 0,
1819 		    ("%s: After removing from tail, target was %d", __func__,
1820 			target));
1821 	} else if (tp->t_logstate == TCP_LOG_STATE_CONTINUAL) {
1822 		(void)tcp_log_dump_tp_logbuf(tp, "auto-dumped from continual",
1823 		    M_NOWAIT, false);
1824 	} else {
1825 		while ((log_entry = STAILQ_FIRST(&tp->t_logs)) != NULL &&
1826 		    target--)
1827 			tcp_log_remove_log_head(tp, log_entry);
1828 		KASSERT(target <= 0,
1829 		    ("%s: After removing from head, target was %d", __func__,
1830 			target));
1831 		KASSERT(tp->t_lognum > 0,
1832 		    ("%s: After removing from head, tp->t_lognum was %d",
1833 			__func__, tp->t_lognum));
1834 		KASSERT(log_entry != NULL,
1835 		    ("%s: After removing from head, the tailq was empty",
1836 			__func__));
1837 	}
1838 }
1839 
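/*
 * Copy data to the destination described by a sockopt. If the sockopt
 * has an associated thread, the destination is a userspace buffer and
 * we must use copyout(). Otherwise, the destination is kernel memory
 * (as when tcp_log_expandlogbuf() builds a buffer for the log device),
 * so a plain bcopy() suffices.
 */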
1840 static inline int
1841 tcp_log_copyout(struct sockopt *sopt, void *src, void *dst, size_t len)
1842 {
1843 
1844 	if (sopt->sopt_td != NULL)
1845 		return (copyout(src, dst, len));
1846 	bcopy(src, dst, len);
1847 	return (0);
1848 }
1849 
1850 static int
1851 tcp_log_logs_to_buf(struct sockopt *sopt, struct tcp_log_stailq *log_tailqp,
1852     struct tcp_log_buffer **end, int count)
1853 {
1854 	struct tcp_log_buffer *out_entry;
1855 	struct tcp_log_mem *log_entry;
1856 	size_t entrysize;
1857 	int error;
1858 #ifdef INVARIANTS
1859 	int orig_count = count;
1860 #endif
1861 
1862 	/* Copy the data out. */
1863 	error = 0;
1864 	out_entry = (struct tcp_log_buffer *) sopt->sopt_val;
1865 	STAILQ_FOREACH(log_entry, log_tailqp, tlm_queue) {
1866 		count--;
1867 		KASSERT(count >= 0,
1868 		    ("%s:%d: Exceeded expected count (%d) processing list %p",
1869 		    __func__, __LINE__, orig_count, log_tailqp));
1870 
1871 #ifdef TCPLOG_DEBUG_COUNTERS
1872 		counter_u64_add(tcp_log_que_copyout, 1);
1873 #endif
1874 
1875 		/*
1876 		 * Skip copying out the header if it isn't present.
1877 		 * Instead, copy out zeros (to ensure we don't leak info).
1878 		 * TODO: Make sure we truly do zero everything we don't
1879 		 * explicitly set.
1880 		 */
1881 		if (log_entry->tlm_buf.tlb_eventflags & TLB_FLAG_HDR)
1882 			entrysize = sizeof(struct tcp_log_buffer);
1883 		else
1884 			entrysize = offsetof(struct tcp_log_buffer, tlb_th);
1885 		error = tcp_log_copyout(sopt, &log_entry->tlm_buf, out_entry,
1886 		    entrysize);
1887 		if (error)
1888 			break;
1889 		if (!(log_entry->tlm_buf.tlb_eventflags & TLB_FLAG_HDR)) {
1890 			error = tcp_log_copyout(sopt, zerobuf,
1891 			    ((uint8_t *)out_entry) + entrysize,
1892 			    sizeof(struct tcp_log_buffer) - entrysize);
			if (error)
				break;
1893 		}
1894 
1895 		/*
1896 		 * Copy out the verbose bit, if needed. Either way,
1897 		 * increment the output pointer the correct amount.
1898 		 */
1899 		if (log_entry->tlm_buf.tlb_eventflags & TLB_FLAG_VERBOSE) {
1900 			error = tcp_log_copyout(sopt, &log_entry->tlm_v,
1901 			    out_entry->tlb_verbose,
1902 			    sizeof(struct tcp_log_verbose));
1903 			if (error)
1904 				break;
1905 			out_entry = (struct tcp_log_buffer *)
1906 			    (((uint8_t *) (out_entry + 1)) +
1907 			    sizeof(struct tcp_log_verbose));
1908 		} else
1909 			out_entry++;
1910 	}
1911 	*end = out_entry;
1912 	KASSERT(error || count == 0,
1913 	    ("%s:%d: Less than expected count (%d) processing list %p"
1914 	    " (%d remain)", __func__, __LINE__, orig_count,
1915 	    log_tailqp, count));
1916 
1917 	return (error);
1918 }
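
/*
 * A note on the format produced above: each record is a fixed-size
 * struct tcp_log_buffer, followed by a struct tcp_log_verbose only
 * when TLB_FLAG_VERBOSE is set in tlb_eventflags. A consumer must
 * therefore check that flag in each record to locate the start of the
 * next one.
 */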
1919 
1920 /*
1921  * Copy out the buffer. Note that we do incremental copying, so
1922  * sooptcopyout() won't work. However, the goal is to produce the same
1923  * end result as if we copied in the entire user buffer, updated it,
1924  * and then used sooptcopyout() to copy it out.
1925  *
1926  * NOTE: This should be called with a write lock on the PCB; however,
1927  * the function will drop it after it extracts the data from the TCPCB.
1928  */
1929 int
1930 tcp_log_getlogbuf(struct sockopt *sopt, struct tcpcb *tp)
1931 {
1932 	struct tcp_log_stailq log_tailq;
1933 	struct tcp_log_mem *log_entry, *log_next;
1934 	struct tcp_log_buffer *out_entry;
1935 	struct inpcb *inp = tptoinpcb(tp);
1936 	size_t outsize, entrysize;
1937 	int error, outnum;
1938 
1939 	INP_WLOCK_ASSERT(inp);
1940 
1941 	/*
1942 	 * Determine which log entries will fit in the buffer. As an
1943 	 * optimization, skip this if all the entries will clearly fit
1944 	 * in the buffer. (However, get an exact size if we are using
1945 	 * INVARIANTS.)
1946 	 */
1947 #ifndef INVARIANTS
1948 	if (sopt->sopt_valsize / (sizeof(struct tcp_log_buffer) +
1949 	    sizeof(struct tcp_log_verbose)) >= tp->t_lognum) {
1950 		log_entry = STAILQ_LAST(&tp->t_logs, tcp_log_mem, tlm_queue);
1951 		log_next = NULL;
1952 		outsize = 0;
1953 		outnum = tp->t_lognum;
1954 	} else {
1955 #endif
1956 		outsize = outnum = 0;
1957 		log_entry = NULL;
1958 		STAILQ_FOREACH(log_next, &tp->t_logs, tlm_queue) {
1959 			entrysize = sizeof(struct tcp_log_buffer);
1960 			if (log_next->tlm_buf.tlb_eventflags &
1961 			    TLB_FLAG_VERBOSE)
1962 				entrysize += sizeof(struct tcp_log_verbose);
1963 			if ((sopt->sopt_valsize - outsize) < entrysize)
1964 				break;
1965 			outsize += entrysize;
1966 			outnum++;
1967 			log_entry = log_next;
1968 		}
1969 		KASSERT(outsize <= sopt->sopt_valsize,
1970 		    ("%s: calculated output size (%zu) greater than available"
1971 			" space (%zu)", __func__, outsize, sopt->sopt_valsize));
1972 #ifndef INVARIANTS
1973 	}
1974 #endif
1975 
1976 	/*
1977 	 * Copy traditional sooptcopyout() behavior: if sopt->sopt_val
1978 	 * is NULL, silently skip the copy. However, in this case, we
1979 	 * will leave the list alone and return. Functionally, this
1980 	 * gives userspace a way to poll for an approximate buffer
1981 	 * size they will need to get the log entries.
1982 	 */
1983 	if (sopt->sopt_val == NULL) {
1984 		INP_WUNLOCK(inp);
1985 		if (outsize == 0) {
1986 			outsize = outnum * (sizeof(struct tcp_log_buffer) +
1987 			    sizeof(struct tcp_log_verbose));
1988 		}
1989 		if (sopt->sopt_valsize > outsize)
1990 			sopt->sopt_valsize = outsize;
1991 		return (0);
1992 	}
1993 
1994 	/*
1995 	 * Break apart the list. We'll save the ones we want to copy
1996 	 * out locally and remove them from the TCPCB list. We can
1997 	 * then drop the INPCB lock while we do the copyout.
1998 	 *
1999 	 * There are roughly three cases:
2000 	 * 1. There was nothing to copy out. That's easy: drop the
2001 	 * lock and return.
2002 	 * 2. We are copying out the entire list. Again, that's easy:
2003 	 * move the whole list.
2004 	 * 3. We are copying out a partial list. That's harder. We
2005 	 * need to update the list book-keeping entries.
2006 	 */
2007 	if (log_entry != NULL && log_next == NULL) {
2008 		/* Move entire list. */
2009 		KASSERT(outnum == tp->t_lognum,
2010 		    ("%s:%d: outnum (%d) should match tp->t_lognum (%d)",
2011 			__func__, __LINE__, outnum, tp->t_lognum));
2012 		log_tailq = tp->t_logs;
2013 		tp->t_lognum = 0;
2014 		STAILQ_INIT(&tp->t_logs);
2015 	} else if (log_entry != NULL) {
2016 		/* Move partial list. */
2017 		KASSERT(outnum < tp->t_lognum,
2018 		    ("%s:%d: outnum (%d) not less than tp->t_lognum (%d)",
2019 			__func__, __LINE__, outnum, tp->t_lognum));
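		/*
		 * Hand-rolled STAILQ split: log_tailq takes the entries
		 * from the head of tp->t_logs up to and including
		 * log_entry (the last one that fits), while tp->t_logs
		 * keeps the remainder. Terminate log_tailq at log_entry
		 * and point its tail pointer at log_entry's next field.
		 */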
2020 		STAILQ_FIRST(&log_tailq) = STAILQ_FIRST(&tp->t_logs);
2021 		STAILQ_FIRST(&tp->t_logs) = STAILQ_NEXT(log_entry, tlm_queue);
2022 		KASSERT(STAILQ_NEXT(log_entry, tlm_queue) != NULL,
2023 		    ("%s:%d: tp->t_logs is shorter than expected"
2024 		    " (tp: %p, log_tailq: %p, outnum: %d, tp->t_lognum: %d)",
2025 		    __func__, __LINE__, tp, &log_tailq, outnum, tp->t_lognum));
2026 		STAILQ_NEXT(log_entry, tlm_queue) = NULL;
2027 		log_tailq.stqh_last = &STAILQ_NEXT(log_entry, tlm_queue);
2028 		tp->t_lognum -= outnum;
2029 	} else
2030 		STAILQ_INIT(&log_tailq);
2031 
2032 	/* Drop the PCB lock. */
2033 	INP_WUNLOCK(inp);
2034 
2035 	/* Copy the data out. */
2036 	error = tcp_log_logs_to_buf(sopt, &log_tailq, &out_entry, outnum);
2037 
2038 	if (error) {
2039 		/* Restore list */
2040 		INP_WLOCK(inp);
2041 		if ((inp->inp_flags & INP_DROPPED) == 0) {
2042 			tp = intotcpcb(inp);
2043 
2044 			/* Merge the two lists. */
2045 			STAILQ_CONCAT(&log_tailq, &tp->t_logs);
2046 			tp->t_logs = log_tailq;
2047 			tp->t_lognum += outnum;
2048 		}
2049 		INP_WUNLOCK(inp);
2050 	} else {
2051 		/* Sanity check entries */
2052 		KASSERT(((caddr_t)out_entry - (caddr_t)sopt->sopt_val) ==
2053 		    outsize, ("%s: Actual output size (%zu) != "
2054 			"calculated output size (%zu)", __func__,
2055 			(size_t)((caddr_t)out_entry - (caddr_t)sopt->sopt_val),
2056 			outsize));
2057 
2058 		/* Free the entries we just copied out. */
2059 		STAILQ_FOREACH_SAFE(log_entry, &log_tailq, tlm_queue, log_next) {
2060 			tcp_log_entry_refcnt_rem(log_entry);
2061 			uma_zfree(tcp_log_zone, log_entry);
2062 		}
2063 	}
2064 
2065 	sopt->sopt_valsize = (size_t)((caddr_t)out_entry -
2066 	    (caddr_t)sopt->sopt_val);
2067 	return (error);
2068 }
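
/*
 * Illustrative userspace pattern for the poll-then-read behavior
 * above (a sketch, assuming this function backs the TCP_LOGBUF socket
 * option). First pass a NULL buffer with a generous length to learn
 * an approximate size, then allocate and fetch; the second call also
 * removes the copied entries from the TCPCB:
 *
 *	void *buf;
 *	socklen_t len = 1 << 24;
 *
 *	getsockopt(fd, IPPROTO_TCP, TCP_LOGBUF, NULL, &len);
 *	buf = malloc(len);
 *	getsockopt(fd, IPPROTO_TCP, TCP_LOGBUF, buf, &len);
 */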
2069 
2070 static void
2071 tcp_log_free_queue(struct tcp_log_dev_queue *param)
2072 {
2073 	struct tcp_log_dev_log_queue *entry;
2074 
2075 	KASSERT(param != NULL, ("%s: called with NULL param", __func__));
2076 	if (param == NULL)
2077 		return;
2078 
2079 	entry = (struct tcp_log_dev_log_queue *)param;
2080 
2081 	/* Free the entries. */
2082 	tcp_log_free_entries(&entry->tldl_entries, &entry->tldl_count);
2083 
2084 	/* Free the buffer, if it is allocated. */
2085 	if (entry->tldl_common.tldq_buf != NULL)
2086 		free(entry->tldl_common.tldq_buf, M_TCPLOGDEV);
2087 
2088 	/* Free the queue entry. */
2089 	free(entry, M_TCPLOGDEV);
2090 }
2091 
2092 static struct tcp_log_common_header *
2093 tcp_log_expandlogbuf(struct tcp_log_dev_queue *param)
2094 {
2095 	struct tcp_log_dev_log_queue *entry;
2096 	struct tcp_log_header *hdr;
2097 	uint8_t *end;
2098 	struct sockopt sopt;
2099 	int error;
2100 
2101 	entry = (struct tcp_log_dev_log_queue *)param;
2102 
2103 	/* Take a worst-case guess at space needs. */
2104 	sopt.sopt_valsize = sizeof(struct tcp_log_header) +
2105 	    entry->tldl_count * (sizeof(struct tcp_log_buffer) +
2106 	    sizeof(struct tcp_log_verbose));
2107 	hdr = malloc(sopt.sopt_valsize, M_TCPLOGDEV, M_NOWAIT);
2108 	if (hdr == NULL) {
2109 #ifdef TCPLOG_DEBUG_COUNTERS
2110 		counter_u64_add(tcp_log_que_fail5, entry->tldl_count);
2111 #endif
2112 		return (NULL);
2113 	}
2114 	sopt.sopt_val = hdr + 1;
2115 	sopt.sopt_valsize -= sizeof(struct tcp_log_header);
2116 	sopt.sopt_td = NULL;
2117 
2118 	error = tcp_log_logs_to_buf(&sopt, &entry->tldl_entries,
2119 	    (struct tcp_log_buffer **)&end, entry->tldl_count);
2120 	if (error) {
2121 		free(hdr, M_TCPLOGDEV);
2122 		return (NULL);
2123 	}
2124 
2125 	/* Free the entries. */
2126 	tcp_log_free_entries(&entry->tldl_entries, &entry->tldl_count);
2127 	entry->tldl_count = 0;
2128 
2129 	memset(hdr, 0, sizeof(struct tcp_log_header));
2130 	hdr->tlh_version = TCP_LOG_BUF_VER;
2131 	hdr->tlh_type = TCP_LOG_DEV_TYPE_BBR;
2132 	hdr->tlh_length = end - (uint8_t *)hdr;
2133 	hdr->tlh_ie = entry->tldl_ie;
2134 	hdr->tlh_af = entry->tldl_af;
2135 	getboottime(&hdr->tlh_offset);
2136 	strlcpy(hdr->tlh_id, entry->tldl_id, TCP_LOG_ID_LEN);
2137 	strlcpy(hdr->tlh_tag, entry->tldl_tag, TCP_LOG_TAG_LEN);
2138 	strlcpy(hdr->tlh_reason, entry->tldl_reason, TCP_LOG_REASON_LEN);
2139 	return ((struct tcp_log_common_header *)hdr);
2140 }
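
/*
 * The buffer produced above is a struct tcp_log_header immediately
 * followed by the packed log records described in
 * tcp_log_logs_to_buf(). tlh_length covers the header and all of the
 * records, which lets a log device reader walk a stream of
 * concatenated buffers.
 */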
2141 
2142 /*
2143  * Queue the tcpcb's log buffer for transmission via the log buffer facility.
2144  *
2145  * NOTE: This should be called with a write lock on the PCB.
2146  *
2147  * how should be M_WAITOK or M_NOWAIT. If M_WAITOK, the function will drop
2148  * and reacquire the INP lock if it needs to do so.
2149  *
2150  * If force is false, this will only dump auto-logged sessions if
2151  * tcp_log_auto_all is true or if there is a log ID defined for the session.
2152  */
2153 int
2154 tcp_log_dump_tp_logbuf(struct tcpcb *tp, char *reason, int how, bool force)
2155 {
2156 	struct tcp_log_dev_log_queue *entry;
2157 	struct inpcb *inp = tptoinpcb(tp);
2158 #ifdef TCPLOG_DEBUG_COUNTERS
2159 	int num_entries;
2160 #endif
2161 
2162 	INP_WLOCK_ASSERT(inp);
2163 
2164 	/* If there are no log entries, there is nothing to do. */
2165 	if (tp->t_lognum == 0)
2166 		return (0);
2167 
2168 	/* Check for a log ID. */
2169 	if (tp->t_lib == NULL && (tp->t_flags2 & TF2_LOG_AUTO) &&
2170 	    !tcp_log_auto_all && !force) {
2171 		struct tcp_log_mem *log_entry;
2172 
2173 		/*
2174 		 * We needed a log ID and none was found. Free the log entries
2175 		 * and return success. Also, cancel further logging. If the
2176 		 * session doesn't have a log ID by now, we'll assume it isn't
2177 		 * going to get one.
2178 		 */
2179 		while ((log_entry = STAILQ_FIRST(&tp->t_logs)) != NULL)
2180 			tcp_log_remove_log_head(tp, log_entry);
2181 		KASSERT(tp->t_lognum == 0,
2182 		    ("%s: After freeing entries, tp->t_lognum=%d (expected 0)",
2183 			__func__, tp->t_lognum));
2184 		tp->t_logstate = TCP_LOG_STATE_OFF;
2185 		return (0);
2186 	}
2187 
2188 	/*
2189 	 * Allocate memory. If we must wait, we'll need to drop the locks
2190 	 * and reacquire them (and do all the related business that goes
2191 	 * along with that).
2192 	 */
2193 	entry = malloc(sizeof(struct tcp_log_dev_log_queue), M_TCPLOGDEV,
2194 	    M_NOWAIT);
2195 	if (entry == NULL && (how & M_NOWAIT)) {
2196 #ifdef TCPLOG_DEBUG_COUNTERS
2197 		counter_u64_add(tcp_log_que_fail3, 1);
2198 #endif
2199 		return (ENOBUFS);
2200 	}
2201 	if (entry == NULL) {
2202 		INP_WUNLOCK(inp);
2203 		entry = malloc(sizeof(struct tcp_log_dev_log_queue),
2204 		    M_TCPLOGDEV, M_WAITOK);
2205 		INP_WLOCK(inp);
2206 		/*
2207 		 * Note that this check is slightly overly restrictive in
2208 		 * that the TCB can survive this event.
2209 		 * However, there is currently not a good way to ensure
2210 		 * that is the case. So, if we hit this M_WAITOK path, we
2211 		 * may end up dropping some entries. That seems like a
2212 		 * small price to pay for safety.
2213 		 */
2214 		if (inp->inp_flags & INP_DROPPED) {
2215 			free(entry, M_TCPLOGDEV);
2216 #ifdef TCPLOG_DEBUG_COUNTERS
2217 			counter_u64_add(tcp_log_que_fail2, 1);
2218 #endif
2219 			return (ECONNRESET);
2220 		}
2221 		tp = intotcpcb(inp);
2222 		if (tp->t_lognum == 0) {
2223 			free(entry, M_TCPLOGDEV);
2224 			return (0);
2225 		}
2226 	}
2227 
2228 	/* Fill in the unique parts of the queue entry. */
2229 	if (tp->t_lib != NULL) {
2230 		strlcpy(entry->tldl_id, tp->t_lib->tlb_id, TCP_LOG_ID_LEN);
2231 		strlcpy(entry->tldl_tag, tp->t_lib->tlb_tag, TCP_LOG_TAG_LEN);
2232 	} else {
2233 		strlcpy(entry->tldl_id, "UNKNOWN", TCP_LOG_ID_LEN);
2234 		strlcpy(entry->tldl_tag, "UNKNOWN", TCP_LOG_TAG_LEN);
2235 	}
2236 	if (reason != NULL)
2237 		strlcpy(entry->tldl_reason, reason, TCP_LOG_REASON_LEN);
2238 	else
2239 		strlcpy(entry->tldl_reason, "UNKNOWN", TCP_LOG_REASON_LEN);
2240 	entry->tldl_ie = inp->inp_inc.inc_ie;
2241 	if (inp->inp_inc.inc_flags & INC_ISIPV6)
2242 		entry->tldl_af = AF_INET6;
2243 	else
2244 		entry->tldl_af = AF_INET;
2245 	entry->tldl_entries = tp->t_logs;
2246 	entry->tldl_count = tp->t_lognum;
2247 
2248 	/* Fill in the common parts of the queue entry. */
2249 	entry->tldl_common.tldq_buf = NULL;
2250 	entry->tldl_common.tldq_xform = tcp_log_expandlogbuf;
2251 	entry->tldl_common.tldq_dtor = tcp_log_free_queue;
2252 
2253 	/* Clear the log data from the TCPCB. */
2254 #ifdef TCPLOG_DEBUG_COUNTERS
2255 	num_entries = tp->t_lognum;
2256 #endif
2257 	tp->t_lognum = 0;
2258 	STAILQ_INIT(&tp->t_logs);
2259 
2260 	/* Add the entry. If no one is listening, free the entry. */
2261 	if (tcp_log_dev_add_log((struct tcp_log_dev_queue *)entry)) {
2262 		tcp_log_free_queue((struct tcp_log_dev_queue *)entry);
2263 #ifdef TCPLOG_DEBUG_COUNTERS
2264 		counter_u64_add(tcp_log_que_fail1, num_entries);
2265 	} else {
2266 		counter_u64_add(tcp_log_queued, num_entries);
2267 #endif
2268 	}
2269 	return (0);
2270 }
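
/*
 * Illustrative trigger from userspace (a sketch, assuming the
 * TCP_LOGDUMP socket option passes its string argument here as the
 * dump reason):
 *
 *	const char *reason = "operator requested";
 *
 *	if (setsockopt(fd, IPPROTO_TCP, TCP_LOGDUMP, reason,
 *	    strlen(reason) + 1) == -1)
 *		err(1, "setsockopt(TCP_LOGDUMP)");
 *
 * The queued records are then consumed by a reader of the BB log
 * device (see dev/tcp_log/tcp_log_dev.c).
 */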
2271 
2272 /*
2273  * Queue the log_id_node's log buffers for transmission via the log buffer
2274  * facility.
2275  *
2276  * NOTE: This should be called with the bucket locked and referenced.
2277  *
2278  * how should be M_WAITOK or M_NOWAIT. If M_WAITOK, the function will drop
2279  * and reacquire the bucket lock if it needs to do so. (The caller must
2280  * ensure that the tln is no longer on any lists so no one else will mess
2281  * with this while the lock is dropped!)
2282  */
2283 static int
2284 tcp_log_dump_node_logbuf(struct tcp_log_id_node *tln, char *reason, int how)
2285 {
2286 	struct tcp_log_dev_log_queue *entry;
2287 	struct tcp_log_id_bucket *tlb;
2288 
2289 	tlb = tln->tln_bucket;
2290 	TCPID_BUCKET_LOCK_ASSERT(tlb);
2291 	KASSERT(tlb->tlb_refcnt > 0,
2292 	    ("%s:%d: Called with unreferenced bucket (tln=%p, tlb=%p)",
2293 	    __func__, __LINE__, tln, tlb));
2294 	KASSERT(tln->tln_closed,
2295 	    ("%s:%d: Called for node with tln_closed==false (tln=%p)",
2296 	    __func__, __LINE__, tln));
2297 
2298 	/* If there are no log entries, there is nothing to do. */
2299 	if (tln->tln_count == 0)
2300 		return (0);
2301 
2302 	/*
2303 	 * Allocate memory. If we must wait, we'll need to drop the locks
2304 	 * and reacquire them (and do all the related business that goes
2305 	 * along with that).
2306 	 */
2307 	entry = malloc(sizeof(struct tcp_log_dev_log_queue), M_TCPLOGDEV,
2308 	    M_NOWAIT);
2309 	if (entry == NULL && (how & M_NOWAIT))
2310 		return (ENOBUFS);
2311 	if (entry == NULL) {
2312 		TCPID_BUCKET_UNLOCK(tlb);
2313 		entry = malloc(sizeof(struct tcp_log_dev_log_queue),
2314 		    M_TCPLOGDEV, M_WAITOK);
2315 		TCPID_BUCKET_LOCK(tlb);
2316 	}
2317 
2318 	/* Fill in the common parts of the queue entry. */
2319 	entry->tldl_common.tldq_buf = NULL;
2320 	entry->tldl_common.tldq_xform = tcp_log_expandlogbuf;
2321 	entry->tldl_common.tldq_dtor = tcp_log_free_queue;
2322 
2323 	/* Fill in the unique parts of the queue entry. */
2324 	strlcpy(entry->tldl_id, tlb->tlb_id, TCP_LOG_ID_LEN);
2325 	strlcpy(entry->tldl_tag, tlb->tlb_tag, TCP_LOG_TAG_LEN);
2326 	if (reason != NULL)
2327 		strlcpy(entry->tldl_reason, reason, TCP_LOG_REASON_LEN);
2328 	else
2329 		strlcpy(entry->tldl_reason, "UNKNOWN", TCP_LOG_REASON_LEN);
2330 	entry->tldl_ie = tln->tln_ie;
2331 	entry->tldl_entries = tln->tln_entries;
2332 	entry->tldl_count = tln->tln_count;
2333 	entry->tldl_af = tln->tln_af;
2334 
2335 	/* Add the entry. If no one is listening, free the entry. */
2336 	if (tcp_log_dev_add_log((struct tcp_log_dev_queue *)entry))
2337 		tcp_log_free_queue((struct tcp_log_dev_queue *)entry);
2338 
2339 	return (0);
2340 }
2341 
2342 /*
2343  * Queue the log buffers for all sessions in a bucket for transmission via
2344  * the log buffer facility.
2345  *
2346  * NOTE: This should be called with a locked bucket; however, the function
2347  * will drop the lock.
2348  */
2349 #define	LOCAL_SAVE	10
2350 static void
2351 tcp_log_dumpbucketlogs(struct tcp_log_id_bucket *tlb, char *reason)
2352 {
2353 	struct tcp_log_id_node local_entries[LOCAL_SAVE];
2354 	struct inpcb *inp;
2355 	struct tcpcb *tp;
2356 	struct tcp_log_id_node *cur_tln, *prev_tln, *tmp_tln;
2357 	int i, num_local_entries, tree_locked;
2358 	bool expireq_locked;
2359 
2360 	TCPID_BUCKET_LOCK_ASSERT(tlb);
2361 
2362 	/*
2363 	 * Take a reference on the bucket to keep it from disappearing until
2364 	 * we are done.
2365 	 */
2366 	TCPID_BUCKET_REF(tlb);
2367 
2368 	/*
2369 	 * We'll try to create these without dropping locks. However, we
2370 	 * might very well need to drop locks to get memory. If that's the
2371 	 * case, we'll save up to 10 on the stack, and sacrifice the rest.
2372 	 * (Otherwise, we need to worry about finding our place again in a
2373 	 * potentially changed list. It just doesn't seem worth the trouble
2374 	 * to do that.)
2375 	 */
2376 	expireq_locked = false;
2377 	num_local_entries = 0;
2378 	prev_tln = NULL;
2379 	tree_locked = TREE_UNLOCKED;
2380 	SLIST_FOREACH_SAFE(cur_tln, &tlb->tlb_head, tln_list, tmp_tln) {
2381 		/*
2382 		 * If this isn't associated with a TCPCB, we can pull it off
2383 		 * the list now. We need to be careful that the expire timer
2384 		 * hasn't already taken ownership (tln_expiretime == SBT_MAX).
2385 		 * If so, we let the expire timer code free the data.
2386 		 */
2387 		if (cur_tln->tln_closed) {
2388 no_inp:
2389 			/*
2390 			 * Get the expireq lock so we can get a consistent
2391 			 * read of tln_expiretime and so we can remove this
2392 			 * from the expireq.
2393 			 */
2394 			if (!expireq_locked) {
2395 				TCPLOG_EXPIREQ_LOCK();
2396 				expireq_locked = true;
2397 			}
2398 
2399 			/*
2400 			 * We ignore entries with tln_expiretime == SBT_MAX.
2401 			 * The expire timer code already owns those.
2402 			 */
2403 			KASSERT(cur_tln->tln_expiretime > (sbintime_t) 0,
2404 			    ("%s:%d: node on the expire queue without positive "
2405 			    "expire time", __func__, __LINE__));
2406 			if (cur_tln->tln_expiretime == SBT_MAX) {
2407 				prev_tln = cur_tln;
2408 				continue;
2409 			}
2410 
2411 			/* Remove the entry from the expireq. */
2412 			STAILQ_REMOVE(&tcp_log_expireq_head, cur_tln,
2413 			    tcp_log_id_node, tln_expireq);
2414 
2415 			/* Remove the entry from the bucket. */
2416 			if (prev_tln != NULL)
2417 				SLIST_REMOVE_AFTER(prev_tln, tln_list);
2418 			else
2419 				SLIST_REMOVE_HEAD(&tlb->tlb_head, tln_list);
2420 
2421 			/*
2422 			 * Drop the INP and bucket reference counts. Due to
2423 			 * lock-ordering rules, we need to drop the expire
2424 			 * queue lock.
2425 			 */
2426 			TCPLOG_EXPIREQ_UNLOCK();
2427 			expireq_locked = false;
2428 
2429 			/* Drop the INP reference. */
2430 			INP_WLOCK(cur_tln->tln_inp);
2431 			if (!in_pcbrele_wlocked(cur_tln->tln_inp))
2432 				INP_WUNLOCK(cur_tln->tln_inp);
2433 
2434 			if (tcp_log_unref_bucket(tlb, &tree_locked, NULL)) {
2435 #ifdef INVARIANTS
2436 				panic("%s: Bucket refcount unexpectedly 0.",
2437 				    __func__);
2438 #endif
2439 				/*
2440 				 * Recover as best we can: free the entry we
2441 				 * own.
2442 				 */
2443 				tcp_log_free_entries(&cur_tln->tln_entries,
2444 				    &cur_tln->tln_count);
2445 				uma_zfree(tcp_log_id_node_zone, cur_tln);
2446 				goto done;
2447 			}
2448 
2449 			if (tcp_log_dump_node_logbuf(cur_tln, reason,
2450 			    M_NOWAIT)) {
2451 				/*
2452 				 * If we have space, save the entries locally.
2453 				 * Otherwise, free them.
2454 				 */
2455 				if (num_local_entries < LOCAL_SAVE) {
2456 					local_entries[num_local_entries] =
2457 					    *cur_tln;
2458 					num_local_entries++;
2459 				} else {
2460 					tcp_log_free_entries(
2461 					    &cur_tln->tln_entries,
2462 					    &cur_tln->tln_count);
2463 				}
2464 			}
2465 
2466 			/* No matter what, we are done with the node now. */
2467 			uma_zfree(tcp_log_id_node_zone, cur_tln);
2468 
2469 			/*
2470 			 * Because we removed this entry from the list, prev_tln
2471 			 * (which tracks the previous entry still on the tlb
2472 			 * list) remains unchanged.
2473 			 */
2474 			continue;
2475 		}
2476 
2477 		/*
2478 		 * If we get to this point, the session data is still held in
2479 		 * the TCPCB. So, we need to pull the data out of that.
2480 		 *
2481 		 * We will need to drop the expireq lock so we can lock the INP.
2482 		 * We can then try to extract the data the "easy" way. If that
2483 		 * fails, we'll save the log entries for later.
2484 		 */
2485 		if (expireq_locked) {
2486 			TCPLOG_EXPIREQ_UNLOCK();
2487 			expireq_locked = false;
2488 		}
2489 
2490 		/* Lock the INP and then re-check the state. */
2491 		inp = cur_tln->tln_inp;
2492 		INP_WLOCK(inp);
2493 		/*
2494 		 * If we caught this while it was transitioning, the data
2495 		 * might have moved from the TCPCB to the tln (signified by
2496 		 * setting tln_closed to true). If so, treat this like an
2497 		 * inactive connection.
2498 		 */
2499 		if (cur_tln->tln_closed) {
2500 			/*
2501 			 * It looks like we may have caught this connection
2502 			 * while it was transitioning from active to inactive.
2503 			 * Treat this like an inactive connection.
2504 			 */
2505 			INP_WUNLOCK(inp);
2506 			goto no_inp;
2507 		}
2508 
2509 		/*
2510 		 * Try to dump the data from the tp without dropping the lock.
2511 		 * If this fails, try to save off the data locally.
2512 		 */
2513 		tp = cur_tln->tln_tp;
2514 		if (tcp_log_dump_tp_logbuf(tp, reason, M_NOWAIT, true) &&
2515 		    num_local_entries < LOCAL_SAVE) {
2516 			tcp_log_move_tp_to_node(tp,
2517 			    &local_entries[num_local_entries]);
2518 			local_entries[num_local_entries].tln_closed = 1;
2519 			KASSERT(local_entries[num_local_entries].tln_bucket ==
2520 			    tlb, ("%s: %d: bucket mismatch for node %p",
2521 			    __func__, __LINE__, cur_tln));
2522 			num_local_entries++;
2523 		}
2524 
2525 		INP_WUNLOCK(inp);
2526 
2527 		/*
2528 		 * We are going to leave the current tln on the list. It will
2529 		 * become the previous tln.
2530 		 */
2531 		prev_tln = cur_tln;
2532 	}
2533 
2534 	/* Drop our locks, if any. */
2535 	KASSERT(tree_locked == TREE_UNLOCKED,
2536 	    ("%s: %d: tree unexpectedly locked", __func__, __LINE__));
2537 	switch (tree_locked) {
2538 	case TREE_WLOCKED:
2539 		TCPID_TREE_WUNLOCK();
2540 		tree_locked = TREE_UNLOCKED;
2541 		break;
2542 	case TREE_RLOCKED:
2543 		TCPID_TREE_RUNLOCK();
2544 		tree_locked = TREE_UNLOCKED;
2545 		break;
2546 	}
2547 	if (expireq_locked) {
2548 		TCPLOG_EXPIREQ_UNLOCK();
2549 		expireq_locked = false;
2550 	}
2551 
2552 	/*
2553 	 * Try again for any saved entries. tcp_log_dump_node_logbuf() is
2554 	 * guaranteed to free the log entries within the node. And, since
2555 	 * the node itself is on our stack, we don't need to free it.
2556 	 */
2557 	for (i = 0; i < num_local_entries; i++)
2558 		tcp_log_dump_node_logbuf(&local_entries[i], reason, M_WAITOK);
2559 
2560 	/* Drop our reference. */
2561 	if (!tcp_log_unref_bucket(tlb, &tree_locked, NULL))
2562 		TCPID_BUCKET_UNLOCK(tlb);
2563 
2564 done:
2565 	/* Drop our locks, if any. */
2566 	switch (tree_locked) {
2567 	case TREE_WLOCKED:
2568 		TCPID_TREE_WUNLOCK();
2569 		break;
2570 	case TREE_RLOCKED:
2571 		TCPID_TREE_RUNLOCK();
2572 		break;
2573 	}
2574 	if (expireq_locked)
2575 		TCPLOG_EXPIREQ_UNLOCK();
2576 }
2577 #undef	LOCAL_SAVE
2578 
2579 /*
2580  * Queue the log buffers for all sessions in a bucket for transmissions via
2581  * Queue the log buffers for all sessions in this TCPCB's bucket for
2582  * transmission via the log buffer facility.
2583  * NOTE: This should be called with a locked INP; however, the function
2584  * will drop the lock.
2585  */
2586 void
2587 tcp_log_dump_tp_bucket_logbufs(struct tcpcb *tp, char *reason)
2588 {
2589 	struct inpcb *inp = tptoinpcb(tp);
2590 	struct tcp_log_id_bucket *tlb;
2591 	int tree_locked;
2592 
2593 	/* Figure out our bucket and lock it. */
2594 	INP_WLOCK_ASSERT(inp);
2595 	tlb = tp->t_lib;
2596 	if (tlb == NULL) {
2597 		/*
2598 		 * No bucket; treat this like a request to dump a single
2599 		 * session's traces.
2600 		 */
2601 		(void)tcp_log_dump_tp_logbuf(tp, reason, M_WAITOK, true);
2602 		INP_WUNLOCK(inp);
2603 		return;
2604 	}
2605 	TCPID_BUCKET_REF(tlb);
2606 	INP_WUNLOCK(inp);
2607 	TCPID_BUCKET_LOCK(tlb);
2608 
2609 	/* If we are the last reference, we have nothing more to do here. */
2610 	tree_locked = TREE_UNLOCKED;
2611 	if (tcp_log_unref_bucket(tlb, &tree_locked, NULL)) {
2612 		switch (tree_locked) {
2613 		case TREE_WLOCKED:
2614 			TCPID_TREE_WUNLOCK();
2615 			break;
2616 		case TREE_RLOCKED:
2617 			TCPID_TREE_RUNLOCK();
2618 			break;
2619 		}
2620 		return;
2621 	}
2622 
2623 	/* Turn this over to tcp_log_dumpbucketlogs() to finish the work. */
2624 	tcp_log_dumpbucketlogs(tlb, reason);
2625 }
2626 
2627 /*
2628  * Mark the end of a flow with the current stack. A stack can add
2629  * stack-specific info to this trace event by overriding this
2630  * function (see bbr_log_flowend() for example).
2631  */
2632 void
2633 tcp_log_flowend(struct tcpcb *tp)
2634 {
2635 	if (tp->t_logstate != TCP_LOG_STATE_OFF) {
2636 		struct socket *so = tptosocket(tp);
2637 		TCP_LOG_EVENT(tp, NULL, &so->so_rcv, &so->so_snd,
2638 				TCP_LOG_FLOWEND, 0, 0, NULL, false);
2639 	}
2640 }
2641