/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2016-2018 Netflix, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/arb.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/qmath.h>
#include <sys/queue.h>
#include <sys/refcount.h>
#include <sys/rwlock.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/tree.h>
#include <sys/stats.h> /* Must come after qmath.h and tree.h */
#include <sys/counter.h>

#include <dev/tcp_log/tcp_log_dev.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_log_buf.h>

/* Default expiry time */
#define	TCP_LOG_EXPIRE_TIME	((sbintime_t)60 * SBT_1S)

/* Max interval at which to run the expiry timer */
#define	TCP_LOG_EXPIRE_INTVL	((sbintime_t)5 * SBT_1S)

bool	tcp_log_verbose;
static uma_zone_t tcp_log_id_bucket_zone, tcp_log_id_node_zone, tcp_log_zone;
static int	tcp_log_session_limit = TCP_LOG_BUF_DEFAULT_SESSION_LIMIT;
static uint32_t	tcp_log_version = TCP_LOG_BUF_VER;
RB_HEAD(tcp_log_id_tree, tcp_log_id_bucket);
static struct tcp_log_id_tree tcp_log_id_head;
static STAILQ_HEAD(, tcp_log_id_node) tcp_log_expireq_head =
    STAILQ_HEAD_INITIALIZER(tcp_log_expireq_head);
static struct mtx tcp_log_expireq_mtx;
static struct callout tcp_log_expireq_callout;
static u_long tcp_log_auto_ratio = 0;
static volatile u_long tcp_log_auto_ratio_cur = 0;
static uint32_t tcp_log_auto_mode = TCP_LOG_STATE_TAIL;
static bool tcp_log_auto_all = false;
static uint32_t tcp_disable_all_bb_logs = 0;

RB_PROTOTYPE_STATIC(tcp_log_id_tree, tcp_log_id_bucket, tlb_rb, tcp_log_id_cmp)

SYSCTL_NODE(_net_inet_tcp, OID_AUTO, bb, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "TCP Black Box controls");

SYSCTL_BOOL(_net_inet_tcp_bb, OID_AUTO, log_verbose, CTLFLAG_RW,
    &tcp_log_verbose, 0, "Force verbose logging for TCP traces");
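
/*
 * All of the sysctl knobs declared in this file live under the
 * net.inet.tcp.bb node created above.  Illustrative usage from userland
 * (not part of this file), e.g. to force verbose traces:
 *
 *	# sysctl net.inet.tcp.bb.log_verbose=1
 */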
SYSCTL_INT(_net_inet_tcp_bb, OID_AUTO, log_session_limit,
    CTLFLAG_RW, &tcp_log_session_limit, 0,
    "Maximum number of events maintained for each TCP session");

SYSCTL_UMA_MAX(_net_inet_tcp_bb, OID_AUTO, log_global_limit, CTLFLAG_RW,
    &tcp_log_zone, "Maximum number of events maintained for all TCP sessions");

SYSCTL_UMA_CUR(_net_inet_tcp_bb, OID_AUTO, log_global_entries, CTLFLAG_RD,
    &tcp_log_zone, "Current number of events maintained for all TCP sessions");

SYSCTL_UMA_MAX(_net_inet_tcp_bb, OID_AUTO, log_id_limit, CTLFLAG_RW,
    &tcp_log_id_bucket_zone, "Maximum number of log IDs");

SYSCTL_UMA_CUR(_net_inet_tcp_bb, OID_AUTO, log_id_entries, CTLFLAG_RD,
    &tcp_log_id_bucket_zone, "Current number of log IDs");

SYSCTL_UMA_MAX(_net_inet_tcp_bb, OID_AUTO, log_id_tcpcb_limit, CTLFLAG_RW,
    &tcp_log_id_node_zone, "Maximum number of tcpcbs with log IDs");

SYSCTL_UMA_CUR(_net_inet_tcp_bb, OID_AUTO, log_id_tcpcb_entries, CTLFLAG_RD,
    &tcp_log_id_node_zone, "Current number of tcpcbs with log IDs");

SYSCTL_U32(_net_inet_tcp_bb, OID_AUTO, log_version, CTLFLAG_RD,
    &tcp_log_version, 0, "Version of log formats exported");

SYSCTL_U32(_net_inet_tcp_bb, OID_AUTO, disable_all, CTLFLAG_RW,
    &tcp_disable_all_bb_logs, TCP_LOG_STATE_HEAD_AUTO,
    "Disable all BB logging for all connections");

SYSCTL_ULONG(_net_inet_tcp_bb, OID_AUTO, log_auto_ratio, CTLFLAG_RW,
    &tcp_log_auto_ratio, 0, "Do auto capturing for 1 out of N sessions");

SYSCTL_U32(_net_inet_tcp_bb, OID_AUTO, log_auto_mode, CTLFLAG_RW,
    &tcp_log_auto_mode, TCP_LOG_STATE_HEAD_AUTO,
    "Logging mode for auto-selected sessions (default is TCP_LOG_STATE_HEAD_AUTO)");

SYSCTL_BOOL(_net_inet_tcp_bb, OID_AUTO, log_auto_all, CTLFLAG_RW,
    &tcp_log_auto_all, false,
    "Auto-select from all sessions (rather than just those with IDs)");

#ifdef TCPLOG_DEBUG_COUNTERS
counter_u64_t tcp_log_queued;
counter_u64_t tcp_log_que_fail1;
counter_u64_t tcp_log_que_fail2;
counter_u64_t tcp_log_que_fail3;
counter_u64_t tcp_log_que_fail4;
counter_u64_t tcp_log_que_fail5;
counter_u64_t tcp_log_que_copyout;
counter_u64_t tcp_log_que_read;
counter_u64_t tcp_log_que_freed;

SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, queued, CTLFLAG_RD,
    &tcp_log_queued, "Number of entries queued");
SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, fail1, CTLFLAG_RD,
    &tcp_log_que_fail1, "Number of entries queued but fail 1");
SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, fail2, CTLFLAG_RD,
    &tcp_log_que_fail2, "Number of entries queued but fail 2");
SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, fail3, CTLFLAG_RD,
    &tcp_log_que_fail3, "Number of entries queued but fail 3");
SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, fail4, CTLFLAG_RD,
    &tcp_log_que_fail4, "Number of entries queued but fail 4");
SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, fail5, CTLFLAG_RD,
    &tcp_log_que_fail5, "Number of entries queued but fail 5");
SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, copyout, CTLFLAG_RD,
    &tcp_log_que_copyout, "Number of entries copied out");
SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, read, CTLFLAG_RD,
    &tcp_log_que_read, "Number of entries read from the queue");
SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, freed, CTLFLAG_RD,
    &tcp_log_que_freed, "Number of entries freed after reading");
#endif

#ifdef INVARIANTS
#define	TCPLOG_DEBUG_RINGBUF
#endif
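
/*
 * Illustrative sketch (userland code, not part of this kernel file):
 * enabling automatic black box capture for roughly 1 out of 100 sessions
 * using the knobs above.  Assumes the usual userland headers
 * (<sys/types.h>, <sys/sysctl.h>, <stdbool.h>); error handling omitted.
 * TCP_LOG_STATE_HEAD_AUTO comes from <netinet/tcp_log_buf.h>.
 *
 *	u_long ratio = 100;
 *	uint32_t mode = TCP_LOG_STATE_HEAD_AUTO;
 *	bool all = true;
 *
 *	(void)sysctlbyname("net.inet.tcp.bb.log_auto_ratio", NULL, NULL,
 *	    &ratio, sizeof(ratio));
 *	(void)sysctlbyname("net.inet.tcp.bb.log_auto_mode", NULL, NULL,
 *	    &mode, sizeof(mode));
 *	(void)sysctlbyname("net.inet.tcp.bb.log_auto_all", NULL, NULL,
 *	    &all, sizeof(all));
 */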
/* Number of requests to consider a PBCID "active". */
#define	ACTIVE_REQUEST_COUNT 10

/* Statistic tracking for "active" PBCIDs. */
static counter_u64_t tcp_log_pcb_ids_cur;
static counter_u64_t tcp_log_pcb_ids_tot;

SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, pcb_ids_cur, CTLFLAG_RD,
    &tcp_log_pcb_ids_cur, "Number of pcb IDs allocated in the system");
SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, pcb_ids_tot, CTLFLAG_RD,
    &tcp_log_pcb_ids_tot, "Total number of pcb IDs that have been allocated");

struct tcp_log_mem
{
	STAILQ_ENTRY(tcp_log_mem) tlm_queue;
	struct tcp_log_buffer	tlm_buf;
	struct tcp_log_verbose	tlm_v;
#ifdef TCPLOG_DEBUG_RINGBUF
	volatile int		tlm_refcnt;
#endif
};

/* 60 bytes for the header, + 16 bytes for padding */
static uint8_t	zerobuf[76];

/*
 * Lock order:
 * 1. TCPID_TREE
 * 2. TCPID_BUCKET
 * 3. INP
 *
 * Rules:
 * A. You need a lock on the Tree to add/remove buckets.
 * B. You need a lock on the bucket to add/remove nodes from the bucket.
 * C. To change information in a node, you need the INP lock if the tln_closed
 *    field is false. Otherwise, you need the bucket lock. (Note that the
 *    tln_closed field can change at any point, so you need to recheck the
 *    entry after acquiring the INP lock.)
 * D. To remove a node from the bucket, you must have that entry locked,
 *    according to the criteria of Rule C. Also, the node must not be on
 *    the expiry queue.
 * E. The exception to C is the expiry queue fields, which are locked by
 *    the TCPLOG_EXPIREQ lock.
 *
 * Buckets have a reference count. Each node is a reference. Further,
 * other callers may add reference counts to keep a bucket from disappearing.
 * You can add a reference as long as you own a lock sufficient to keep the
 * bucket from disappearing. For example, a common use is:
 *   a. Have a locked INP, but need to lock the TCPID_BUCKET.
 *   b. Add a refcount on the bucket. (Safe because the INP lock prevents
 *      the TCPID_BUCKET from going away.)
 *   c. Drop the INP lock.
 *   d. Acquire a lock on the TCPID_BUCKET.
 *   e. Acquire a lock on the INP.
 *   f. Drop the refcount on the bucket.
 *      (At this point, the bucket may disappear.)
 *
 * Expire queue lock:
 * You can acquire this with either the bucket or INP lock. Don't reverse it.
 * When the expire code has committed to freeing a node, it resets the expiry
 * time to SBT_MAX. That is the signal to everyone else that they should
 * leave that node alone.
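 *
 * In code, the reference dance in steps (a)-(f) above looks roughly like
 * the following sketch (tcp_log_set_tag() below uses a variant of it); the
 * TCPID_* macros are defined just after this comment:
 *
 *	TCPID_BUCKET_REF(tlb);		(b) INP lock keeps the bucket alive
 *	INP_WUNLOCK(inp);		(c)
 *	TCPID_BUCKET_LOCK(tlb);		(d)
 *	INP_WLOCK(inp);			(e)
 *	if (!tcp_log_unref_bucket(tlb, &tree_locked, inp))	(f)
 *		TCPID_BUCKET_UNLOCK(tlb);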
 */
static struct rwlock tcp_id_tree_lock;
#define	TCPID_TREE_WLOCK()		rw_wlock(&tcp_id_tree_lock)
#define	TCPID_TREE_RLOCK()		rw_rlock(&tcp_id_tree_lock)
#define	TCPID_TREE_UPGRADE()		rw_try_upgrade(&tcp_id_tree_lock)
#define	TCPID_TREE_WUNLOCK()		rw_wunlock(&tcp_id_tree_lock)
#define	TCPID_TREE_RUNLOCK()		rw_runlock(&tcp_id_tree_lock)
#define	TCPID_TREE_WLOCK_ASSERT()	rw_assert(&tcp_id_tree_lock, RA_WLOCKED)
#define	TCPID_TREE_RLOCK_ASSERT()	rw_assert(&tcp_id_tree_lock, RA_RLOCKED)
#define	TCPID_TREE_UNLOCK_ASSERT()	rw_assert(&tcp_id_tree_lock, RA_UNLOCKED)

#define	TCPID_BUCKET_LOCK_INIT(tlb)	mtx_init(&((tlb)->tlb_mtx), "tcp log id bucket", NULL, MTX_DEF)
#define	TCPID_BUCKET_LOCK_DESTROY(tlb)	mtx_destroy(&((tlb)->tlb_mtx))
#define	TCPID_BUCKET_LOCK(tlb)		mtx_lock(&((tlb)->tlb_mtx))
#define	TCPID_BUCKET_UNLOCK(tlb)	mtx_unlock(&((tlb)->tlb_mtx))
#define	TCPID_BUCKET_LOCK_ASSERT(tlb)	mtx_assert(&((tlb)->tlb_mtx), MA_OWNED)
#define	TCPID_BUCKET_UNLOCK_ASSERT(tlb)	mtx_assert(&((tlb)->tlb_mtx), MA_NOTOWNED)

#define	TCPID_BUCKET_REF(tlb)		refcount_acquire(&((tlb)->tlb_refcnt))
#define	TCPID_BUCKET_UNREF(tlb)		refcount_release(&((tlb)->tlb_refcnt))

#define	TCPLOG_EXPIREQ_LOCK()		mtx_lock(&tcp_log_expireq_mtx)
#define	TCPLOG_EXPIREQ_UNLOCK()		mtx_unlock(&tcp_log_expireq_mtx)

SLIST_HEAD(tcp_log_id_head, tcp_log_id_node);

struct tcp_log_id_bucket
{
	/*
	 * tlb_id must be first. This lets us use strcmp on
	 * (struct tcp_log_id_bucket *) and (char *) interchangeably.
	 */
	char		tlb_id[TCP_LOG_ID_LEN];
	char		tlb_tag[TCP_LOG_TAG_LEN];
	RB_ENTRY(tcp_log_id_bucket) tlb_rb;
	struct tcp_log_id_head tlb_head;
	struct mtx	tlb_mtx;
	volatile u_int	tlb_refcnt;
	volatile u_int	tlb_reqcnt;
	uint32_t	tlb_loglimit;
	uint8_t		tlb_logstate;
};

struct tcp_log_id_node
{
	SLIST_ENTRY(tcp_log_id_node) tln_list;
	STAILQ_ENTRY(tcp_log_id_node) tln_expireq; /* Locked by the expireq lock */
	sbintime_t	tln_expiretime;	/* Locked by the expireq lock */

	/*
	 * If INP is NULL, that means the connection has closed. We've
	 * saved the connection endpoint information and the log entries
	 * in the tln_ie and tln_entries members. We've also saved a pointer
	 * to the enclosing bucket here. If INP is not NULL, the information is
	 * in the PCB and not here.
	 */
	struct inpcb	*tln_inp;
	struct tcpcb	*tln_tp;
	struct tcp_log_id_bucket *tln_bucket;
	struct in_endpoints tln_ie;
	struct tcp_log_stailq tln_entries;
	int		tln_count;
	volatile int	tln_closed;
	uint8_t		tln_af;
};

enum tree_lock_state {
	TREE_UNLOCKED = 0,
	TREE_RLOCKED,
	TREE_WLOCKED,
};

/* Do we want to select this session for auto-logging? */
static __inline bool
tcp_log_selectauto(void)
{

	/*
	 * If we are doing auto-capturing, figure out whether we will capture
	 * this session.
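	 * For example, with tcp_log_auto_ratio set to 100, the check below
	 * picks roughly every 100th session considered; the counter is
	 * global, so the selection is spread across all connections.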
	 */
	if (tcp_log_auto_ratio &&
	    (tcp_disable_all_bb_logs == 0) &&
	    (atomic_fetchadd_long(&tcp_log_auto_ratio_cur, 1) %
	    tcp_log_auto_ratio) == 0)
		return (true);
	return (false);
}

static __inline int
tcp_log_id_cmp(struct tcp_log_id_bucket *a, struct tcp_log_id_bucket *b)
{
	KASSERT(a != NULL, ("tcp_log_id_cmp: argument a is unexpectedly NULL"));
	KASSERT(b != NULL, ("tcp_log_id_cmp: argument b is unexpectedly NULL"));
	return strncmp(a->tlb_id, b->tlb_id, TCP_LOG_ID_LEN);
}

RB_GENERATE_STATIC(tcp_log_id_tree, tcp_log_id_bucket, tlb_rb, tcp_log_id_cmp)

static __inline void
tcp_log_id_validate_tree_lock(int tree_locked)
{

#ifdef INVARIANTS
	switch (tree_locked) {
	case TREE_WLOCKED:
		TCPID_TREE_WLOCK_ASSERT();
		break;
	case TREE_RLOCKED:
		TCPID_TREE_RLOCK_ASSERT();
		break;
	case TREE_UNLOCKED:
		TCPID_TREE_UNLOCK_ASSERT();
		break;
	default:
		kassert_panic("%s:%d: unknown tree lock state", __func__,
		    __LINE__);
	}
#endif
}

static __inline void
tcp_log_remove_bucket(struct tcp_log_id_bucket *tlb)
{

	TCPID_TREE_WLOCK_ASSERT();
	KASSERT(SLIST_EMPTY(&tlb->tlb_head),
	    ("%s: Attempt to remove non-empty bucket", __func__));
	if (RB_REMOVE(tcp_log_id_tree, &tcp_log_id_head, tlb) == NULL) {
#ifdef INVARIANTS
		kassert_panic("%s:%d: error removing element from tree",
		    __func__, __LINE__);
#endif
	}
	TCPID_BUCKET_LOCK_DESTROY(tlb);
	counter_u64_add(tcp_log_pcb_ids_cur, (int64_t)-1);
	uma_zfree(tcp_log_id_bucket_zone, tlb);
}

/*
 * Call with a referenced and locked bucket.
 * Will return true if the bucket was freed; otherwise, false.
 * tlb: The bucket to unreference.
 * tree_locked: A pointer to the state of the tree lock. If the tree lock
 *    state changes, the function will update it.
 * inp: If not NULL and the function needs to drop the inp lock to relock the
 *    tree, it will do so. (The caller must ensure inp will not become invalid,
 *    probably by holding a reference to it.)
 */
static bool
tcp_log_unref_bucket(struct tcp_log_id_bucket *tlb, int *tree_locked,
    struct inpcb *inp)
{

	KASSERT(tlb != NULL, ("%s: called with NULL tlb", __func__));
	KASSERT(tree_locked != NULL, ("%s: called with NULL tree_locked",
	    __func__));

	tcp_log_id_validate_tree_lock(*tree_locked);

	/*
	 * Did we hold the last reference on the tlb? If so, we may need
	 * to free it. (Note that we can realistically only execute the
	 * loop twice: once without a write lock and once with a write
	 * lock.)
	 */
	while (TCPID_BUCKET_UNREF(tlb)) {
		/*
		 * We need a write lock on the tree to free this.
		 * If we can upgrade the tree lock, this is "easy". If we
		 * can't upgrade the tree lock, we need to do this the
		 * "hard" way: unwind all our locks and relock everything.
		 * In the meantime, anything could have changed. We even
		 * need to validate that we still need to free the bucket.
401 */ 402 if (*tree_locked == TREE_RLOCKED && TCPID_TREE_UPGRADE()) 403 *tree_locked = TREE_WLOCKED; 404 else if (*tree_locked != TREE_WLOCKED) { 405 TCPID_BUCKET_REF(tlb); 406 if (inp != NULL) 407 INP_WUNLOCK(inp); 408 TCPID_BUCKET_UNLOCK(tlb); 409 if (*tree_locked == TREE_RLOCKED) 410 TCPID_TREE_RUNLOCK(); 411 TCPID_TREE_WLOCK(); 412 *tree_locked = TREE_WLOCKED; 413 TCPID_BUCKET_LOCK(tlb); 414 if (inp != NULL) 415 INP_WLOCK(inp); 416 continue; 417 } 418 419 /* 420 * We have an empty bucket and a write lock on the tree. 421 * Remove the empty bucket. 422 */ 423 tcp_log_remove_bucket(tlb); 424 return (true); 425 } 426 return (false); 427 } 428 429 /* 430 * Call with a locked bucket. This function will release the lock on the 431 * bucket before returning. 432 * 433 * The caller is responsible for freeing the tp->t_lin/tln node! 434 * 435 * Note: one of tp or both tlb and tln must be supplied. 436 * 437 * inp: A pointer to the inp. If the function needs to drop the inp lock to 438 * acquire the tree write lock, it will do so. (The caller must ensure inp 439 * will not become invalid, probably by holding a reference to it.) 440 * tp: A pointer to the tcpcb. (optional; if specified, tlb and tln are ignored) 441 * tlb: A pointer to the bucket. (optional; ignored if tp is specified) 442 * tln: A pointer to the node. (optional; ignored if tp is specified) 443 * tree_locked: A pointer to the state of the tree lock. If the tree lock 444 * state changes, the function will update it. 445 * 446 * Will return true if the INP lock was reacquired; otherwise, false. 447 */ 448 static bool 449 tcp_log_remove_id_node(struct inpcb *inp, struct tcpcb *tp, 450 struct tcp_log_id_bucket *tlb, struct tcp_log_id_node *tln, 451 int *tree_locked) 452 { 453 int orig_tree_locked; 454 455 KASSERT(tp != NULL || (tlb != NULL && tln != NULL), 456 ("%s: called with tp=%p, tlb=%p, tln=%p", __func__, 457 tp, tlb, tln)); 458 KASSERT(tree_locked != NULL, ("%s: called with NULL tree_locked", 459 __func__)); 460 461 if (tp != NULL) { 462 tlb = tp->t_lib; 463 tln = tp->t_lin; 464 KASSERT(tlb != NULL, ("%s: unexpectedly NULL tlb", __func__)); 465 KASSERT(tln != NULL, ("%s: unexpectedly NULL tln", __func__)); 466 } 467 468 tcp_log_id_validate_tree_lock(*tree_locked); 469 TCPID_BUCKET_LOCK_ASSERT(tlb); 470 471 /* 472 * Remove the node, clear the log bucket and node from the TCPCB, and 473 * decrement the bucket refcount. In the process, if this is the 474 * last reference, the bucket will be freed. 
475 */ 476 SLIST_REMOVE(&tlb->tlb_head, tln, tcp_log_id_node, tln_list); 477 if (tp != NULL) { 478 tp->t_lib = NULL; 479 tp->t_lin = NULL; 480 } 481 orig_tree_locked = *tree_locked; 482 if (!tcp_log_unref_bucket(tlb, tree_locked, inp)) 483 TCPID_BUCKET_UNLOCK(tlb); 484 return (*tree_locked != orig_tree_locked); 485 } 486 487 #define RECHECK_INP_CLEAN(cleanup) do { \ 488 if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) { \ 489 rv = ECONNRESET; \ 490 cleanup; \ 491 goto done; \ 492 } \ 493 tp = intotcpcb(inp); \ 494 } while (0) 495 496 #define RECHECK_INP() RECHECK_INP_CLEAN(/* noop */) 497 498 static void 499 tcp_log_grow_tlb(char *tlb_id, struct tcpcb *tp) 500 { 501 502 INP_WLOCK_ASSERT(tp->t_inpcb); 503 504 #ifdef STATS 505 if (V_tcp_perconn_stats_enable == 2 && tp->t_stats == NULL) 506 (void)tcp_stats_sample_rollthedice(tp, tlb_id, strlen(tlb_id)); 507 #endif 508 } 509 510 static void 511 tcp_log_increment_reqcnt(struct tcp_log_id_bucket *tlb) 512 { 513 514 atomic_fetchadd_int(&tlb->tlb_reqcnt, 1); 515 } 516 517 /* 518 * Associate the specified tag with a particular TCP log ID. 519 * Called with INPCB locked. Returns with it unlocked. 520 * Returns 0 on success or EOPNOTSUPP if the connection has no TCP log ID. 521 */ 522 int 523 tcp_log_set_tag(struct tcpcb *tp, char *tag) 524 { 525 struct tcp_log_id_bucket *tlb; 526 int tree_locked; 527 528 INP_WLOCK_ASSERT(tp->t_inpcb); 529 530 tree_locked = TREE_UNLOCKED; 531 tlb = tp->t_lib; 532 if (tlb == NULL) { 533 INP_WUNLOCK(tp->t_inpcb); 534 return (EOPNOTSUPP); 535 } 536 537 TCPID_BUCKET_REF(tlb); 538 INP_WUNLOCK(tp->t_inpcb); 539 TCPID_BUCKET_LOCK(tlb); 540 strlcpy(tlb->tlb_tag, tag, TCP_LOG_TAG_LEN); 541 if (!tcp_log_unref_bucket(tlb, &tree_locked, NULL)) 542 TCPID_BUCKET_UNLOCK(tlb); 543 544 if (tree_locked == TREE_WLOCKED) { 545 TCPID_TREE_WLOCK_ASSERT(); 546 TCPID_TREE_WUNLOCK(); 547 } else if (tree_locked == TREE_RLOCKED) { 548 TCPID_TREE_RLOCK_ASSERT(); 549 TCPID_TREE_RUNLOCK(); 550 } else 551 TCPID_TREE_UNLOCK_ASSERT(); 552 553 return (0); 554 } 555 556 /* 557 * Set the TCP log ID for a TCPCB. 558 * Called with INPCB locked. Returns with it unlocked. 559 */ 560 int 561 tcp_log_set_id(struct tcpcb *tp, char *id) 562 { 563 struct tcp_log_id_bucket *tlb, *tmp_tlb; 564 struct tcp_log_id_node *tln; 565 struct inpcb *inp; 566 int tree_locked, rv; 567 bool bucket_locked; 568 569 tlb = NULL; 570 tln = NULL; 571 inp = tp->t_inpcb; 572 tree_locked = TREE_UNLOCKED; 573 bucket_locked = false; 574 575 restart: 576 INP_WLOCK_ASSERT(inp); 577 578 /* See if the ID is unchanged. */ 579 if ((tp->t_lib != NULL && !strcmp(tp->t_lib->tlb_id, id)) || 580 (tp->t_lib == NULL && *id == 0)) { 581 if (tp->t_lib != NULL) { 582 tcp_log_increment_reqcnt(tp->t_lib); 583 if ((tp->t_lib->tlb_logstate) && 584 (tp->t_log_state_set == 0)) { 585 /* Clone in any logging */ 586 587 tp->t_logstate = tp->t_lib->tlb_logstate; 588 } 589 if ((tp->t_lib->tlb_loglimit) && 590 (tp->t_log_state_set == 0)) { 591 /* We also have a limit set */ 592 593 tp->t_loglimit = tp->t_lib->tlb_loglimit; 594 } 595 } 596 rv = 0; 597 goto done; 598 } 599 600 /* 601 * If the TCPCB had a previous ID, we need to extricate it from 602 * the previous list. 603 * 604 * Drop the TCPCB lock and lock the tree and the bucket. 605 * Because this is called in the socket context, we (theoretically) 606 * don't need to worry about the INPCB completely going away 607 * while we are gone. 
608 */ 609 if (tp->t_lib != NULL) { 610 tlb = tp->t_lib; 611 TCPID_BUCKET_REF(tlb); 612 INP_WUNLOCK(inp); 613 614 if (tree_locked == TREE_UNLOCKED) { 615 TCPID_TREE_RLOCK(); 616 tree_locked = TREE_RLOCKED; 617 } 618 TCPID_BUCKET_LOCK(tlb); 619 bucket_locked = true; 620 INP_WLOCK(inp); 621 622 /* 623 * Unreference the bucket. If our bucket went away, it is no 624 * longer locked or valid. 625 */ 626 if (tcp_log_unref_bucket(tlb, &tree_locked, inp)) { 627 bucket_locked = false; 628 tlb = NULL; 629 } 630 631 /* Validate the INP. */ 632 RECHECK_INP(); 633 634 /* 635 * Evaluate whether the bucket changed while we were unlocked. 636 * 637 * Possible scenarios here: 638 * 1. Bucket is unchanged and the same one we started with. 639 * 2. The TCPCB no longer has a bucket and our bucket was 640 * freed. 641 * 3. The TCPCB has a new bucket, whether ours was freed. 642 * 4. The TCPCB no longer has a bucket and our bucket was 643 * not freed. 644 * 645 * In cases 2-4, we will start over. In case 1, we will 646 * proceed here to remove the bucket. 647 */ 648 if (tlb == NULL || tp->t_lib != tlb) { 649 KASSERT(bucket_locked || tlb == NULL, 650 ("%s: bucket_locked (%d) and tlb (%p) are " 651 "inconsistent", __func__, bucket_locked, tlb)); 652 653 if (bucket_locked) { 654 TCPID_BUCKET_UNLOCK(tlb); 655 bucket_locked = false; 656 tlb = NULL; 657 } 658 goto restart; 659 } 660 661 /* 662 * Store the (struct tcp_log_id_node) for reuse. Then, remove 663 * it from the bucket. In the process, we may end up relocking. 664 * If so, we need to validate that the INP is still valid, and 665 * the TCPCB entries match we expect. 666 * 667 * We will clear tlb and change the bucket_locked state just 668 * before calling tcp_log_remove_id_node(), since that function 669 * will unlock the bucket. 670 */ 671 if (tln != NULL) 672 uma_zfree(tcp_log_id_node_zone, tln); 673 tln = tp->t_lin; 674 tlb = NULL; 675 bucket_locked = false; 676 if (tcp_log_remove_id_node(inp, tp, NULL, NULL, &tree_locked)) { 677 RECHECK_INP(); 678 679 /* 680 * If the TCPCB moved to a new bucket while we had 681 * dropped the lock, restart. 682 */ 683 if (tp->t_lib != NULL || tp->t_lin != NULL) 684 goto restart; 685 } 686 687 /* 688 * Yay! We successfully removed the TCPCB from its old 689 * bucket. Phew! 690 * 691 * On to bigger and better things... 692 */ 693 } 694 695 /* At this point, the TCPCB should not be in any bucket. */ 696 KASSERT(tp->t_lib == NULL, ("%s: tp->t_lib is not NULL", __func__)); 697 698 /* 699 * If the new ID is not empty, we need to now assign this TCPCB to a 700 * new bucket. 701 */ 702 if (*id) { 703 /* Get a new tln, if we don't already have one to reuse. */ 704 if (tln == NULL) { 705 tln = uma_zalloc(tcp_log_id_node_zone, 706 M_NOWAIT | M_ZERO); 707 if (tln == NULL) { 708 rv = ENOBUFS; 709 goto done; 710 } 711 tln->tln_inp = inp; 712 tln->tln_tp = tp; 713 } 714 715 /* 716 * Drop the INP lock for a bit. We don't need it, and dropping 717 * it prevents lock order reversals. 718 */ 719 INP_WUNLOCK(inp); 720 721 /* Make sure we have at least a read lock on the tree. */ 722 tcp_log_id_validate_tree_lock(tree_locked); 723 if (tree_locked == TREE_UNLOCKED) { 724 TCPID_TREE_RLOCK(); 725 tree_locked = TREE_RLOCKED; 726 } 727 728 refind: 729 /* 730 * Remember that we constructed (struct tcp_log_id_node) so 731 * we can safely cast the id to it for the purposes of finding. 
732 */ 733 KASSERT(tlb == NULL, ("%s:%d tlb unexpectedly non-NULL", 734 __func__, __LINE__)); 735 tmp_tlb = RB_FIND(tcp_log_id_tree, &tcp_log_id_head, 736 (struct tcp_log_id_bucket *) id); 737 738 /* 739 * If we didn't find a matching bucket, we need to add a new 740 * one. This requires a write lock. But, of course, we will 741 * need to recheck some things when we re-acquire the lock. 742 */ 743 if (tmp_tlb == NULL && tree_locked != TREE_WLOCKED) { 744 tree_locked = TREE_WLOCKED; 745 if (!TCPID_TREE_UPGRADE()) { 746 TCPID_TREE_RUNLOCK(); 747 TCPID_TREE_WLOCK(); 748 749 /* 750 * The tree may have changed while we were 751 * unlocked. 752 */ 753 goto refind; 754 } 755 } 756 757 /* If we need to add a new bucket, do it now. */ 758 if (tmp_tlb == NULL) { 759 /* Allocate new bucket. */ 760 tlb = uma_zalloc(tcp_log_id_bucket_zone, M_NOWAIT); 761 if (tlb == NULL) { 762 rv = ENOBUFS; 763 goto done_noinp; 764 } 765 counter_u64_add(tcp_log_pcb_ids_cur, 1); 766 counter_u64_add(tcp_log_pcb_ids_tot, 1); 767 768 if ((tcp_log_auto_all == false) && 769 tcp_log_auto_mode && 770 tcp_log_selectauto()) { 771 /* Save off the log state */ 772 tlb->tlb_logstate = tcp_log_auto_mode; 773 } else 774 tlb->tlb_logstate = TCP_LOG_STATE_OFF; 775 tlb->tlb_loglimit = 0; 776 tlb->tlb_tag[0] = '\0'; /* Default to an empty tag. */ 777 778 /* 779 * Copy the ID to the bucket. 780 * NB: Don't use strlcpy() unless you are sure 781 * we've always validated NULL termination. 782 * 783 * TODO: When I'm done writing this, see if we 784 * we have correctly validated NULL termination and 785 * can use strlcpy(). :-) 786 */ 787 strncpy(tlb->tlb_id, id, TCP_LOG_ID_LEN - 1); 788 tlb->tlb_id[TCP_LOG_ID_LEN - 1] = '\0'; 789 790 /* 791 * Take the refcount for the first node and go ahead 792 * and lock this. Note that we zero the tlb_mtx 793 * structure, since 0xdeadc0de flips the right bits 794 * for the code to think that this mutex has already 795 * been initialized. :-( 796 */ 797 SLIST_INIT(&tlb->tlb_head); 798 refcount_init(&tlb->tlb_refcnt, 1); 799 tlb->tlb_reqcnt = 1; 800 memset(&tlb->tlb_mtx, 0, sizeof(struct mtx)); 801 TCPID_BUCKET_LOCK_INIT(tlb); 802 TCPID_BUCKET_LOCK(tlb); 803 bucket_locked = true; 804 805 #define FREE_NEW_TLB() do { \ 806 TCPID_BUCKET_LOCK_DESTROY(tlb); \ 807 uma_zfree(tcp_log_id_bucket_zone, tlb); \ 808 counter_u64_add(tcp_log_pcb_ids_cur, (int64_t)-1); \ 809 counter_u64_add(tcp_log_pcb_ids_tot, (int64_t)-1); \ 810 bucket_locked = false; \ 811 tlb = NULL; \ 812 } while (0) 813 /* 814 * Relock the INP and make sure we are still 815 * unassigned. 816 */ 817 INP_WLOCK(inp); 818 RECHECK_INP_CLEAN(FREE_NEW_TLB()); 819 if (tp->t_lib != NULL) { 820 FREE_NEW_TLB(); 821 goto restart; 822 } 823 824 /* Add the new bucket to the tree. */ 825 tmp_tlb = RB_INSERT(tcp_log_id_tree, &tcp_log_id_head, 826 tlb); 827 KASSERT(tmp_tlb == NULL, 828 ("%s: Unexpected conflicting bucket (%p) while " 829 "adding new bucket (%p)", __func__, tmp_tlb, tlb)); 830 831 /* 832 * If we found a conflicting bucket, free the new 833 * one we made and fall through to use the existing 834 * bucket. 835 */ 836 if (tmp_tlb != NULL) { 837 FREE_NEW_TLB(); 838 INP_WUNLOCK(inp); 839 } 840 #undef FREE_NEW_TLB 841 } 842 843 /* If we found an existing bucket, use it. */ 844 if (tmp_tlb != NULL) { 845 tlb = tmp_tlb; 846 TCPID_BUCKET_LOCK(tlb); 847 bucket_locked = true; 848 849 /* 850 * Relock the INP and make sure we are still 851 * unassigned. 
852 */ 853 INP_UNLOCK_ASSERT(inp); 854 INP_WLOCK(inp); 855 RECHECK_INP(); 856 if (tp->t_lib != NULL) { 857 TCPID_BUCKET_UNLOCK(tlb); 858 bucket_locked = false; 859 tlb = NULL; 860 goto restart; 861 } 862 863 /* Take a reference on the bucket. */ 864 TCPID_BUCKET_REF(tlb); 865 866 /* Record the request. */ 867 tcp_log_increment_reqcnt(tlb); 868 } 869 870 tcp_log_grow_tlb(tlb->tlb_id, tp); 871 872 /* Add the new node to the list. */ 873 SLIST_INSERT_HEAD(&tlb->tlb_head, tln, tln_list); 874 tp->t_lib = tlb; 875 tp->t_lin = tln; 876 if (tp->t_lib->tlb_logstate) { 877 /* Clone in any logging */ 878 879 tp->t_logstate = tp->t_lib->tlb_logstate; 880 } 881 if (tp->t_lib->tlb_loglimit) { 882 /* The loglimit too */ 883 884 tp->t_loglimit = tp->t_lib->tlb_loglimit; 885 } 886 tln = NULL; 887 } 888 889 rv = 0; 890 891 done: 892 /* Unlock things, as needed, and return. */ 893 INP_WUNLOCK(inp); 894 done_noinp: 895 INP_UNLOCK_ASSERT(inp); 896 if (bucket_locked) { 897 TCPID_BUCKET_LOCK_ASSERT(tlb); 898 TCPID_BUCKET_UNLOCK(tlb); 899 } else if (tlb != NULL) 900 TCPID_BUCKET_UNLOCK_ASSERT(tlb); 901 if (tree_locked == TREE_WLOCKED) { 902 TCPID_TREE_WLOCK_ASSERT(); 903 TCPID_TREE_WUNLOCK(); 904 } else if (tree_locked == TREE_RLOCKED) { 905 TCPID_TREE_RLOCK_ASSERT(); 906 TCPID_TREE_RUNLOCK(); 907 } else 908 TCPID_TREE_UNLOCK_ASSERT(); 909 if (tln != NULL) 910 uma_zfree(tcp_log_id_node_zone, tln); 911 return (rv); 912 } 913 914 /* 915 * Get the TCP log ID for a TCPCB. 916 * Called with INPCB locked. 917 * 'buf' must point to a buffer that is at least TCP_LOG_ID_LEN bytes long. 918 * Returns number of bytes copied. 919 */ 920 size_t 921 tcp_log_get_id(struct tcpcb *tp, char *buf) 922 { 923 size_t len; 924 925 INP_LOCK_ASSERT(tp->t_inpcb); 926 if (tp->t_lib != NULL) { 927 len = strlcpy(buf, tp->t_lib->tlb_id, TCP_LOG_ID_LEN); 928 KASSERT(len < TCP_LOG_ID_LEN, 929 ("%s:%d: tp->t_lib->tlb_id too long (%zu)", 930 __func__, __LINE__, len)); 931 } else { 932 *buf = '\0'; 933 len = 0; 934 } 935 return (len); 936 } 937 938 /* 939 * Get the tag associated with the TCPCB's log ID. 940 * Called with INPCB locked. Returns with it unlocked. 941 * 'buf' must point to a buffer that is at least TCP_LOG_TAG_LEN bytes long. 942 * Returns number of bytes copied. 943 */ 944 size_t 945 tcp_log_get_tag(struct tcpcb *tp, char *buf) 946 { 947 struct tcp_log_id_bucket *tlb; 948 size_t len; 949 int tree_locked; 950 951 INP_WLOCK_ASSERT(tp->t_inpcb); 952 953 tree_locked = TREE_UNLOCKED; 954 tlb = tp->t_lib; 955 956 if (tlb != NULL) { 957 TCPID_BUCKET_REF(tlb); 958 INP_WUNLOCK(tp->t_inpcb); 959 TCPID_BUCKET_LOCK(tlb); 960 len = strlcpy(buf, tlb->tlb_tag, TCP_LOG_TAG_LEN); 961 KASSERT(len < TCP_LOG_TAG_LEN, 962 ("%s:%d: tp->t_lib->tlb_tag too long (%zu)", 963 __func__, __LINE__, len)); 964 if (!tcp_log_unref_bucket(tlb, &tree_locked, NULL)) 965 TCPID_BUCKET_UNLOCK(tlb); 966 967 if (tree_locked == TREE_WLOCKED) { 968 TCPID_TREE_WLOCK_ASSERT(); 969 TCPID_TREE_WUNLOCK(); 970 } else if (tree_locked == TREE_RLOCKED) { 971 TCPID_TREE_RLOCK_ASSERT(); 972 TCPID_TREE_RUNLOCK(); 973 } else 974 TCPID_TREE_UNLOCK_ASSERT(); 975 } else { 976 INP_WUNLOCK(tp->t_inpcb); 977 *buf = '\0'; 978 len = 0; 979 } 980 981 return (len); 982 } 983 984 /* 985 * Get number of connections with the same log ID. 986 * Log ID is taken from given TCPCB. 987 * Called with INPCB locked. 988 */ 989 u_int 990 tcp_log_get_id_cnt(struct tcpcb *tp) 991 { 992 993 INP_WLOCK_ASSERT(tp->t_inpcb); 994 return ((tp->t_lib == NULL) ? 
0 : tp->t_lib->tlb_refcnt); 995 } 996 997 #ifdef TCPLOG_DEBUG_RINGBUF 998 /* 999 * Functions/macros to increment/decrement reference count for a log 1000 * entry. This should catch when we do a double-free/double-remove or 1001 * a double-add. 1002 */ 1003 static inline void 1004 _tcp_log_entry_refcnt_add(struct tcp_log_mem *log_entry, const char *func, 1005 int line) 1006 { 1007 int refcnt; 1008 1009 refcnt = atomic_fetchadd_int(&log_entry->tlm_refcnt, 1); 1010 if (refcnt != 0) 1011 panic("%s:%d: log_entry(%p)->tlm_refcnt is %d (expected 0)", 1012 func, line, log_entry, refcnt); 1013 } 1014 #define tcp_log_entry_refcnt_add(l) \ 1015 _tcp_log_entry_refcnt_add((l), __func__, __LINE__) 1016 1017 static inline void 1018 _tcp_log_entry_refcnt_rem(struct tcp_log_mem *log_entry, const char *func, 1019 int line) 1020 { 1021 int refcnt; 1022 1023 refcnt = atomic_fetchadd_int(&log_entry->tlm_refcnt, -1); 1024 if (refcnt != 1) 1025 panic("%s:%d: log_entry(%p)->tlm_refcnt is %d (expected 1)", 1026 func, line, log_entry, refcnt); 1027 } 1028 #define tcp_log_entry_refcnt_rem(l) \ 1029 _tcp_log_entry_refcnt_rem((l), __func__, __LINE__) 1030 1031 #else /* !TCPLOG_DEBUG_RINGBUF */ 1032 1033 #define tcp_log_entry_refcnt_add(l) 1034 #define tcp_log_entry_refcnt_rem(l) 1035 1036 #endif 1037 1038 /* 1039 * Cleanup after removing a log entry, but only decrement the count if we 1040 * are running INVARIANTS. 1041 */ 1042 static inline void 1043 tcp_log_free_log_common(struct tcp_log_mem *log_entry, int *count __unused) 1044 { 1045 1046 uma_zfree(tcp_log_zone, log_entry); 1047 #ifdef INVARIANTS 1048 (*count)--; 1049 KASSERT(*count >= 0, 1050 ("%s: count unexpectedly negative", __func__)); 1051 #endif 1052 } 1053 1054 static void 1055 tcp_log_free_entries(struct tcp_log_stailq *head, int *count) 1056 { 1057 struct tcp_log_mem *log_entry; 1058 1059 /* Free the entries. */ 1060 while ((log_entry = STAILQ_FIRST(head)) != NULL) { 1061 STAILQ_REMOVE_HEAD(head, tlm_queue); 1062 tcp_log_entry_refcnt_rem(log_entry); 1063 tcp_log_free_log_common(log_entry, count); 1064 } 1065 } 1066 1067 /* Cleanup after removing a log entry. */ 1068 static inline void 1069 tcp_log_remove_log_cleanup(struct tcpcb *tp, struct tcp_log_mem *log_entry) 1070 { 1071 uma_zfree(tcp_log_zone, log_entry); 1072 tp->t_lognum--; 1073 KASSERT(tp->t_lognum >= 0, 1074 ("%s: tp->t_lognum unexpectedly negative", __func__)); 1075 } 1076 1077 /* Remove a log entry from the head of a list. */ 1078 static inline void 1079 tcp_log_remove_log_head(struct tcpcb *tp, struct tcp_log_mem *log_entry) 1080 { 1081 1082 KASSERT(log_entry == STAILQ_FIRST(&tp->t_logs), 1083 ("%s: attempt to remove non-HEAD log entry", __func__)); 1084 STAILQ_REMOVE_HEAD(&tp->t_logs, tlm_queue); 1085 tcp_log_entry_refcnt_rem(log_entry); 1086 tcp_log_remove_log_cleanup(tp, log_entry); 1087 } 1088 1089 #ifdef TCPLOG_DEBUG_RINGBUF 1090 /* 1091 * Initialize the log entry's reference count, which we want to 1092 * survive allocations. 1093 */ 1094 static int 1095 tcp_log_zone_init(void *mem, int size, int flags __unused) 1096 { 1097 struct tcp_log_mem *tlm; 1098 1099 KASSERT(size >= sizeof(struct tcp_log_mem), 1100 ("%s: unexpectedly short (%d) allocation", __func__, size)); 1101 tlm = (struct tcp_log_mem *)mem; 1102 tlm->tlm_refcnt = 0; 1103 return (0); 1104 } 1105 1106 /* 1107 * Double check that the refcnt is zero on allocation and return. 
1108 */ 1109 static int 1110 tcp_log_zone_ctor(void *mem, int size, void *args __unused, int flags __unused) 1111 { 1112 struct tcp_log_mem *tlm; 1113 1114 KASSERT(size >= sizeof(struct tcp_log_mem), 1115 ("%s: unexpectedly short (%d) allocation", __func__, size)); 1116 tlm = (struct tcp_log_mem *)mem; 1117 if (tlm->tlm_refcnt != 0) 1118 panic("%s:%d: tlm(%p)->tlm_refcnt is %d (expected 0)", 1119 __func__, __LINE__, tlm, tlm->tlm_refcnt); 1120 return (0); 1121 } 1122 1123 static void 1124 tcp_log_zone_dtor(void *mem, int size, void *args __unused) 1125 { 1126 struct tcp_log_mem *tlm; 1127 1128 KASSERT(size >= sizeof(struct tcp_log_mem), 1129 ("%s: unexpectedly short (%d) allocation", __func__, size)); 1130 tlm = (struct tcp_log_mem *)mem; 1131 if (tlm->tlm_refcnt != 0) 1132 panic("%s:%d: tlm(%p)->tlm_refcnt is %d (expected 0)", 1133 __func__, __LINE__, tlm, tlm->tlm_refcnt); 1134 } 1135 #endif /* TCPLOG_DEBUG_RINGBUF */ 1136 1137 /* Do global initialization. */ 1138 void 1139 tcp_log_init(void) 1140 { 1141 1142 tcp_log_zone = uma_zcreate("tcp_log", sizeof(struct tcp_log_mem), 1143 #ifdef TCPLOG_DEBUG_RINGBUF 1144 tcp_log_zone_ctor, tcp_log_zone_dtor, tcp_log_zone_init, 1145 #else 1146 NULL, NULL, NULL, 1147 #endif 1148 NULL, UMA_ALIGN_PTR, 0); 1149 (void)uma_zone_set_max(tcp_log_zone, TCP_LOG_BUF_DEFAULT_GLOBAL_LIMIT); 1150 tcp_log_id_bucket_zone = uma_zcreate("tcp_log_id_bucket", 1151 sizeof(struct tcp_log_id_bucket), NULL, NULL, NULL, NULL, 1152 UMA_ALIGN_PTR, 0); 1153 tcp_log_id_node_zone = uma_zcreate("tcp_log_id_node", 1154 sizeof(struct tcp_log_id_node), NULL, NULL, NULL, NULL, 1155 UMA_ALIGN_PTR, 0); 1156 #ifdef TCPLOG_DEBUG_COUNTERS 1157 tcp_log_queued = counter_u64_alloc(M_WAITOK); 1158 tcp_log_que_fail1 = counter_u64_alloc(M_WAITOK); 1159 tcp_log_que_fail2 = counter_u64_alloc(M_WAITOK); 1160 tcp_log_que_fail3 = counter_u64_alloc(M_WAITOK); 1161 tcp_log_que_fail4 = counter_u64_alloc(M_WAITOK); 1162 tcp_log_que_fail5 = counter_u64_alloc(M_WAITOK); 1163 tcp_log_que_copyout = counter_u64_alloc(M_WAITOK); 1164 tcp_log_que_read = counter_u64_alloc(M_WAITOK); 1165 tcp_log_que_freed = counter_u64_alloc(M_WAITOK); 1166 #endif 1167 tcp_log_pcb_ids_cur = counter_u64_alloc(M_WAITOK); 1168 tcp_log_pcb_ids_tot = counter_u64_alloc(M_WAITOK); 1169 1170 rw_init_flags(&tcp_id_tree_lock, "TCP ID tree", RW_NEW); 1171 mtx_init(&tcp_log_expireq_mtx, "TCP log expireq", NULL, MTX_DEF); 1172 callout_init(&tcp_log_expireq_callout, 1); 1173 } 1174 1175 /* Do per-TCPCB initialization. */ 1176 void 1177 tcp_log_tcpcbinit(struct tcpcb *tp) 1178 { 1179 1180 /* A new TCPCB should start out zero-initialized. */ 1181 STAILQ_INIT(&tp->t_logs); 1182 1183 /* 1184 * If we are doing auto-capturing, figure out whether we will capture 1185 * this session. 1186 */ 1187 tp->t_loglimit = tcp_log_session_limit; 1188 if ((tcp_log_auto_all == true) && 1189 tcp_log_auto_mode && 1190 tcp_log_selectauto()) { 1191 tp->t_logstate = tcp_log_auto_mode; 1192 tp->t_flags2 |= TF2_LOG_AUTO; 1193 } 1194 } 1195 1196 /* Remove entries */ 1197 static void 1198 tcp_log_expire(void *unused __unused) 1199 { 1200 struct tcp_log_id_bucket *tlb; 1201 struct tcp_log_id_node *tln; 1202 sbintime_t expiry_limit; 1203 int tree_locked; 1204 1205 TCPLOG_EXPIREQ_LOCK(); 1206 if (callout_pending(&tcp_log_expireq_callout)) { 1207 /* Callout was reset. */ 1208 TCPLOG_EXPIREQ_UNLOCK(); 1209 return; 1210 } 1211 1212 /* 1213 * Process entries until we reach one that expires too far in the 1214 * future. Look one second in the future. 
1215 */ 1216 expiry_limit = getsbinuptime() + SBT_1S; 1217 tree_locked = TREE_UNLOCKED; 1218 1219 while ((tln = STAILQ_FIRST(&tcp_log_expireq_head)) != NULL && 1220 tln->tln_expiretime <= expiry_limit) { 1221 if (!callout_active(&tcp_log_expireq_callout)) { 1222 /* 1223 * Callout was stopped. I guess we should 1224 * just quit at this point. 1225 */ 1226 TCPLOG_EXPIREQ_UNLOCK(); 1227 return; 1228 } 1229 1230 /* 1231 * Remove the node from the head of the list and unlock 1232 * the list. Change the expiry time to SBT_MAX as a signal 1233 * to other threads that we now own this. 1234 */ 1235 STAILQ_REMOVE_HEAD(&tcp_log_expireq_head, tln_expireq); 1236 tln->tln_expiretime = SBT_MAX; 1237 TCPLOG_EXPIREQ_UNLOCK(); 1238 1239 /* 1240 * Remove the node from the bucket. 1241 */ 1242 tlb = tln->tln_bucket; 1243 TCPID_BUCKET_LOCK(tlb); 1244 if (tcp_log_remove_id_node(NULL, NULL, tlb, tln, &tree_locked)) { 1245 tcp_log_id_validate_tree_lock(tree_locked); 1246 if (tree_locked == TREE_WLOCKED) 1247 TCPID_TREE_WUNLOCK(); 1248 else 1249 TCPID_TREE_RUNLOCK(); 1250 tree_locked = TREE_UNLOCKED; 1251 } 1252 1253 /* Drop the INP reference. */ 1254 INP_WLOCK(tln->tln_inp); 1255 if (!in_pcbrele_wlocked(tln->tln_inp)) 1256 INP_WUNLOCK(tln->tln_inp); 1257 1258 /* Free the log records. */ 1259 tcp_log_free_entries(&tln->tln_entries, &tln->tln_count); 1260 1261 /* Free the node. */ 1262 uma_zfree(tcp_log_id_node_zone, tln); 1263 1264 /* Relock the expiry queue. */ 1265 TCPLOG_EXPIREQ_LOCK(); 1266 } 1267 1268 /* 1269 * We've expired all the entries we can. Do we need to reschedule 1270 * ourselves? 1271 */ 1272 callout_deactivate(&tcp_log_expireq_callout); 1273 if (tln != NULL) { 1274 /* 1275 * Get max(now + TCP_LOG_EXPIRE_INTVL, tln->tln_expiretime) and 1276 * set the next callout to that. (This helps ensure we generally 1277 * run the callout no more often than desired.) 1278 */ 1279 expiry_limit = getsbinuptime() + TCP_LOG_EXPIRE_INTVL; 1280 if (expiry_limit < tln->tln_expiretime) 1281 expiry_limit = tln->tln_expiretime; 1282 callout_reset_sbt(&tcp_log_expireq_callout, expiry_limit, 1283 SBT_1S, tcp_log_expire, NULL, C_ABSOLUTE); 1284 } 1285 1286 /* We're done. */ 1287 TCPLOG_EXPIREQ_UNLOCK(); 1288 return; 1289 } 1290 1291 /* 1292 * Move log data from the TCPCB to a new node. This will reset the TCPCB log 1293 * entries and log count; however, it will not touch other things from the 1294 * TCPCB (e.g. t_lin, t_lib). 1295 * 1296 * NOTE: Must hold a lock on the INP. 1297 */ 1298 static void 1299 tcp_log_move_tp_to_node(struct tcpcb *tp, struct tcp_log_id_node *tln) 1300 { 1301 1302 INP_WLOCK_ASSERT(tp->t_inpcb); 1303 1304 tln->tln_ie = tp->t_inpcb->inp_inc.inc_ie; 1305 if (tp->t_inpcb->inp_inc.inc_flags & INC_ISIPV6) 1306 tln->tln_af = AF_INET6; 1307 else 1308 tln->tln_af = AF_INET; 1309 tln->tln_entries = tp->t_logs; 1310 tln->tln_count = tp->t_lognum; 1311 tln->tln_bucket = tp->t_lib; 1312 1313 /* Clear information from the PCB. */ 1314 STAILQ_INIT(&tp->t_logs); 1315 tp->t_lognum = 0; 1316 } 1317 1318 /* Do per-TCPCB cleanup */ 1319 void 1320 tcp_log_tcpcbfini(struct tcpcb *tp) 1321 { 1322 struct tcp_log_id_node *tln, *tln_first; 1323 struct tcp_log_mem *log_entry; 1324 sbintime_t callouttime; 1325 1326 INP_WLOCK_ASSERT(tp->t_inpcb); 1327 1328 TCP_LOG_EVENT(tp, NULL, NULL, NULL, TCP_LOG_CONNEND, 0, 0, NULL, false); 1329 1330 /* 1331 * If we were gathering packets to be automatically dumped, try to do 1332 * it now. If this succeeds, the log information in the TCPCB will be 1333 * cleared. 
Otherwise, we'll handle the log information as we do 1334 * for other states. 1335 */ 1336 switch(tp->t_logstate) { 1337 case TCP_LOG_STATE_HEAD_AUTO: 1338 (void)tcp_log_dump_tp_logbuf(tp, "auto-dumped from head", 1339 M_NOWAIT, false); 1340 break; 1341 case TCP_LOG_STATE_TAIL_AUTO: 1342 (void)tcp_log_dump_tp_logbuf(tp, "auto-dumped from tail", 1343 M_NOWAIT, false); 1344 break; 1345 case TCP_LOG_STATE_CONTINUAL: 1346 (void)tcp_log_dump_tp_logbuf(tp, "auto-dumped from continual", 1347 M_NOWAIT, false); 1348 break; 1349 } 1350 1351 /* 1352 * There are two ways we could keep logs: per-socket or per-ID. If 1353 * we are tracking logs with an ID, then the logs survive the 1354 * destruction of the TCPCB. 1355 * 1356 * If the TCPCB is associated with an ID node, move the logs from the 1357 * TCPCB to the ID node. In theory, this is safe, for reasons which I 1358 * will now explain for my own benefit when I next need to figure out 1359 * this code. :-) 1360 * 1361 * We own the INP lock. Therefore, no one else can change the contents 1362 * of this node (Rule C). Further, no one can remove this node from 1363 * the bucket while we hold the lock (Rule D). Basically, no one can 1364 * mess with this node. That leaves two states in which we could be: 1365 * 1366 * 1. Another thread is currently waiting to acquire the INP lock, with 1367 * plans to do something with this node. When we drop the INP lock, 1368 * they will have a chance to do that. They will recheck the 1369 * tln_closed field (see note to Rule C) and then acquire the 1370 * bucket lock before proceeding further. 1371 * 1372 * 2. Another thread will try to acquire a lock at some point in the 1373 * future. If they try to acquire a lock before we set the 1374 * tln_closed field, they will follow state #1. If they try to 1375 * acquire a lock after we set the tln_closed field, they will be 1376 * able to make changes to the node, at will, following Rule C. 1377 * 1378 * Therefore, we currently own this node and can make any changes 1379 * we want. But, as soon as we set the tln_closed field to true, we 1380 * have effectively dropped our lock on the node. (For this reason, we 1381 * also need to make sure our writes are ordered correctly. An atomic 1382 * operation with "release" semantics should be sufficient.) 1383 */ 1384 1385 if (tp->t_lin != NULL) { 1386 /* Copy the relevant information to the log entry. */ 1387 tln = tp->t_lin; 1388 KASSERT(tln->tln_inp == tp->t_inpcb, 1389 ("%s: Mismatched inp (tln->tln_inp=%p, tp->t_inpcb=%p)", 1390 __func__, tln->tln_inp, tp->t_inpcb)); 1391 tcp_log_move_tp_to_node(tp, tln); 1392 1393 /* Clear information from the PCB. */ 1394 tp->t_lin = NULL; 1395 tp->t_lib = NULL; 1396 1397 /* 1398 * Take a reference on the INP. This ensures that the INP 1399 * remains valid while the node is on the expiry queue. This 1400 * ensures the INP is valid for other threads that may be 1401 * racing to lock this node when we move it to the expire 1402 * queue. 1403 */ 1404 in_pcbref(tp->t_inpcb); 1405 1406 /* 1407 * Store the entry on the expiry list. The exact behavior 1408 * depends on whether we have entries to keep. If so, we 1409 * put the entry at the tail of the list and expire in 1410 * TCP_LOG_EXPIRE_TIME. Otherwise, we expire "now" and put 1411 * the entry at the head of the list. (Handling the cleanup 1412 * via the expiry timer lets us avoid locking messy-ness here.) 
1413 */ 1414 tln->tln_expiretime = getsbinuptime(); 1415 TCPLOG_EXPIREQ_LOCK(); 1416 if (tln->tln_count) { 1417 tln->tln_expiretime += TCP_LOG_EXPIRE_TIME; 1418 if (STAILQ_EMPTY(&tcp_log_expireq_head) && 1419 !callout_active(&tcp_log_expireq_callout)) { 1420 /* 1421 * We are adding the first entry and a callout 1422 * is not currently scheduled; therefore, we 1423 * need to schedule one. 1424 */ 1425 callout_reset_sbt(&tcp_log_expireq_callout, 1426 tln->tln_expiretime, SBT_1S, tcp_log_expire, 1427 NULL, C_ABSOLUTE); 1428 } 1429 STAILQ_INSERT_TAIL(&tcp_log_expireq_head, tln, 1430 tln_expireq); 1431 } else { 1432 callouttime = tln->tln_expiretime + 1433 TCP_LOG_EXPIRE_INTVL; 1434 tln_first = STAILQ_FIRST(&tcp_log_expireq_head); 1435 1436 if ((tln_first == NULL || 1437 callouttime < tln_first->tln_expiretime) && 1438 (callout_pending(&tcp_log_expireq_callout) || 1439 !callout_active(&tcp_log_expireq_callout))) { 1440 /* 1441 * The list is empty, or we want to run the 1442 * expire code before the first entry's timer 1443 * fires. Also, we are in a case where a callout 1444 * is not actively running. We want to reset 1445 * the callout to occur sooner. 1446 */ 1447 callout_reset_sbt(&tcp_log_expireq_callout, 1448 callouttime, SBT_1S, tcp_log_expire, NULL, 1449 C_ABSOLUTE); 1450 } 1451 1452 /* 1453 * Insert to the head, or just after the head, as 1454 * appropriate. (This might result in small 1455 * mis-orderings as a bunch of "expire now" entries 1456 * gather at the start of the list, but that should 1457 * not produce big problems, since the expire timer 1458 * will walk through all of them.) 1459 */ 1460 if (tln_first == NULL || 1461 tln->tln_expiretime < tln_first->tln_expiretime) 1462 STAILQ_INSERT_HEAD(&tcp_log_expireq_head, tln, 1463 tln_expireq); 1464 else 1465 STAILQ_INSERT_AFTER(&tcp_log_expireq_head, 1466 tln_first, tln, tln_expireq); 1467 } 1468 TCPLOG_EXPIREQ_UNLOCK(); 1469 1470 /* 1471 * We are done messing with the tln. After this point, we 1472 * can't touch it. (Note that the "release" semantics should 1473 * be included with the TCPLOG_EXPIREQ_UNLOCK() call above. 1474 * Therefore, they should be unnecessary here. However, it 1475 * seems like a good idea to include them anyway, since we 1476 * really are releasing a lock here.) 1477 */ 1478 atomic_store_rel_int(&tln->tln_closed, 1); 1479 } else { 1480 /* Remove log entries. */ 1481 while ((log_entry = STAILQ_FIRST(&tp->t_logs)) != NULL) 1482 tcp_log_remove_log_head(tp, log_entry); 1483 KASSERT(tp->t_lognum == 0, 1484 ("%s: After freeing entries, tp->t_lognum=%d (expected 0)", 1485 __func__, tp->t_lognum)); 1486 } 1487 1488 /* 1489 * Change the log state to off (just in case anything tries to sneak 1490 * in a last-minute log). 1491 */ 1492 tp->t_logstate = TCP_LOG_STATE_OFF; 1493 } 1494 1495 static void 1496 tcp_log_purge_tp_logbuf(struct tcpcb *tp) 1497 { 1498 struct tcp_log_mem *log_entry; 1499 struct inpcb *inp __diagused; 1500 1501 inp = tp->t_inpcb; 1502 INP_WLOCK_ASSERT(inp); 1503 if (tp->t_lognum == 0) 1504 return; 1505 1506 while ((log_entry = STAILQ_FIRST(&tp->t_logs)) != NULL) 1507 tcp_log_remove_log_head(tp, log_entry); 1508 KASSERT(tp->t_lognum == 0, 1509 ("%s: After freeing entries, tp->t_lognum=%d (expected 0)", 1510 __func__, tp->t_lognum)); 1511 tp->t_logstate = TCP_LOG_STATE_OFF; 1512 } 1513 1514 /* 1515 * This logs an event for a TCP socket. Normally, this is called via 1516 * TCP_LOG_EVENT or TCP_LOG_EVENT_VERBOSE. See the documentation for 1517 * TCP_LOG_EVENT(). 
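 *
 * A representative call site appears earlier in this file, in
 * tcp_log_tcpcbfini():
 *
 *	TCP_LOG_EVENT(tp, NULL, NULL, NULL, TCP_LOG_CONNEND, 0, 0, NULL,
 *	    false);
 *
 * (th, rxbuf, txbuf and stackinfo may all be NULL; th_hostorder indicates
 * whether a supplied TCP header is in host byte order.)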
1518 */ 1519 1520 struct tcp_log_buffer * 1521 tcp_log_event_(struct tcpcb *tp, struct tcphdr *th, struct sockbuf *rxbuf, 1522 struct sockbuf *txbuf, uint8_t eventid, int errornum, uint32_t len, 1523 union tcp_log_stackspecific *stackinfo, int th_hostorder, 1524 const char *output_caller, const char *func, int line, const struct timeval *itv) 1525 { 1526 struct tcp_log_mem *log_entry; 1527 struct tcp_log_buffer *log_buf; 1528 int attempt_count = 0; 1529 struct tcp_log_verbose *log_verbose; 1530 uint32_t logsn; 1531 1532 KASSERT((func == NULL && line == 0) || (func != NULL && line > 0), 1533 ("%s called with inconsistent func (%p) and line (%d) arguments", 1534 __func__, func, line)); 1535 1536 INP_WLOCK_ASSERT(tp->t_inpcb); 1537 if (tcp_disable_all_bb_logs) { 1538 /* 1539 * The global shutdown logging 1540 * switch has been thrown. Call 1541 * the purge function that frees 1542 * purges out the logs and 1543 * turns off logging. 1544 */ 1545 tcp_log_purge_tp_logbuf(tp); 1546 return (NULL); 1547 } 1548 KASSERT(tp->t_logstate == TCP_LOG_STATE_HEAD || 1549 tp->t_logstate == TCP_LOG_STATE_TAIL || 1550 tp->t_logstate == TCP_LOG_STATE_CONTINUAL || 1551 tp->t_logstate == TCP_LOG_STATE_HEAD_AUTO || 1552 tp->t_logstate == TCP_LOG_STATE_TAIL_AUTO, 1553 ("%s called with unexpected tp->t_logstate (%d)", __func__, 1554 tp->t_logstate)); 1555 1556 /* 1557 * Get the serial number. We do this early so it will 1558 * increment even if we end up skipping the log entry for some 1559 * reason. 1560 */ 1561 logsn = tp->t_logsn++; 1562 1563 /* 1564 * Can we get a new log entry? If so, increment the lognum counter 1565 * here. 1566 */ 1567 retry: 1568 if (tp->t_lognum < tp->t_loglimit) { 1569 if ((log_entry = uma_zalloc(tcp_log_zone, M_NOWAIT)) != NULL) 1570 tp->t_lognum++; 1571 } else 1572 log_entry = NULL; 1573 1574 /* Do we need to try to reuse? */ 1575 if (log_entry == NULL) { 1576 /* 1577 * Sacrifice auto-logged sessions without a log ID if 1578 * tcp_log_auto_all is false. (If they don't have a log 1579 * ID by now, it is probable that either they won't get one 1580 * or we are resource-constrained.) 1581 */ 1582 if (tp->t_lib == NULL && (tp->t_flags2 & TF2_LOG_AUTO) && 1583 !tcp_log_auto_all) { 1584 if (tcp_log_state_change(tp, TCP_LOG_STATE_CLEAR)) { 1585 #ifdef INVARIANTS 1586 panic("%s:%d: tcp_log_state_change() failed " 1587 "to set tp %p to TCP_LOG_STATE_CLEAR", 1588 __func__, __LINE__, tp); 1589 #endif 1590 tp->t_logstate = TCP_LOG_STATE_OFF; 1591 } 1592 return (NULL); 1593 } 1594 /* 1595 * If we are in TCP_LOG_STATE_HEAD_AUTO state, try to dump 1596 * the buffers. If successful, deactivate tracing. Otherwise, 1597 * leave it active so we will retry. 1598 */ 1599 if (tp->t_logstate == TCP_LOG_STATE_HEAD_AUTO && 1600 !tcp_log_dump_tp_logbuf(tp, "auto-dumped from head", 1601 M_NOWAIT, false)) { 1602 tp->t_logstate = TCP_LOG_STATE_OFF; 1603 return(NULL); 1604 } else if ((tp->t_logstate == TCP_LOG_STATE_CONTINUAL) && 1605 !tcp_log_dump_tp_logbuf(tp, "auto-dumped from continual", 1606 M_NOWAIT, false)) { 1607 if (attempt_count == 0) { 1608 attempt_count++; 1609 goto retry; 1610 } 1611 #ifdef TCPLOG_DEBUG_COUNTERS 1612 counter_u64_add(tcp_log_que_fail4, 1); 1613 #endif 1614 return(NULL); 1615 } else if (tp->t_logstate == TCP_LOG_STATE_HEAD_AUTO) 1616 return(NULL); 1617 1618 /* If in HEAD state, just deactivate the tracing and return. */ 1619 if (tp->t_logstate == TCP_LOG_STATE_HEAD) { 1620 tp->t_logstate = TCP_LOG_STATE_OFF; 1621 return(NULL); 1622 } 1623 1624 /* 1625 * Get a buffer to reuse. 
If that fails, just give up. 1626 * (We can't log anything without a buffer in which to 1627 * put it.) 1628 * 1629 * Note that we don't change the t_lognum counter 1630 * here. Because we are re-using the buffer, the total 1631 * number won't change. 1632 */ 1633 if ((log_entry = STAILQ_FIRST(&tp->t_logs)) == NULL) 1634 return(NULL); 1635 STAILQ_REMOVE_HEAD(&tp->t_logs, tlm_queue); 1636 tcp_log_entry_refcnt_rem(log_entry); 1637 } 1638 1639 KASSERT(log_entry != NULL, 1640 ("%s: log_entry unexpectedly NULL", __func__)); 1641 1642 /* Extract the log buffer and verbose buffer pointers. */ 1643 log_buf = &log_entry->tlm_buf; 1644 log_verbose = &log_entry->tlm_v; 1645 1646 /* Basic entries. */ 1647 if (itv == NULL) 1648 getmicrouptime(&log_buf->tlb_tv); 1649 else 1650 memcpy(&log_buf->tlb_tv, itv, sizeof(struct timeval)); 1651 log_buf->tlb_ticks = ticks; 1652 log_buf->tlb_sn = logsn; 1653 log_buf->tlb_stackid = tp->t_fb->tfb_id; 1654 log_buf->tlb_eventid = eventid; 1655 log_buf->tlb_eventflags = 0; 1656 log_buf->tlb_errno = errornum; 1657 1658 /* Socket buffers */ 1659 if (rxbuf != NULL) { 1660 log_buf->tlb_eventflags |= TLB_FLAG_RXBUF; 1661 log_buf->tlb_rxbuf.tls_sb_acc = rxbuf->sb_acc; 1662 log_buf->tlb_rxbuf.tls_sb_ccc = rxbuf->sb_ccc; 1663 log_buf->tlb_rxbuf.tls_sb_spare = 0; 1664 } 1665 if (txbuf != NULL) { 1666 log_buf->tlb_eventflags |= TLB_FLAG_TXBUF; 1667 log_buf->tlb_txbuf.tls_sb_acc = txbuf->sb_acc; 1668 log_buf->tlb_txbuf.tls_sb_ccc = txbuf->sb_ccc; 1669 log_buf->tlb_txbuf.tls_sb_spare = 0; 1670 } 1671 /* Copy values from tp to the log entry. */ 1672 #define COPY_STAT(f) log_buf->tlb_ ## f = tp->f 1673 #define COPY_STAT_T(f) log_buf->tlb_ ## f = tp->t_ ## f 1674 COPY_STAT_T(state); 1675 COPY_STAT_T(starttime); 1676 COPY_STAT(iss); 1677 COPY_STAT_T(flags); 1678 COPY_STAT(snd_una); 1679 COPY_STAT(snd_max); 1680 COPY_STAT(snd_cwnd); 1681 COPY_STAT(snd_nxt); 1682 COPY_STAT(snd_recover); 1683 COPY_STAT(snd_wnd); 1684 COPY_STAT(snd_ssthresh); 1685 COPY_STAT_T(srtt); 1686 COPY_STAT_T(rttvar); 1687 COPY_STAT(rcv_up); 1688 COPY_STAT(rcv_adv); 1689 COPY_STAT(rcv_nxt); 1690 COPY_STAT(rcv_wnd); 1691 COPY_STAT_T(dupacks); 1692 COPY_STAT_T(segqlen); 1693 COPY_STAT(snd_numholes); 1694 COPY_STAT(snd_scale); 1695 COPY_STAT(rcv_scale); 1696 COPY_STAT_T(flags2); 1697 COPY_STAT_T(fbyte_in); 1698 COPY_STAT_T(fbyte_out); 1699 #undef COPY_STAT 1700 #undef COPY_STAT_T 1701 log_buf->tlb_flex1 = 0; 1702 log_buf->tlb_flex2 = 0; 1703 /* Copy stack-specific info. */ 1704 if (stackinfo != NULL) { 1705 memcpy(&log_buf->tlb_stackinfo, stackinfo, 1706 sizeof(log_buf->tlb_stackinfo)); 1707 log_buf->tlb_eventflags |= TLB_FLAG_STACKINFO; 1708 } 1709 1710 /* The packet */ 1711 log_buf->tlb_len = len; 1712 if (th) { 1713 int optlen; 1714 1715 log_buf->tlb_eventflags |= TLB_FLAG_HDR; 1716 log_buf->tlb_th = *th; 1717 if (th_hostorder) 1718 tcp_fields_to_net(&log_buf->tlb_th); 1719 optlen = (th->th_off << 2) - sizeof (struct tcphdr); 1720 if (optlen > 0) 1721 memcpy(log_buf->tlb_opts, th + 1, optlen); 1722 } 1723 1724 /* Verbose information */ 1725 if (func != NULL) { 1726 log_buf->tlb_eventflags |= TLB_FLAG_VERBOSE; 1727 if (output_caller != NULL) 1728 strlcpy(log_verbose->tlv_snd_frm, output_caller, 1729 TCP_FUNC_LEN); 1730 else 1731 *log_verbose->tlv_snd_frm = 0; 1732 strlcpy(log_verbose->tlv_trace_func, func, TCP_FUNC_LEN); 1733 log_verbose->tlv_trace_line = line; 1734 } 1735 1736 /* Insert the new log at the tail. 
*/ 1737 STAILQ_INSERT_TAIL(&tp->t_logs, log_entry, tlm_queue); 1738 tcp_log_entry_refcnt_add(log_entry); 1739 return (log_buf); 1740 } 1741 1742 /* 1743 * Change the logging state for a TCPCB. Returns 0 on success or an 1744 * error code on failure. 1745 */ 1746 int 1747 tcp_log_state_change(struct tcpcb *tp, int state) 1748 { 1749 struct tcp_log_mem *log_entry; 1750 1751 INP_WLOCK_ASSERT(tp->t_inpcb); 1752 switch(state) { 1753 case TCP_LOG_STATE_CLEAR: 1754 while ((log_entry = STAILQ_FIRST(&tp->t_logs)) != NULL) 1755 tcp_log_remove_log_head(tp, log_entry); 1756 /* Fall through */ 1757 1758 case TCP_LOG_STATE_OFF: 1759 tp->t_logstate = TCP_LOG_STATE_OFF; 1760 break; 1761 1762 case TCP_LOG_STATE_TAIL: 1763 case TCP_LOG_STATE_HEAD: 1764 case TCP_LOG_STATE_CONTINUAL: 1765 case TCP_LOG_STATE_HEAD_AUTO: 1766 case TCP_LOG_STATE_TAIL_AUTO: 1767 tp->t_logstate = state; 1768 break; 1769 1770 default: 1771 return (EINVAL); 1772 } 1773 if (tcp_disable_all_bb_logs) { 1774 /* We are prohibited from doing any logs */ 1775 tp->t_logstate = TCP_LOG_STATE_OFF; 1776 } 1777 tp->t_flags2 &= ~(TF2_LOG_AUTO); 1778 1779 return (0); 1780 } 1781 1782 /* If tcp_drain() is called, flush half the log entries. */ 1783 void 1784 tcp_log_drain(struct tcpcb *tp) 1785 { 1786 struct tcp_log_mem *log_entry, *next; 1787 int target, skip; 1788 1789 INP_WLOCK_ASSERT(tp->t_inpcb); 1790 if ((target = tp->t_lognum / 2) == 0) 1791 return; 1792 1793 /* 1794 * If we are logging the "head" packets, we want to discard 1795 * from the tail of the queue. Otherwise, we want to discard 1796 * from the head. 1797 */ 1798 if (tp->t_logstate == TCP_LOG_STATE_HEAD || 1799 tp->t_logstate == TCP_LOG_STATE_HEAD_AUTO) { 1800 skip = tp->t_lognum - target; 1801 STAILQ_FOREACH(log_entry, &tp->t_logs, tlm_queue) 1802 if (!--skip) 1803 break; 1804 KASSERT(log_entry != NULL, 1805 ("%s: skipped through all entries!", __func__)); 1806 if (log_entry == NULL) 1807 return; 1808 while ((next = STAILQ_NEXT(log_entry, tlm_queue)) != NULL) { 1809 STAILQ_REMOVE_AFTER(&tp->t_logs, log_entry, tlm_queue); 1810 tcp_log_entry_refcnt_rem(next); 1811 tcp_log_remove_log_cleanup(tp, next); 1812 #ifdef INVARIANTS 1813 target--; 1814 #endif 1815 } 1816 KASSERT(target == 0, 1817 ("%s: After removing from tail, target was %d", __func__, 1818 target)); 1819 } else if (tp->t_logstate == TCP_LOG_STATE_CONTINUAL) { 1820 (void)tcp_log_dump_tp_logbuf(tp, "auto-dumped from continual", 1821 M_NOWAIT, false); 1822 } else { 1823 while ((log_entry = STAILQ_FIRST(&tp->t_logs)) != NULL && 1824 target--) 1825 tcp_log_remove_log_head(tp, log_entry); 1826 KASSERT(target <= 0, 1827 ("%s: After removing from head, target was %d", __func__, 1828 target)); 1829 KASSERT(tp->t_lognum > 0, 1830 ("%s: After removing from head, tp->t_lognum was %d", 1831 __func__, target)); 1832 KASSERT(log_entry != NULL, 1833 ("%s: After removing from head, the tailq was empty", 1834 __func__)); 1835 } 1836 } 1837 1838 static inline int 1839 tcp_log_copyout(struct sockopt *sopt, void *src, void *dst, size_t len) 1840 { 1841 1842 if (sopt->sopt_td != NULL) 1843 return (copyout(src, dst, len)); 1844 bcopy(src, dst, len); 1845 return (0); 1846 } 1847 1848 static int 1849 tcp_log_logs_to_buf(struct sockopt *sopt, struct tcp_log_stailq *log_tailqp, 1850 struct tcp_log_buffer **end, int count) 1851 { 1852 struct tcp_log_buffer *out_entry; 1853 struct tcp_log_mem *log_entry; 1854 size_t entrysize; 1855 int error; 1856 #ifdef INVARIANTS 1857 int orig_count = count; 1858 #endif 1859 1860 /* Copy the data out. 

/* If tcp_drain() is called, flush half the log entries. */
void
tcp_log_drain(struct tcpcb *tp)
{
	struct tcp_log_mem *log_entry, *next;
	int target, skip;

	INP_WLOCK_ASSERT(tp->t_inpcb);
	if ((target = tp->t_lognum / 2) == 0)
		return;

	/*
	 * If we are logging the "head" packets, we want to discard
	 * from the tail of the queue. Otherwise, we want to discard
	 * from the head.
	 */
	if (tp->t_logstate == TCP_LOG_STATE_HEAD ||
	    tp->t_logstate == TCP_LOG_STATE_HEAD_AUTO) {
		skip = tp->t_lognum - target;
		STAILQ_FOREACH(log_entry, &tp->t_logs, tlm_queue)
			if (!--skip)
				break;
		KASSERT(log_entry != NULL,
		    ("%s: skipped through all entries!", __func__));
		if (log_entry == NULL)
			return;
		while ((next = STAILQ_NEXT(log_entry, tlm_queue)) != NULL) {
			STAILQ_REMOVE_AFTER(&tp->t_logs, log_entry, tlm_queue);
			tcp_log_entry_refcnt_rem(next);
			tcp_log_remove_log_cleanup(tp, next);
#ifdef INVARIANTS
			target--;
#endif
		}
		KASSERT(target == 0,
		    ("%s: After removing from tail, target was %d", __func__,
		    target));
	} else if (tp->t_logstate == TCP_LOG_STATE_CONTINUAL) {
		(void)tcp_log_dump_tp_logbuf(tp, "auto-dumped from continual",
		    M_NOWAIT, false);
	} else {
		while ((log_entry = STAILQ_FIRST(&tp->t_logs)) != NULL &&
		    target--)
			tcp_log_remove_log_head(tp, log_entry);
		KASSERT(target <= 0,
		    ("%s: After removing from head, target was %d", __func__,
		    target));
		KASSERT(tp->t_lognum > 0,
		    ("%s: After removing from head, tp->t_lognum was %d",
		    __func__, tp->t_lognum));
		KASSERT(log_entry != NULL,
		    ("%s: After removing from head, the tailq was empty",
		    __func__));
	}
}

static inline int
tcp_log_copyout(struct sockopt *sopt, void *src, void *dst, size_t len)
{

	if (sopt->sopt_td != NULL)
		return (copyout(src, dst, len));
	bcopy(src, dst, len);
	return (0);
}

static int
tcp_log_logs_to_buf(struct sockopt *sopt, struct tcp_log_stailq *log_tailqp,
    struct tcp_log_buffer **end, int count)
{
	struct tcp_log_buffer *out_entry;
	struct tcp_log_mem *log_entry;
	size_t entrysize;
	int error;
#ifdef INVARIANTS
	int orig_count = count;
#endif

	/* Copy the data out. */
	error = 0;
	out_entry = (struct tcp_log_buffer *) sopt->sopt_val;
	STAILQ_FOREACH(log_entry, log_tailqp, tlm_queue) {
		count--;
		KASSERT(count >= 0,
		    ("%s:%d: Exceeded expected count (%d) processing list %p",
		    __func__, __LINE__, orig_count, log_tailqp));

#ifdef TCPLOG_DEBUG_COUNTERS
		counter_u64_add(tcp_log_que_copyout, 1);
#endif

		/*
		 * Skip copying out the header if it isn't present.
		 * Instead, copy out zeros (to ensure we don't leak info).
		 * TODO: Make sure we truly do zero everything we don't
		 * explicitly set.
		 */
		if (log_entry->tlm_buf.tlb_eventflags & TLB_FLAG_HDR)
			entrysize = sizeof(struct tcp_log_buffer);
		else
			entrysize = offsetof(struct tcp_log_buffer, tlb_th);
		error = tcp_log_copyout(sopt, &log_entry->tlm_buf, out_entry,
		    entrysize);
		if (error)
			break;
		if (!(log_entry->tlm_buf.tlb_eventflags & TLB_FLAG_HDR)) {
			error = tcp_log_copyout(sopt, zerobuf,
			    ((uint8_t *)out_entry) + entrysize,
			    sizeof(struct tcp_log_buffer) - entrysize);
		}

		/*
		 * Copy out the verbose bit, if needed. Either way,
		 * increment the output pointer the correct amount.
		 */
		if (log_entry->tlm_buf.tlb_eventflags & TLB_FLAG_VERBOSE) {
			error = tcp_log_copyout(sopt, &log_entry->tlm_v,
			    out_entry->tlb_verbose,
			    sizeof(struct tcp_log_verbose));
			if (error)
				break;
			out_entry = (struct tcp_log_buffer *)
			    (((uint8_t *) (out_entry + 1)) +
			    sizeof(struct tcp_log_verbose));
		} else
			out_entry++;
	}
	*end = out_entry;
	KASSERT(error || count == 0,
	    ("%s:%d: Less than expected count (%d) processing list %p"
	    " (%d remain)", __func__, __LINE__, orig_count,
	    log_tailqp, count));

	return (error);
}
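
/*
 * Illustrative read-side usage (an assumption about the userspace
 * interface, not verified here): tcp_log_getlogbuf() below services the
 * TCP_LOGBUF getsockopt. A caller supplies a buffer and gets back as many
 * packed tcp_log_buffer (plus optional tcp_log_verbose) records as fit;
 * on return the length reflects the bytes actually copied out:
 *
 *	char buf[256 * 1024];	// arbitrary, generously sized example
 *	socklen_t len = sizeof(buf);
 *
 *	if (getsockopt(s, IPPROTO_TCP, TCP_LOGBUF, buf, &len) == -1)
 *		err(1, "getsockopt(TCP_LOGBUF)");
 */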

/*
 * Copy out the buffer. Note that we do incremental copying, so
 * sooptcopyout() won't work. However, the goal is to produce the same
 * end result as if we copied in the entire user buffer, updated it,
 * and then used sooptcopyout() to copy it out.
 *
 * NOTE: This should be called with a write lock on the PCB; however,
 * the function will drop it after it extracts the data from the TCPCB.
 */
int
tcp_log_getlogbuf(struct sockopt *sopt, struct tcpcb *tp)
{
	struct tcp_log_stailq log_tailq;
	struct tcp_log_mem *log_entry, *log_next;
	struct tcp_log_buffer *out_entry;
	struct inpcb *inp;
	size_t outsize, entrysize;
	int error, outnum;

	INP_WLOCK_ASSERT(tp->t_inpcb);
	inp = tp->t_inpcb;

	/*
	 * Determine which log entries will fit in the buffer. As an
	 * optimization, skip this if all the entries will clearly fit
	 * in the buffer. (However, get an exact size if we are using
	 * INVARIANTS.)
	 */
#ifndef INVARIANTS
	if (sopt->sopt_valsize / (sizeof(struct tcp_log_buffer) +
	    sizeof(struct tcp_log_verbose)) >= tp->t_lognum) {
		log_entry = STAILQ_LAST(&tp->t_logs, tcp_log_mem, tlm_queue);
		log_next = NULL;
		outsize = 0;
		outnum = tp->t_lognum;
	} else {
#endif
		outsize = outnum = 0;
		log_entry = NULL;
		STAILQ_FOREACH(log_next, &tp->t_logs, tlm_queue) {
			entrysize = sizeof(struct tcp_log_buffer);
			if (log_next->tlm_buf.tlb_eventflags &
			    TLB_FLAG_VERBOSE)
				entrysize += sizeof(struct tcp_log_verbose);
			if ((sopt->sopt_valsize - outsize) < entrysize)
				break;
			outsize += entrysize;
			outnum++;
			log_entry = log_next;
		}
		KASSERT(outsize <= sopt->sopt_valsize,
		    ("%s: calculated output size (%zu) greater than available "
		    "space (%zu)", __func__, outsize, sopt->sopt_valsize));
#ifndef INVARIANTS
	}
#endif

	/*
	 * Copy traditional sooptcopyout() behavior: if sopt->sopt_val
	 * is NULL, silently skip the copy. However, in this case, we
	 * will leave the list alone and return. Functionally, this
	 * gives userspace a way to poll for an approximate buffer
	 * size they will need to get the log entries.
	 */
	if (sopt->sopt_val == NULL) {
		INP_WUNLOCK(inp);
		if (outsize == 0) {
			outsize = outnum * (sizeof(struct tcp_log_buffer) +
			    sizeof(struct tcp_log_verbose));
		}
		if (sopt->sopt_valsize > outsize)
			sopt->sopt_valsize = outsize;
		return (0);
	}

	/*
	 * Break apart the list. We'll save the ones we want to copy
	 * out locally and remove them from the TCPCB list. We can
	 * then drop the INPCB lock while we do the copyout.
	 *
	 * There are roughly three cases:
	 * 1. There was nothing to copy out. That's easy: drop the
	 *    lock and return.
	 * 2. We are copying out the entire list. Again, that's easy:
	 *    move the whole list.
	 * 3. We are copying out a partial list. That's harder. We
	 *    need to update the list book-keeping entries.
	 */
	if (log_entry != NULL && log_next == NULL) {
		/* Move entire list. */
		KASSERT(outnum == tp->t_lognum,
		    ("%s:%d: outnum (%d) should match tp->t_lognum (%d)",
		    __func__, __LINE__, outnum, tp->t_lognum));
		log_tailq = tp->t_logs;
		tp->t_lognum = 0;
		STAILQ_INIT(&tp->t_logs);
	} else if (log_entry != NULL) {
		/* Move partial list. */
		KASSERT(outnum < tp->t_lognum,
		    ("%s:%d: outnum (%d) not less than tp->t_lognum (%d)",
		    __func__, __LINE__, outnum, tp->t_lognum));
		STAILQ_FIRST(&log_tailq) = STAILQ_FIRST(&tp->t_logs);
		STAILQ_FIRST(&tp->t_logs) = STAILQ_NEXT(log_entry, tlm_queue);
		KASSERT(STAILQ_NEXT(log_entry, tlm_queue) != NULL,
		    ("%s:%d: tp->t_logs is unexpectedly shorter than expected "
		    "(tp: %p, log_tailq: %p, outnum: %d, tp->t_lognum: %d)",
		    __func__, __LINE__, tp, &log_tailq, outnum, tp->t_lognum));
		STAILQ_NEXT(log_entry, tlm_queue) = NULL;
		log_tailq.stqh_last = &STAILQ_NEXT(log_entry, tlm_queue);
		tp->t_lognum -= outnum;
	} else
		STAILQ_INIT(&log_tailq);

	/* Drop the PCB lock. */
	INP_WUNLOCK(inp);

	/* Copy the data out. */
	error = tcp_log_logs_to_buf(sopt, &log_tailq, &out_entry, outnum);

	if (error) {
		/* Restore list */
		INP_WLOCK(inp);
		if ((inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) == 0) {
			tp = intotcpcb(inp);

			/* Merge the two lists. */
			STAILQ_CONCAT(&log_tailq, &tp->t_logs);
			tp->t_logs = log_tailq;
			tp->t_lognum += outnum;
		}
		INP_WUNLOCK(inp);
	} else {
		/* Sanity check entries */
		KASSERT(((caddr_t)out_entry - (caddr_t)sopt->sopt_val) ==
		    outsize, ("%s: Actual output size (%zu) != "
		    "calculated output size (%zu)", __func__,
		    (size_t)((caddr_t)out_entry - (caddr_t)sopt->sopt_val),
		    outsize));

		/* Free the entries we just copied out. */
		STAILQ_FOREACH_SAFE(log_entry, &log_tailq, tlm_queue, log_next) {
			tcp_log_entry_refcnt_rem(log_entry);
			uma_zfree(tcp_log_zone, log_entry);
		}
	}

	sopt->sopt_valsize = (size_t)((caddr_t)out_entry -
	    (caddr_t)sopt->sopt_val);
	return (error);
}

static void
tcp_log_free_queue(struct tcp_log_dev_queue *param)
{
	struct tcp_log_dev_log_queue *entry;

	KASSERT(param != NULL, ("%s: called with NULL param", __func__));
	if (param == NULL)
		return;

	entry = (struct tcp_log_dev_log_queue *)param;

	/* Free the entries. */
	tcp_log_free_entries(&entry->tldl_entries, &entry->tldl_count);

	/* Free the buffer, if it is allocated. */
	if (entry->tldl_common.tldq_buf != NULL)
		free(entry->tldl_common.tldq_buf, M_TCPLOGDEV);

	/* Free the queue entry. */
	free(entry, M_TCPLOGDEV);
}
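
/*
 * Rough layout of the buffer produced by tcp_log_expandlogbuf() below,
 * as derived from tcp_log_logs_to_buf() above (a descriptive sketch only):
 *
 *	struct tcp_log_header
 *	struct tcp_log_buffer		(entry 1)
 *	[struct tcp_log_verbose]	(only if entry 1 set TLB_FLAG_VERBOSE)
 *	struct tcp_log_buffer		(entry 2)
 *	[struct tcp_log_verbose]	(only if entry 2 set TLB_FLAG_VERBOSE)
 *	...
 *
 * tlh_length covers the header plus all of the records that follow it.
 */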

static struct tcp_log_common_header *
tcp_log_expandlogbuf(struct tcp_log_dev_queue *param)
{
	struct tcp_log_dev_log_queue *entry;
	struct tcp_log_header *hdr;
	uint8_t *end;
	struct sockopt sopt;
	int error;

	entry = (struct tcp_log_dev_log_queue *)param;

	/* Take a worst-case guess at space needs. */
	sopt.sopt_valsize = sizeof(struct tcp_log_header) +
	    entry->tldl_count * (sizeof(struct tcp_log_buffer) +
	    sizeof(struct tcp_log_verbose));
	hdr = malloc(sopt.sopt_valsize, M_TCPLOGDEV, M_NOWAIT);
	if (hdr == NULL) {
#ifdef TCPLOG_DEBUG_COUNTERS
		counter_u64_add(tcp_log_que_fail5, entry->tldl_count);
#endif
		return (NULL);
	}
	sopt.sopt_val = hdr + 1;
	sopt.sopt_valsize -= sizeof(struct tcp_log_header);
	sopt.sopt_td = NULL;

	error = tcp_log_logs_to_buf(&sopt, &entry->tldl_entries,
	    (struct tcp_log_buffer **)&end, entry->tldl_count);
	if (error) {
		free(hdr, M_TCPLOGDEV);
		return (NULL);
	}

	/* Free the entries. */
	tcp_log_free_entries(&entry->tldl_entries, &entry->tldl_count);
	entry->tldl_count = 0;

	memset(hdr, 0, sizeof(struct tcp_log_header));
	hdr->tlh_version = TCP_LOG_BUF_VER;
	hdr->tlh_type = TCP_LOG_DEV_TYPE_BBR;
	hdr->tlh_length = end - (uint8_t *)hdr;
	hdr->tlh_ie = entry->tldl_ie;
	hdr->tlh_af = entry->tldl_af;
	getboottime(&hdr->tlh_offset);
	strlcpy(hdr->tlh_id, entry->tldl_id, TCP_LOG_ID_LEN);
	strlcpy(hdr->tlh_tag, entry->tldl_tag, TCP_LOG_TAG_LEN);
	strlcpy(hdr->tlh_reason, entry->tldl_reason, TCP_LOG_REASON_LEN);
	return ((struct tcp_log_common_header *)hdr);
}
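
/*
 * Illustrative trigger (an assumption about the userspace interface, not
 * verified here): a dump of a single connection's buffers can be requested
 * with the TCP_LOGDUMP socket option, passing a free-form reason string
 * that ends up in tldl_reason, e.g.:
 *
 *	const char reason[] = "operator requested dump";
 *
 *	if (setsockopt(s, IPPROTO_TCP, TCP_LOGDUMP, reason,
 *	    sizeof(reason)) == -1)
 *		err(1, "setsockopt(TCP_LOGDUMP)");
 */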

/*
 * Queue the tcpcb's log buffer for transmission via the log buffer facility.
 *
 * NOTE: This should be called with a write lock on the PCB.
 *
 * how should be M_WAITOK or M_NOWAIT. If M_WAITOK, the function will drop
 * and reacquire the INP lock if it needs to do so.
 *
 * If force is false, this will only dump auto-logged sessions if
 * tcp_log_auto_all is true or if there is a log ID defined for the session.
 */
int
tcp_log_dump_tp_logbuf(struct tcpcb *tp, char *reason, int how, bool force)
{
	struct tcp_log_dev_log_queue *entry;
	struct inpcb *inp;
#ifdef TCPLOG_DEBUG_COUNTERS
	int num_entries;
#endif

	inp = tp->t_inpcb;
	INP_WLOCK_ASSERT(inp);

	/* If there are no log entries, there is nothing to do. */
	if (tp->t_lognum == 0)
		return (0);

	/* Check for a log ID. */
	if (tp->t_lib == NULL && (tp->t_flags2 & TF2_LOG_AUTO) &&
	    !tcp_log_auto_all && !force) {
		struct tcp_log_mem *log_entry;

		/*
		 * We needed a log ID and none was found. Free the log entries
		 * and return success. Also, cancel further logging. If the
		 * session doesn't have a log ID by now, we'll assume it isn't
		 * going to get one.
		 */
		while ((log_entry = STAILQ_FIRST(&tp->t_logs)) != NULL)
			tcp_log_remove_log_head(tp, log_entry);
		KASSERT(tp->t_lognum == 0,
		    ("%s: After freeing entries, tp->t_lognum=%d (expected 0)",
		    __func__, tp->t_lognum));
		tp->t_logstate = TCP_LOG_STATE_OFF;
		return (0);
	}

	/*
	 * Allocate memory. If we must wait, we'll need to drop the locks
	 * and reacquire them (and do all the related business that goes
	 * along with that).
	 */
	entry = malloc(sizeof(struct tcp_log_dev_log_queue), M_TCPLOGDEV,
	    M_NOWAIT);
	if (entry == NULL && (how & M_NOWAIT)) {
#ifdef TCPLOG_DEBUG_COUNTERS
		counter_u64_add(tcp_log_que_fail3, 1);
#endif
		return (ENOBUFS);
	}
	if (entry == NULL) {
		INP_WUNLOCK(inp);
		entry = malloc(sizeof(struct tcp_log_dev_log_queue),
		    M_TCPLOGDEV, M_WAITOK);
		INP_WLOCK(inp);
		/*
		 * Note that this check is slightly overly-restrictive in
		 * that the TCB can survive either of these events.
		 * However, there is currently not a good way to ensure
		 * that is the case. So, if we hit this M_WAIT path, we
		 * may end up dropping some entries. That seems like a
		 * small price to pay for safety.
		 */
		if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
			free(entry, M_TCPLOGDEV);
#ifdef TCPLOG_DEBUG_COUNTERS
			counter_u64_add(tcp_log_que_fail2, 1);
#endif
			return (ECONNRESET);
		}
		tp = intotcpcb(inp);
		if (tp->t_lognum == 0) {
			free(entry, M_TCPLOGDEV);
			return (0);
		}
	}

	/* Fill in the unique parts of the queue entry. */
	if (tp->t_lib != NULL) {
		strlcpy(entry->tldl_id, tp->t_lib->tlb_id, TCP_LOG_ID_LEN);
		strlcpy(entry->tldl_tag, tp->t_lib->tlb_tag, TCP_LOG_TAG_LEN);
	} else {
		strlcpy(entry->tldl_id, "UNKNOWN", TCP_LOG_ID_LEN);
		strlcpy(entry->tldl_tag, "UNKNOWN", TCP_LOG_TAG_LEN);
	}
	if (reason != NULL)
		strlcpy(entry->tldl_reason, reason, TCP_LOG_REASON_LEN);
	else
		strlcpy(entry->tldl_reason, "UNKNOWN", TCP_LOG_REASON_LEN);
	entry->tldl_ie = inp->inp_inc.inc_ie;
	if (inp->inp_inc.inc_flags & INC_ISIPV6)
		entry->tldl_af = AF_INET6;
	else
		entry->tldl_af = AF_INET;
	entry->tldl_entries = tp->t_logs;
	entry->tldl_count = tp->t_lognum;

	/* Fill in the common parts of the queue entry. */
	entry->tldl_common.tldq_buf = NULL;
	entry->tldl_common.tldq_xform = tcp_log_expandlogbuf;
	entry->tldl_common.tldq_dtor = tcp_log_free_queue;

	/* Clear the log data from the TCPCB. */
#ifdef TCPLOG_DEBUG_COUNTERS
	num_entries = tp->t_lognum;
#endif
	tp->t_lognum = 0;
	STAILQ_INIT(&tp->t_logs);

	/* Add the entry. If no one is listening, free the entry. */
	if (tcp_log_dev_add_log((struct tcp_log_dev_queue *)entry)) {
		tcp_log_free_queue((struct tcp_log_dev_queue *)entry);
#ifdef TCPLOG_DEBUG_COUNTERS
		counter_u64_add(tcp_log_que_fail1, num_entries);
	} else {
		counter_u64_add(tcp_log_queued, num_entries);
#endif
	}
	return (0);
}

/*
 * Queue the log_id_node's log buffers for transmission via the log buffer
 * facility.
 *
 * NOTE: This should be called with the bucket locked and referenced.
 *
 * how should be M_WAITOK or M_NOWAIT. If M_WAITOK, the function will drop
 * and reacquire the bucket lock if it needs to do so. (The caller must
 * ensure that the tln is no longer on any lists so no one else will mess
 * with this while the lock is dropped!)
 */
static int
tcp_log_dump_node_logbuf(struct tcp_log_id_node *tln, char *reason, int how)
{
	struct tcp_log_dev_log_queue *entry;
	struct tcp_log_id_bucket *tlb;

	tlb = tln->tln_bucket;
	TCPID_BUCKET_LOCK_ASSERT(tlb);
	KASSERT(tlb->tlb_refcnt > 0,
	    ("%s:%d: Called with unreferenced bucket (tln=%p, tlb=%p)",
	    __func__, __LINE__, tln, tlb));
	KASSERT(tln->tln_closed,
	    ("%s:%d: Called for node with tln_closed==false (tln=%p)",
	    __func__, __LINE__, tln));

	/* If there are no log entries, there is nothing to do. */
	if (tln->tln_count == 0)
		return (0);

	/*
	 * Allocate memory. If we must wait, we'll need to drop the locks
	 * and reacquire them (and do all the related business that goes
	 * along with that).
	 */
	entry = malloc(sizeof(struct tcp_log_dev_log_queue), M_TCPLOGDEV,
	    M_NOWAIT);
	if (entry == NULL && (how & M_NOWAIT))
		return (ENOBUFS);
	if (entry == NULL) {
		TCPID_BUCKET_UNLOCK(tlb);
		entry = malloc(sizeof(struct tcp_log_dev_log_queue),
		    M_TCPLOGDEV, M_WAITOK);
		TCPID_BUCKET_LOCK(tlb);
	}

	/* Fill in the common parts of the queue entry. */
	entry->tldl_common.tldq_buf = NULL;
	entry->tldl_common.tldq_xform = tcp_log_expandlogbuf;
	entry->tldl_common.tldq_dtor = tcp_log_free_queue;

	/* Fill in the unique parts of the queue entry. */
	strlcpy(entry->tldl_id, tlb->tlb_id, TCP_LOG_ID_LEN);
	strlcpy(entry->tldl_tag, tlb->tlb_tag, TCP_LOG_TAG_LEN);
	if (reason != NULL)
		strlcpy(entry->tldl_reason, reason, TCP_LOG_REASON_LEN);
	else
		strlcpy(entry->tldl_reason, "UNKNOWN", TCP_LOG_REASON_LEN);
	entry->tldl_ie = tln->tln_ie;
	entry->tldl_entries = tln->tln_entries;
	entry->tldl_count = tln->tln_count;
	entry->tldl_af = tln->tln_af;

	/* Add the entry. If no one is listening, free the entry. */
	if (tcp_log_dev_add_log((struct tcp_log_dev_queue *)entry))
		tcp_log_free_queue((struct tcp_log_dev_queue *)entry);

	return (0);
}

/*
 * Queue the log buffers for all sessions in a bucket for transmission via
 * the log buffer facility.
 *
 * NOTE: This should be called with a locked bucket; however, the function
 * will drop the lock.
 */
#define	LOCAL_SAVE	10
static void
tcp_log_dumpbucketlogs(struct tcp_log_id_bucket *tlb, char *reason)
{
	struct tcp_log_id_node local_entries[LOCAL_SAVE];
	struct inpcb *inp;
	struct tcpcb *tp;
	struct tcp_log_id_node *cur_tln, *prev_tln, *tmp_tln;
	int i, num_local_entries, tree_locked;
	bool expireq_locked;

	TCPID_BUCKET_LOCK_ASSERT(tlb);

	/*
	 * Take a reference on the bucket to keep it from disappearing until
	 * we are done.
	 */
	TCPID_BUCKET_REF(tlb);

	/*
	 * We'll try to create these without dropping locks. However, we
	 * might very well need to drop locks to get memory. If that's the
	 * case, we'll save up to 10 entries on the stack, and sacrifice the
	 * rest. (Otherwise, we need to worry about finding our place again
	 * in a potentially changed list. It just doesn't seem worth the
	 * trouble to do that.)
	 */
	expireq_locked = false;
	num_local_entries = 0;
	prev_tln = NULL;
	tree_locked = TREE_UNLOCKED;
	SLIST_FOREACH_SAFE(cur_tln, &tlb->tlb_head, tln_list, tmp_tln) {
		/*
		 * If this isn't associated with a TCPCB, we can pull it off
		 * the list now. We need to be careful that the expire timer
		 * hasn't already taken ownership (tln_expiretime == SBT_MAX).
		 * If so, we let the expire timer code free the data.
		 */
		if (cur_tln->tln_closed) {
no_inp:
			/*
			 * Get the expireq lock so we can get a consistent
			 * read of tln_expiretime and so we can remove this
			 * from the expireq.
			 */
			if (!expireq_locked) {
				TCPLOG_EXPIREQ_LOCK();
				expireq_locked = true;
			}

			/*
			 * We ignore entries with tln_expiretime == SBT_MAX.
			 * The expire timer code already owns those.
			 */
			KASSERT(cur_tln->tln_expiretime > (sbintime_t) 0,
			    ("%s:%d: node on the expire queue without positive "
			    "expire time", __func__, __LINE__));
			if (cur_tln->tln_expiretime == SBT_MAX) {
				prev_tln = cur_tln;
				continue;
			}

			/* Remove the entry from the expireq. */
			STAILQ_REMOVE(&tcp_log_expireq_head, cur_tln,
			    tcp_log_id_node, tln_expireq);

			/* Remove the entry from the bucket. */
			if (prev_tln != NULL)
				SLIST_REMOVE_AFTER(prev_tln, tln_list);
			else
				SLIST_REMOVE_HEAD(&tlb->tlb_head, tln_list);

			/*
			 * Drop the INP and bucket reference counts. Due to
			 * lock-ordering rules, we need to drop the expire
			 * queue lock.
			 */
			TCPLOG_EXPIREQ_UNLOCK();
			expireq_locked = false;

			/* Drop the INP reference. */
			INP_WLOCK(cur_tln->tln_inp);
			if (!in_pcbrele_wlocked(cur_tln->tln_inp))
				INP_WUNLOCK(cur_tln->tln_inp);

			if (tcp_log_unref_bucket(tlb, &tree_locked, NULL)) {
#ifdef INVARIANTS
				panic("%s: Bucket refcount unexpectedly 0.",
				    __func__);
#endif
				/*
				 * Recover as best we can: free the entry we
				 * own.
				 */
				tcp_log_free_entries(&cur_tln->tln_entries,
				    &cur_tln->tln_count);
				uma_zfree(tcp_log_id_node_zone, cur_tln);
				goto done;
			}

			if (tcp_log_dump_node_logbuf(cur_tln, reason,
			    M_NOWAIT)) {
				/*
				 * If we have space, save the entries locally.
				 * Otherwise, free them.
				 */
				if (num_local_entries < LOCAL_SAVE) {
					local_entries[num_local_entries] =
					    *cur_tln;
					num_local_entries++;
				} else {
					tcp_log_free_entries(
					    &cur_tln->tln_entries,
					    &cur_tln->tln_count);
				}
			}

			/* No matter what, we are done with the node now. */
			uma_zfree(tcp_log_id_node_zone, cur_tln);

			/*
			 * Because we removed this entry from the list, prev_tln
			 * (which tracks the previous entry still on the tlb
			 * list) remains unchanged.
			 */
			continue;
		}

		/*
		 * If we get to this point, the session data is still held in
		 * the TCPCB. So, we need to pull the data out of that.
		 *
		 * We will need to drop the expireq lock so we can lock the INP.
		 * We can then try to extract the data the "easy" way. If that
		 * fails, we'll save the log entries for later.
		 */
		if (expireq_locked) {
			TCPLOG_EXPIREQ_UNLOCK();
			expireq_locked = false;
		}

		/* Lock the INP and then re-check the state. */
		inp = cur_tln->tln_inp;
		INP_WLOCK(inp);
		/*
		 * If we caught this while it was transitioning, the data
		 * might have moved from the TCPCB to the tln (signified by
		 * setting tln_closed to true). If so, treat this like an
		 * inactive connection.
		 */
		if (cur_tln->tln_closed) {
			/*
			 * It looks like we may have caught this connection
			 * while it was transitioning from active to inactive.
			 * Treat this like an inactive connection.
			 */
			INP_WUNLOCK(inp);
			goto no_inp;
		}

		/*
		 * Try to dump the data from the tp without dropping the lock.
		 * If this fails, try to save off the data locally.
		 */
		tp = cur_tln->tln_tp;
		if (tcp_log_dump_tp_logbuf(tp, reason, M_NOWAIT, true) &&
		    num_local_entries < LOCAL_SAVE) {
			tcp_log_move_tp_to_node(tp,
			    &local_entries[num_local_entries]);
			local_entries[num_local_entries].tln_closed = 1;
			KASSERT(local_entries[num_local_entries].tln_bucket ==
			    tlb, ("%s: %d: bucket mismatch for node %p",
			    __func__, __LINE__, cur_tln));
			num_local_entries++;
		}

		INP_WUNLOCK(inp);

		/*
		 * We are going to leave the current tln on the list. It will
		 * become the previous tln.
		 */
		prev_tln = cur_tln;
	}

	/* Drop our locks, if any. */
	KASSERT(tree_locked == TREE_UNLOCKED,
	    ("%s: %d: tree unexpectedly locked", __func__, __LINE__));
	switch (tree_locked) {
	case TREE_WLOCKED:
		TCPID_TREE_WUNLOCK();
		tree_locked = TREE_UNLOCKED;
		break;
	case TREE_RLOCKED:
		TCPID_TREE_RUNLOCK();
		tree_locked = TREE_UNLOCKED;
		break;
	}
	if (expireq_locked) {
		TCPLOG_EXPIREQ_UNLOCK();
		expireq_locked = false;
	}

	/*
	 * Try again for any saved entries. tcp_log_dump_node_logbuf() is
	 * guaranteed to free the log entries within the node. And, since
	 * the node itself is on our stack, we don't need to free it.
	 */
	for (i = 0; i < num_local_entries; i++)
		tcp_log_dump_node_logbuf(&local_entries[i], reason, M_WAITOK);

	/* Drop our reference. */
	if (!tcp_log_unref_bucket(tlb, &tree_locked, NULL))
		TCPID_BUCKET_UNLOCK(tlb);

done:
	/* Drop our locks, if any. */
	switch (tree_locked) {
	case TREE_WLOCKED:
		TCPID_TREE_WUNLOCK();
		break;
	case TREE_RLOCKED:
		TCPID_TREE_RUNLOCK();
		break;
	}
	if (expireq_locked)
		TCPLOG_EXPIREQ_UNLOCK();
}
#undef LOCAL_SAVE
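
/*
 * Illustrative trigger (an assumption about the userspace interface, not
 * verified here): dumping every session sharing a log ID is normally
 * requested through the TCP_LOGDUMPID socket option with a reason string,
 * which reaches tcp_log_dump_tp_bucket_logbufs() below, e.g.:
 *
 *	const char reason[] = "dump all sessions for this id";
 *
 *	if (setsockopt(s, IPPROTO_TCP, TCP_LOGDUMPID, reason,
 *	    sizeof(reason)) == -1)
 *		err(1, "setsockopt(TCP_LOGDUMPID)");
 */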

/*
 * Queue the log buffers for all sessions in a bucket for transmission via
 * the log buffer facility.
 *
 * NOTE: This should be called with a locked INP; however, the function
 * will drop the lock.
 */
void
tcp_log_dump_tp_bucket_logbufs(struct tcpcb *tp, char *reason)
{
	struct tcp_log_id_bucket *tlb;
	int tree_locked;

	/* Figure out our bucket and lock it. */
	INP_WLOCK_ASSERT(tp->t_inpcb);
	tlb = tp->t_lib;
	if (tlb == NULL) {
		/*
		 * No bucket; treat this like a request to dump a single
		 * session's traces.
		 */
		(void)tcp_log_dump_tp_logbuf(tp, reason, M_WAITOK, true);
		INP_WUNLOCK(tp->t_inpcb);
		return;
	}
	TCPID_BUCKET_REF(tlb);
	INP_WUNLOCK(tp->t_inpcb);
	TCPID_BUCKET_LOCK(tlb);

	/* If we are the last reference, we have nothing more to do here. */
	tree_locked = TREE_UNLOCKED;
	if (tcp_log_unref_bucket(tlb, &tree_locked, NULL)) {
		switch (tree_locked) {
		case TREE_WLOCKED:
			TCPID_TREE_WUNLOCK();
			break;
		case TREE_RLOCKED:
			TCPID_TREE_RUNLOCK();
			break;
		}
		return;
	}

	/* Turn this over to tcp_log_dumpbucketlogs() to finish the work. */
	tcp_log_dumpbucketlogs(tlb, reason);
}

/*
 * Mark the end of a flow with the current stack. A stack can add
 * stack-specific info to this trace event by overriding this
 * function (see bbr_log_flowend() for example).
 */
void
tcp_log_flowend(struct tcpcb *tp)
{
	if (tp->t_logstate != TCP_LOG_STATE_OFF) {
		struct socket *so = tp->t_inpcb->inp_socket;
		TCP_LOG_EVENT(tp, NULL, &so->so_rcv, &so->so_snd,
		    TCP_LOG_FLOWEND, 0, 0, NULL, false);
	}
}