/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2016-2018 Netflix, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/arb.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/qmath.h>
#include <sys/queue.h>
#include <sys/refcount.h>
#include <sys/rwlock.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/tree.h>
#include <sys/stats.h> /* Must come after qmath.h and tree.h */
#include <sys/counter.h>

#include <dev/tcp_log/tcp_log_dev.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_log_buf.h>

/* Default expiry time */
#define	TCP_LOG_EXPIRE_TIME	((sbintime_t)60 * SBT_1S)

/* Max interval at which to run the expiry timer */
#define	TCP_LOG_EXPIRE_INTVL	((sbintime_t)5 * SBT_1S)

bool	tcp_log_verbose;
static uma_zone_t tcp_log_bucket_zone, tcp_log_node_zone, tcp_log_zone;
static int	tcp_log_session_limit = TCP_LOG_BUF_DEFAULT_SESSION_LIMIT;
static uint32_t	tcp_log_version = TCP_LOG_BUF_VER;
RB_HEAD(tcp_log_id_tree, tcp_log_id_bucket);
static struct tcp_log_id_tree tcp_log_id_head;
static STAILQ_HEAD(, tcp_log_id_node) tcp_log_expireq_head =
    STAILQ_HEAD_INITIALIZER(tcp_log_expireq_head);
static struct mtx tcp_log_expireq_mtx;
static struct callout tcp_log_expireq_callout;
static u_long tcp_log_auto_ratio = 0;
static volatile u_long tcp_log_auto_ratio_cur = 0;
static uint32_t tcp_log_auto_mode = TCP_LOG_STATE_TAIL;
static bool tcp_log_auto_all = false;
static uint32_t tcp_disable_all_bb_logs = 0;

RB_PROTOTYPE_STATIC(tcp_log_id_tree, tcp_log_id_bucket, tlb_rb, tcp_log_id_cmp)

SYSCTL_NODE(_net_inet_tcp, OID_AUTO, bb, CTLFLAG_RW, 0, "TCP Black Box controls");

SYSCTL_BOOL(_net_inet_tcp_bb, OID_AUTO, log_verbose, CTLFLAG_RW, &tcp_log_verbose,
    0, "Force verbose logging for TCP traces");

SYSCTL_INT(_net_inet_tcp_bb, OID_AUTO, log_session_limit,
    CTLFLAG_RW, &tcp_log_session_limit, 0,
    "Maximum number of events maintained for each TCP session");

SYSCTL_UMA_MAX(_net_inet_tcp_bb, OID_AUTO, log_global_limit, CTLFLAG_RW,
    &tcp_log_zone, "Maximum number of events maintained for all TCP sessions");

SYSCTL_UMA_CUR(_net_inet_tcp_bb, OID_AUTO, log_global_entries, CTLFLAG_RD,
    &tcp_log_zone, "Current number of events maintained for all TCP sessions");

SYSCTL_UMA_MAX(_net_inet_tcp_bb, OID_AUTO, log_id_limit, CTLFLAG_RW,
    &tcp_log_bucket_zone, "Maximum number of log IDs");

SYSCTL_UMA_CUR(_net_inet_tcp_bb, OID_AUTO, log_id_entries, CTLFLAG_RD,
    &tcp_log_bucket_zone, "Current number of log IDs");

SYSCTL_UMA_MAX(_net_inet_tcp_bb, OID_AUTO, log_id_tcpcb_limit, CTLFLAG_RW,
    &tcp_log_node_zone, "Maximum number of tcpcbs with log IDs");

SYSCTL_UMA_CUR(_net_inet_tcp_bb, OID_AUTO, log_id_tcpcb_entries, CTLFLAG_RD,
    &tcp_log_node_zone, "Current number of tcpcbs with log IDs");

SYSCTL_U32(_net_inet_tcp_bb, OID_AUTO, log_version, CTLFLAG_RD, &tcp_log_version,
    0, "Version of log formats exported");

SYSCTL_U32(_net_inet_tcp_bb, OID_AUTO, disable_all, CTLFLAG_RW,
    &tcp_disable_all_bb_logs, TCP_LOG_STATE_HEAD_AUTO,
    "Disable all BB logging for all connections");

SYSCTL_ULONG(_net_inet_tcp_bb, OID_AUTO, log_auto_ratio, CTLFLAG_RW,
    &tcp_log_auto_ratio, 0, "Do auto capturing for 1 out of N sessions");

SYSCTL_U32(_net_inet_tcp_bb, OID_AUTO, log_auto_mode, CTLFLAG_RW,
    &tcp_log_auto_mode, TCP_LOG_STATE_HEAD_AUTO,
    "Logging mode for auto-selected sessions (default is TCP_LOG_STATE_HEAD_AUTO)");

SYSCTL_BOOL(_net_inet_tcp_bb, OID_AUTO, log_auto_all, CTLFLAG_RW,
    &tcp_log_auto_all, false,
    "Auto-select from all sessions (rather than just those with IDs)");

#ifdef TCPLOG_DEBUG_COUNTERS
counter_u64_t tcp_log_queued;
counter_u64_t tcp_log_que_fail1;
counter_u64_t tcp_log_que_fail2;
counter_u64_t tcp_log_que_fail3;
counter_u64_t tcp_log_que_fail4;
counter_u64_t tcp_log_que_fail5;
counter_u64_t tcp_log_que_copyout;
counter_u64_t tcp_log_que_read;
counter_u64_t tcp_log_que_freed;

SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, queued, CTLFLAG_RD,
    &tcp_log_queued, "Number of entries queued");
SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, fail1, CTLFLAG_RD,
    &tcp_log_que_fail1, "Number of entries queued but fail 1");
SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, fail2, CTLFLAG_RD,
    &tcp_log_que_fail2, "Number of entries queued but fail 2");
SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, fail3, CTLFLAG_RD,
    &tcp_log_que_fail3, "Number of entries queued but fail 3");
SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, fail4, CTLFLAG_RD,
    &tcp_log_que_fail4, "Number of entries queued but fail 4");
SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, fail5, CTLFLAG_RD,
    &tcp_log_que_fail5, "Number of entries queued but fail 5");
SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, copyout, CTLFLAG_RD,
    &tcp_log_que_copyout, "Number of entries copied out");
SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, read, CTLFLAG_RD,
    &tcp_log_que_read, "Number of entries read from the queue");
SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, freed, CTLFLAG_RD,
    &tcp_log_que_freed, "Number of entries freed after reading");
#endif

#ifdef INVARIANTS
#define	TCPLOG_DEBUG_RINGBUF
#endif
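
/*
 * Example usage of the knobs above (illustrative only): an administrator who
 * wants to auto-capture roughly one out of every 100 sessions might set
 * something like
 *
 *	sysctl net.inet.tcp.bb.log_auto_ratio=100
 *	sysctl net.inet.tcp.bb.log_auto_mode=<TCP_LOG_STATE_* value>
 *	sysctl net.inet.tcp.bb.log_auto_all=1
 *
 * The numeric value for log_auto_mode corresponds to the TCP_LOG_STATE_*
 * constants in tcp_log_buf.h; the exact value to use depends on the desired
 * capture mode.
 */
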
/* Number of requests to consider a PCB ID "active". */
#define	ACTIVE_REQUEST_COUNT 10

/* Statistic tracking for "active" PCB IDs. */
static counter_u64_t tcp_log_pcb_ids_cur;
static counter_u64_t tcp_log_pcb_ids_tot;

SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, pcb_ids_cur, CTLFLAG_RD,
    &tcp_log_pcb_ids_cur, "Number of pcb IDs allocated in the system");
SYSCTL_COUNTER_U64(_net_inet_tcp_bb, OID_AUTO, pcb_ids_tot, CTLFLAG_RD,
    &tcp_log_pcb_ids_tot, "Total number of pcb IDs that have been allocated");

struct tcp_log_mem
{
	STAILQ_ENTRY(tcp_log_mem) tlm_queue;
	struct tcp_log_buffer	tlm_buf;
	struct tcp_log_verbose	tlm_v;
#ifdef TCPLOG_DEBUG_RINGBUF
	volatile int		tlm_refcnt;
#endif
};

/* 60 bytes for the header, + 16 bytes for padding */
static uint8_t	zerobuf[76];

/*
 * Lock order:
 * 1. TCPID_TREE
 * 2. TCPID_BUCKET
 * 3. INP
 *
 * Rules:
 * A. You need a lock on the Tree to add/remove buckets.
 * B. You need a lock on the bucket to add/remove nodes from the bucket.
 * C. To change information in a node, you need the INP lock if the tln_closed
 *    field is false. Otherwise, you need the bucket lock. (Note that the
 *    tln_closed field can change at any point, so you need to recheck the
 *    entry after acquiring the INP lock.)
 * D. To remove a node from the bucket, you must have that entry locked,
 *    according to the criteria of Rule C. Also, the node must not be on
 *    the expiry queue.
 * E. The exception to C is the expiry queue fields, which are locked by
 *    the TCPLOG_EXPIREQ lock.
 *
 * Buckets have a reference count. Each node is a reference. Further,
 * other callers may add reference counts to keep a bucket from disappearing.
 * You can add a reference as long as you own a lock sufficient to keep the
 * bucket from disappearing. For example, a common use is:
 *   a. Have a locked INP, but need to lock the TCPID_BUCKET.
 *   b. Add a refcount on the bucket. (Safe because the INP lock prevents
 *      the TCPID_BUCKET from going away.)
 *   c. Drop the INP lock.
 *   d. Acquire a lock on the TCPID_BUCKET.
 *   e. Acquire a lock on the INP.
 *   f. Drop the refcount on the bucket.
 *      (At this point, the bucket may disappear.)
 *
 * Expire queue lock:
 * You can acquire this with either the bucket or INP lock. Don't reverse it.
 * When the expire code has committed to freeing a node, it resets the expiry
 * time to SBT_MAX. That is the signal to everyone else that they should
 * leave that node alone.
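 *
 * For illustration, steps (a)-(f) map onto the lock and refcount macros
 * defined below roughly as follows (tcp_log_set_tag() and tcp_log_set_id()
 * are real, slightly different, instances of this pattern):
 *
 *	TCPID_BUCKET_REF(tlb);					(b)
 *	INP_WUNLOCK(inp);					(c)
 *	TCPID_BUCKET_LOCK(tlb);					(d)
 *	INP_WLOCK(inp);						(e)
 *	if (!tcp_log_unref_bucket(tlb, &tree_locked, inp))	(f)
 *		TCPID_BUCKET_UNLOCK(tlb);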
 */
static struct rwlock tcp_id_tree_lock;
#define	TCPID_TREE_WLOCK()		rw_wlock(&tcp_id_tree_lock)
#define	TCPID_TREE_RLOCK()		rw_rlock(&tcp_id_tree_lock)
#define	TCPID_TREE_UPGRADE()		rw_try_upgrade(&tcp_id_tree_lock)
#define	TCPID_TREE_WUNLOCK()		rw_wunlock(&tcp_id_tree_lock)
#define	TCPID_TREE_RUNLOCK()		rw_runlock(&tcp_id_tree_lock)
#define	TCPID_TREE_WLOCK_ASSERT()	rw_assert(&tcp_id_tree_lock, RA_WLOCKED)
#define	TCPID_TREE_RLOCK_ASSERT()	rw_assert(&tcp_id_tree_lock, RA_RLOCKED)
#define	TCPID_TREE_UNLOCK_ASSERT()	rw_assert(&tcp_id_tree_lock, RA_UNLOCKED)

#define	TCPID_BUCKET_LOCK_INIT(tlb)	mtx_init(&((tlb)->tlb_mtx), "tcp log id bucket", NULL, MTX_DEF)
#define	TCPID_BUCKET_LOCK_DESTROY(tlb)	mtx_destroy(&((tlb)->tlb_mtx))
#define	TCPID_BUCKET_LOCK(tlb)		mtx_lock(&((tlb)->tlb_mtx))
#define	TCPID_BUCKET_UNLOCK(tlb)	mtx_unlock(&((tlb)->tlb_mtx))
#define	TCPID_BUCKET_LOCK_ASSERT(tlb)	mtx_assert(&((tlb)->tlb_mtx), MA_OWNED)
#define	TCPID_BUCKET_UNLOCK_ASSERT(tlb)	mtx_assert(&((tlb)->tlb_mtx), MA_NOTOWNED)

#define	TCPID_BUCKET_REF(tlb)		refcount_acquire(&((tlb)->tlb_refcnt))
#define	TCPID_BUCKET_UNREF(tlb)		refcount_release(&((tlb)->tlb_refcnt))

#define	TCPLOG_EXPIREQ_LOCK()		mtx_lock(&tcp_log_expireq_mtx)
#define	TCPLOG_EXPIREQ_UNLOCK()		mtx_unlock(&tcp_log_expireq_mtx)

SLIST_HEAD(tcp_log_id_head, tcp_log_id_node);

struct tcp_log_id_bucket
{
	/*
	 * tlb_id must be first. This lets us use strcmp on
	 * (struct tcp_log_id_bucket *) and (char *) interchangeably.
	 */
	char			tlb_id[TCP_LOG_ID_LEN];
	char			tlb_tag[TCP_LOG_TAG_LEN];
	RB_ENTRY(tcp_log_id_bucket) tlb_rb;
	struct tcp_log_id_head	tlb_head;
	struct mtx		tlb_mtx;
	volatile u_int		tlb_refcnt;
	volatile u_int		tlb_reqcnt;
	uint32_t		tlb_loglimit;
	uint8_t			tlb_logstate;
};

struct tcp_log_id_node
{
	SLIST_ENTRY(tcp_log_id_node) tln_list;
	STAILQ_ENTRY(tcp_log_id_node) tln_expireq; /* Locked by the expireq lock */
	sbintime_t		tln_expiretime;	/* Locked by the expireq lock */

	/*
	 * If INP is NULL, that means the connection has closed. We've
	 * saved the connection endpoint information and the log entries
	 * in the tln_ie and tln_entries members. We've also saved a pointer
	 * to the enclosing bucket here. If INP is not NULL, the information is
	 * in the PCB and not here.
	 */
	struct inpcb		*tln_inp;
	struct tcpcb		*tln_tp;
	struct tcp_log_id_bucket *tln_bucket;
	struct in_endpoints	tln_ie;
	struct tcp_log_stailq	tln_entries;
	int			tln_count;
	volatile int		tln_closed;
	uint8_t			tln_af;
};

enum tree_lock_state {
	TREE_UNLOCKED = 0,
	TREE_RLOCKED,
	TREE_WLOCKED,
};

/* Do we want to select this session for auto-logging? */
static __inline bool
tcp_log_selectauto(void)
{

	/*
	 * If we are doing auto-capturing, figure out whether we will capture
	 * this session.
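	 * For example, with net.inet.tcp.bb.log_auto_ratio set to 100, the
	 * shared counter below is bumped once per eligible session and
	 * roughly every 100th session (whenever the pre-increment value is
	 * a multiple of the ratio) is selected.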
	 */
	if (tcp_log_auto_ratio &&
	    (tcp_disable_all_bb_logs == 0) &&
	    (atomic_fetchadd_long(&tcp_log_auto_ratio_cur, 1) %
	    tcp_log_auto_ratio) == 0)
		return (true);
	return (false);
}

static __inline int
tcp_log_id_cmp(struct tcp_log_id_bucket *a, struct tcp_log_id_bucket *b)
{
	KASSERT(a != NULL, ("tcp_log_id_cmp: argument a is unexpectedly NULL"));
	KASSERT(b != NULL, ("tcp_log_id_cmp: argument b is unexpectedly NULL"));
	return strncmp(a->tlb_id, b->tlb_id, TCP_LOG_ID_LEN);
}

RB_GENERATE_STATIC(tcp_log_id_tree, tcp_log_id_bucket, tlb_rb, tcp_log_id_cmp)

static __inline void
tcp_log_id_validate_tree_lock(int tree_locked)
{

#ifdef INVARIANTS
	switch (tree_locked) {
	case TREE_WLOCKED:
		TCPID_TREE_WLOCK_ASSERT();
		break;
	case TREE_RLOCKED:
		TCPID_TREE_RLOCK_ASSERT();
		break;
	case TREE_UNLOCKED:
		TCPID_TREE_UNLOCK_ASSERT();
		break;
	default:
		kassert_panic("%s:%d: unknown tree lock state", __func__,
		    __LINE__);
	}
#endif
}

static __inline void
tcp_log_remove_bucket(struct tcp_log_id_bucket *tlb)
{

	TCPID_TREE_WLOCK_ASSERT();
	KASSERT(SLIST_EMPTY(&tlb->tlb_head),
	    ("%s: Attempt to remove non-empty bucket", __func__));
	if (RB_REMOVE(tcp_log_id_tree, &tcp_log_id_head, tlb) == NULL) {
#ifdef INVARIANTS
		kassert_panic("%s:%d: error removing element from tree",
		    __func__, __LINE__);
#endif
	}
	TCPID_BUCKET_LOCK_DESTROY(tlb);
	counter_u64_add(tcp_log_pcb_ids_cur, (int64_t)-1);
	uma_zfree(tcp_log_bucket_zone, tlb);
}

/*
 * Call with a referenced and locked bucket.
 * Will return true if the bucket was freed; otherwise, false.
 * tlb: The bucket to unreference.
 * tree_locked: A pointer to the state of the tree lock. If the tree lock
 *    state changes, the function will update it.
 * inp: If not NULL and the function needs to drop the inp lock to relock the
 *    tree, it will do so. (The caller must ensure inp will not become invalid,
 *    probably by holding a reference to it.)
 */
static bool
tcp_log_unref_bucket(struct tcp_log_id_bucket *tlb, int *tree_locked,
    struct inpcb *inp)
{

	KASSERT(tlb != NULL, ("%s: called with NULL tlb", __func__));
	KASSERT(tree_locked != NULL, ("%s: called with NULL tree_locked",
	    __func__));

	tcp_log_id_validate_tree_lock(*tree_locked);

	/*
	 * Did we hold the last reference on the tlb? If so, we may need
	 * to free it. (Note that we can realistically only execute the
	 * loop twice: once without a write lock and once with a write
	 * lock.)
	 */
	while (TCPID_BUCKET_UNREF(tlb)) {
		/*
		 * We need a write lock on the tree to free this.
		 * If we can upgrade the tree lock, this is "easy". If we
		 * can't upgrade the tree lock, we need to do this the
		 * "hard" way: unwind all our locks and relock everything.
		 * In the meantime, anything could have changed. We even
		 * need to validate that we still need to free the bucket.
		 */
		if (*tree_locked == TREE_RLOCKED && TCPID_TREE_UPGRADE())
			*tree_locked = TREE_WLOCKED;
		else if (*tree_locked != TREE_WLOCKED) {
			TCPID_BUCKET_REF(tlb);
			if (inp != NULL)
				INP_WUNLOCK(inp);
			TCPID_BUCKET_UNLOCK(tlb);
			if (*tree_locked == TREE_RLOCKED)
				TCPID_TREE_RUNLOCK();
			TCPID_TREE_WLOCK();
			*tree_locked = TREE_WLOCKED;
			TCPID_BUCKET_LOCK(tlb);
			if (inp != NULL)
				INP_WLOCK(inp);
			continue;
		}

		/*
		 * We have an empty bucket and a write lock on the tree.
		 * Remove the empty bucket.
		 */
		tcp_log_remove_bucket(tlb);
		return (true);
	}
	return (false);
}

/*
 * Call with a locked bucket. This function will release the lock on the
 * bucket before returning.
 *
 * The caller is responsible for freeing the tp->t_lin/tln node!
 *
 * Note: one of tp or both tlb and tln must be supplied.
 *
 * inp: A pointer to the inp. If the function needs to drop the inp lock to
 *    acquire the tree write lock, it will do so. (The caller must ensure inp
 *    will not become invalid, probably by holding a reference to it.)
 * tp: A pointer to the tcpcb. (optional; if specified, tlb and tln are ignored)
 * tlb: A pointer to the bucket. (optional; ignored if tp is specified)
 * tln: A pointer to the node. (optional; ignored if tp is specified)
 * tree_locked: A pointer to the state of the tree lock. If the tree lock
 *    state changes, the function will update it.
 *
 * Will return true if the INP lock was reacquired; otherwise, false.
 */
static bool
tcp_log_remove_id_node(struct inpcb *inp, struct tcpcb *tp,
    struct tcp_log_id_bucket *tlb, struct tcp_log_id_node *tln,
    int *tree_locked)
{
	int orig_tree_locked;

	KASSERT(tp != NULL || (tlb != NULL && tln != NULL),
	    ("%s: called with tp=%p, tlb=%p, tln=%p", __func__,
	    tp, tlb, tln));
	KASSERT(tree_locked != NULL, ("%s: called with NULL tree_locked",
	    __func__));

	if (tp != NULL) {
		tlb = tp->t_lib;
		tln = tp->t_lin;
		KASSERT(tlb != NULL, ("%s: unexpectedly NULL tlb", __func__));
		KASSERT(tln != NULL, ("%s: unexpectedly NULL tln", __func__));
	}

	tcp_log_id_validate_tree_lock(*tree_locked);
	TCPID_BUCKET_LOCK_ASSERT(tlb);

	/*
	 * Remove the node, clear the log bucket and node from the TCPCB, and
	 * decrement the bucket refcount. In the process, if this is the
	 * last reference, the bucket will be freed.
	 */
	SLIST_REMOVE(&tlb->tlb_head, tln, tcp_log_id_node, tln_list);
	if (tp != NULL) {
		tp->t_lib = NULL;
		tp->t_lin = NULL;
	}
	orig_tree_locked = *tree_locked;
	if (!tcp_log_unref_bucket(tlb, tree_locked, inp))
		TCPID_BUCKET_UNLOCK(tlb);
	return (*tree_locked != orig_tree_locked);
}

#define	RECHECK_INP_CLEAN(cleanup)	do {				\
	if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {		\
		rv = ECONNRESET;					\
		cleanup;						\
		goto done;						\
	}								\
	tp = intotcpcb(inp);						\
} while (0)

#define	RECHECK_INP()	RECHECK_INP_CLEAN(/* noop */)

static void
tcp_log_grow_tlb(char *tlb_id, struct tcpcb *tp)
{

	INP_WLOCK_ASSERT(tp->t_inpcb);

#ifdef STATS
	if (V_tcp_perconn_stats_enable == 2 && tp->t_stats == NULL)
		(void)tcp_stats_sample_rollthedice(tp, tlb_id, strlen(tlb_id));
#endif
}

static void
tcp_log_increment_reqcnt(struct tcp_log_id_bucket *tlb)
{

	atomic_fetchadd_int(&tlb->tlb_reqcnt, 1);
}

/*
 * Associate the specified tag with a particular TCP log ID.
 * Called with INPCB locked. Returns with it unlocked.
 * Returns 0 on success or EOPNOTSUPP if the connection has no TCP log ID.
 */
int
tcp_log_set_tag(struct tcpcb *tp, char *tag)
{
	struct tcp_log_id_bucket *tlb;
	int tree_locked;

	INP_WLOCK_ASSERT(tp->t_inpcb);

	tree_locked = TREE_UNLOCKED;
	tlb = tp->t_lib;
	if (tlb == NULL) {
		INP_WUNLOCK(tp->t_inpcb);
		return (EOPNOTSUPP);
	}

	TCPID_BUCKET_REF(tlb);
	INP_WUNLOCK(tp->t_inpcb);
	TCPID_BUCKET_LOCK(tlb);
	strlcpy(tlb->tlb_tag, tag, TCP_LOG_TAG_LEN);
	if (!tcp_log_unref_bucket(tlb, &tree_locked, NULL))
		TCPID_BUCKET_UNLOCK(tlb);

	if (tree_locked == TREE_WLOCKED) {
		TCPID_TREE_WLOCK_ASSERT();
		TCPID_TREE_WUNLOCK();
	} else if (tree_locked == TREE_RLOCKED) {
		TCPID_TREE_RLOCK_ASSERT();
		TCPID_TREE_RUNLOCK();
	} else
		TCPID_TREE_UNLOCK_ASSERT();

	return (0);
}

/*
 * Set the TCP log ID for a TCPCB.
 * Called with INPCB locked. Returns with it unlocked.
 */
int
tcp_log_set_id(struct tcpcb *tp, char *id)
{
	struct tcp_log_id_bucket *tlb, *tmp_tlb;
	struct tcp_log_id_node *tln;
	struct inpcb *inp;
	int tree_locked, rv;
	bool bucket_locked;

	tlb = NULL;
	tln = NULL;
	inp = tp->t_inpcb;
	tree_locked = TREE_UNLOCKED;
	bucket_locked = false;

restart:
	INP_WLOCK_ASSERT(inp);

	/* See if the ID is unchanged. */
	if ((tp->t_lib != NULL && !strcmp(tp->t_lib->tlb_id, id)) ||
	    (tp->t_lib == NULL && *id == 0)) {
		if (tp->t_lib != NULL) {
			tcp_log_increment_reqcnt(tp->t_lib);
			if ((tp->t_lib->tlb_logstate) &&
			    (tp->t_log_state_set == 0)) {
				/* Clone in any logging */

				tp->t_logstate = tp->t_lib->tlb_logstate;
			}
			if ((tp->t_lib->tlb_loglimit) &&
			    (tp->t_log_state_set == 0)) {
				/* We also have a limit set */

				tp->t_loglimit = tp->t_lib->tlb_loglimit;
			}
		}
		rv = 0;
		goto done;
	}

	/*
	 * If the TCPCB had a previous ID, we need to extricate it from
	 * the previous list.
	 *
	 * Drop the TCPCB lock and lock the tree and the bucket.
	 * Because this is called in the socket context, we (theoretically)
	 * don't need to worry about the INPCB completely going away
	 * while we are gone.
	 */
	if (tp->t_lib != NULL) {
		tlb = tp->t_lib;
		TCPID_BUCKET_REF(tlb);
		INP_WUNLOCK(inp);

		if (tree_locked == TREE_UNLOCKED) {
			TCPID_TREE_RLOCK();
			tree_locked = TREE_RLOCKED;
		}
		TCPID_BUCKET_LOCK(tlb);
		bucket_locked = true;
		INP_WLOCK(inp);

		/*
		 * Unreference the bucket. If our bucket went away, it is no
		 * longer locked or valid.
		 */
		if (tcp_log_unref_bucket(tlb, &tree_locked, inp)) {
			bucket_locked = false;
			tlb = NULL;
		}

		/* Validate the INP. */
		RECHECK_INP();

		/*
		 * Evaluate whether the bucket changed while we were unlocked.
		 *
		 * Possible scenarios here:
		 * 1. Bucket is unchanged and the same one we started with.
		 * 2. The TCPCB no longer has a bucket and our bucket was
		 *    freed.
		 * 3. The TCPCB has a new bucket, whether or not ours was
		 *    freed.
		 * 4. The TCPCB no longer has a bucket and our bucket was
		 *    not freed.
		 *
		 * In cases 2-4, we will start over. In case 1, we will
		 * proceed here to remove the bucket.
		 */
		if (tlb == NULL || tp->t_lib != tlb) {
			KASSERT(bucket_locked || tlb == NULL,
			    ("%s: bucket_locked (%d) and tlb (%p) are "
			    "inconsistent", __func__, bucket_locked, tlb));

			if (bucket_locked) {
				TCPID_BUCKET_UNLOCK(tlb);
				bucket_locked = false;
				tlb = NULL;
			}
			goto restart;
		}

		/*
		 * Store the (struct tcp_log_id_node) for reuse. Then, remove
		 * it from the bucket. In the process, we may end up relocking.
		 * If so, we need to validate that the INP is still valid, and
		 * the TCPCB entries match what we expect.
		 *
		 * We will clear tlb and change the bucket_locked state just
		 * before calling tcp_log_remove_id_node(), since that function
		 * will unlock the bucket.
		 */
		if (tln != NULL)
			uma_zfree(tcp_log_node_zone, tln);
		tln = tp->t_lin;
		tlb = NULL;
		bucket_locked = false;
		if (tcp_log_remove_id_node(inp, tp, NULL, NULL, &tree_locked)) {
			RECHECK_INP();

			/*
			 * If the TCPCB moved to a new bucket while we had
			 * dropped the lock, restart.
			 */
			if (tp->t_lib != NULL || tp->t_lin != NULL)
				goto restart;
		}

		/*
		 * Yay! We successfully removed the TCPCB from its old
		 * bucket. Phew!
		 *
		 * On to bigger and better things...
		 */
	}

	/* At this point, the TCPCB should not be in any bucket. */
	KASSERT(tp->t_lib == NULL, ("%s: tp->t_lib is not NULL", __func__));

	/*
	 * If the new ID is not empty, we need to now assign this TCPCB to a
	 * new bucket.
	 */
	if (*id) {
		/* Get a new tln, if we don't already have one to reuse. */
		if (tln == NULL) {
			tln = uma_zalloc(tcp_log_node_zone, M_NOWAIT | M_ZERO);
			if (tln == NULL) {
				rv = ENOBUFS;
				goto done;
			}
			tln->tln_inp = inp;
			tln->tln_tp = tp;
		}

		/*
		 * Drop the INP lock for a bit. We don't need it, and dropping
		 * it prevents lock order reversals.
		 */
		INP_WUNLOCK(inp);

		/* Make sure we have at least a read lock on the tree. */
		tcp_log_id_validate_tree_lock(tree_locked);
		if (tree_locked == TREE_UNLOCKED) {
			TCPID_TREE_RLOCK();
			tree_locked = TREE_RLOCKED;
		}

refind:
		/*
		 * Remember that we constructed (struct tcp_log_id_bucket) so
		 * that the ID is the first field; therefore, we can safely
		 * cast the id to a bucket pointer for the purposes of finding.
		 */
		KASSERT(tlb == NULL, ("%s:%d tlb unexpectedly non-NULL",
		    __func__, __LINE__));
		tmp_tlb = RB_FIND(tcp_log_id_tree, &tcp_log_id_head,
		    (struct tcp_log_id_bucket *) id);

		/*
		 * If we didn't find a matching bucket, we need to add a new
		 * one. This requires a write lock. But, of course, we will
		 * need to recheck some things when we re-acquire the lock.
		 */
		if (tmp_tlb == NULL && tree_locked != TREE_WLOCKED) {
			tree_locked = TREE_WLOCKED;
			if (!TCPID_TREE_UPGRADE()) {
				TCPID_TREE_RUNLOCK();
				TCPID_TREE_WLOCK();

				/*
				 * The tree may have changed while we were
				 * unlocked.
				 */
				goto refind;
			}
		}

		/* If we need to add a new bucket, do it now. */
		if (tmp_tlb == NULL) {
			/* Allocate new bucket. */
			tlb = uma_zalloc(tcp_log_bucket_zone, M_NOWAIT);
			if (tlb == NULL) {
				rv = ENOBUFS;
				goto done_noinp;
			}
			counter_u64_add(tcp_log_pcb_ids_cur, 1);
			counter_u64_add(tcp_log_pcb_ids_tot, 1);

			if ((tcp_log_auto_all == false) &&
			    tcp_log_auto_mode &&
			    tcp_log_selectauto()) {
				/* Save off the log state */
				tlb->tlb_logstate = tcp_log_auto_mode;
			} else
				tlb->tlb_logstate = TCP_LOG_STATE_OFF;
			tlb->tlb_loglimit = 0;
			tlb->tlb_tag[0] = '\0'; /* Default to an empty tag. */

			/*
			 * Copy the ID to the bucket.
			 * NB: Don't use strlcpy() unless you are sure
			 * we've always validated NULL termination.
			 *
			 * TODO: When I'm done writing this, see if we
			 * have correctly validated NULL termination and
			 * can use strlcpy(). :-)
			 */
			strncpy(tlb->tlb_id, id, TCP_LOG_ID_LEN - 1);
			tlb->tlb_id[TCP_LOG_ID_LEN - 1] = '\0';

			/*
			 * Take the refcount for the first node and go ahead
			 * and lock this. Note that we zero the tlb_mtx
			 * structure, since 0xdeadc0de flips the right bits
			 * for the code to think that this mutex has already
			 * been initialized. :-(
			 */
			SLIST_INIT(&tlb->tlb_head);
			refcount_init(&tlb->tlb_refcnt, 1);
			tlb->tlb_reqcnt = 1;
			memset(&tlb->tlb_mtx, 0, sizeof(struct mtx));
			TCPID_BUCKET_LOCK_INIT(tlb);
			TCPID_BUCKET_LOCK(tlb);
			bucket_locked = true;

#define	FREE_NEW_TLB()	do {					\
	TCPID_BUCKET_LOCK_DESTROY(tlb);				\
	uma_zfree(tcp_log_bucket_zone, tlb);			\
	counter_u64_add(tcp_log_pcb_ids_cur, (int64_t)-1);	\
	counter_u64_add(tcp_log_pcb_ids_tot, (int64_t)-1);	\
	bucket_locked = false;					\
	tlb = NULL;						\
} while (0)
			/*
			 * Relock the INP and make sure we are still
			 * unassigned.
			 */
			INP_WLOCK(inp);
			RECHECK_INP_CLEAN(FREE_NEW_TLB());
			if (tp->t_lib != NULL) {
				FREE_NEW_TLB();
				goto restart;
			}

			/* Add the new bucket to the tree. */
			tmp_tlb = RB_INSERT(tcp_log_id_tree, &tcp_log_id_head,
			    tlb);
			KASSERT(tmp_tlb == NULL,
			    ("%s: Unexpected conflicting bucket (%p) while "
			    "adding new bucket (%p)", __func__, tmp_tlb, tlb));

			/*
			 * If we found a conflicting bucket, free the new
			 * one we made and fall through to use the existing
			 * bucket.
			 */
			if (tmp_tlb != NULL) {
				FREE_NEW_TLB();
				INP_WUNLOCK(inp);
			}
#undef	FREE_NEW_TLB
		}

		/* If we found an existing bucket, use it. */
		if (tmp_tlb != NULL) {
			tlb = tmp_tlb;
			TCPID_BUCKET_LOCK(tlb);
			bucket_locked = true;

			/*
			 * Relock the INP and make sure we are still
			 * unassigned.
			 */
			INP_UNLOCK_ASSERT(inp);
			INP_WLOCK(inp);
			RECHECK_INP();
			if (tp->t_lib != NULL) {
				TCPID_BUCKET_UNLOCK(tlb);
				bucket_locked = false;
				tlb = NULL;
				goto restart;
			}

			/* Take a reference on the bucket. */
			TCPID_BUCKET_REF(tlb);

			/* Record the request. */
			tcp_log_increment_reqcnt(tlb);
		}

		tcp_log_grow_tlb(tlb->tlb_id, tp);

		/* Add the new node to the list. */
		SLIST_INSERT_HEAD(&tlb->tlb_head, tln, tln_list);
		tp->t_lib = tlb;
		tp->t_lin = tln;
		if (tp->t_lib->tlb_logstate) {
			/* Clone in any logging */

			tp->t_logstate = tp->t_lib->tlb_logstate;
		}
		if (tp->t_lib->tlb_loglimit) {
			/* The loglimit too */

			tp->t_loglimit = tp->t_lib->tlb_loglimit;
		}
		tln = NULL;
	}

	rv = 0;

done:
	/* Unlock things, as needed, and return. */
	INP_WUNLOCK(inp);
done_noinp:
	INP_UNLOCK_ASSERT(inp);
	if (bucket_locked) {
		TCPID_BUCKET_LOCK_ASSERT(tlb);
		TCPID_BUCKET_UNLOCK(tlb);
	} else if (tlb != NULL)
		TCPID_BUCKET_UNLOCK_ASSERT(tlb);
	if (tree_locked == TREE_WLOCKED) {
		TCPID_TREE_WLOCK_ASSERT();
		TCPID_TREE_WUNLOCK();
	} else if (tree_locked == TREE_RLOCKED) {
		TCPID_TREE_RLOCK_ASSERT();
		TCPID_TREE_RUNLOCK();
	} else
		TCPID_TREE_UNLOCK_ASSERT();
	if (tln != NULL)
		uma_zfree(tcp_log_node_zone, tln);
	return (rv);
}

/*
 * Get the TCP log ID for a TCPCB.
 * Called with INPCB locked.
 * 'buf' must point to a buffer that is at least TCP_LOG_ID_LEN bytes long.
 * Returns number of bytes copied.
 */
size_t
tcp_log_get_id(struct tcpcb *tp, char *buf)
{
	size_t len;

	INP_LOCK_ASSERT(tp->t_inpcb);
	if (tp->t_lib != NULL) {
		len = strlcpy(buf, tp->t_lib->tlb_id, TCP_LOG_ID_LEN);
		KASSERT(len < TCP_LOG_ID_LEN,
		    ("%s:%d: tp->t_lib->tlb_id too long (%zu)",
		    __func__, __LINE__, len));
	} else {
		*buf = '\0';
		len = 0;
	}
	return (len);
}

/*
 * Get the tag associated with the TCPCB's log ID.
 * Called with INPCB locked. Returns with it unlocked.
 * 'buf' must point to a buffer that is at least TCP_LOG_TAG_LEN bytes long.
 * Returns number of bytes copied.
 */
size_t
tcp_log_get_tag(struct tcpcb *tp, char *buf)
{
	struct tcp_log_id_bucket *tlb;
	size_t len;
	int tree_locked;

	INP_WLOCK_ASSERT(tp->t_inpcb);

	tree_locked = TREE_UNLOCKED;
	tlb = tp->t_lib;

	if (tlb != NULL) {
		TCPID_BUCKET_REF(tlb);
		INP_WUNLOCK(tp->t_inpcb);
		TCPID_BUCKET_LOCK(tlb);
		len = strlcpy(buf, tlb->tlb_tag, TCP_LOG_TAG_LEN);
		KASSERT(len < TCP_LOG_TAG_LEN,
		    ("%s:%d: tp->t_lib->tlb_tag too long (%zu)",
		    __func__, __LINE__, len));
		if (!tcp_log_unref_bucket(tlb, &tree_locked, NULL))
			TCPID_BUCKET_UNLOCK(tlb);

		if (tree_locked == TREE_WLOCKED) {
			TCPID_TREE_WLOCK_ASSERT();
			TCPID_TREE_WUNLOCK();
		} else if (tree_locked == TREE_RLOCKED) {
			TCPID_TREE_RLOCK_ASSERT();
			TCPID_TREE_RUNLOCK();
		} else
			TCPID_TREE_UNLOCK_ASSERT();
	} else {
		INP_WUNLOCK(tp->t_inpcb);
		*buf = '\0';
		len = 0;
	}

	return (len);
}

/*
 * Get number of connections with the same log ID.
 * Log ID is taken from given TCPCB.
 * Called with INPCB locked.
 */
u_int
tcp_log_get_id_cnt(struct tcpcb *tp)
{

	INP_WLOCK_ASSERT(tp->t_inpcb);
	return ((tp->t_lib == NULL) ? 0 : tp->t_lib->tlb_refcnt);
}
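
/*
 * The ID get/set functions above are normally reached from the TCP socket
 * option path. A userspace sketch (illustrative only, assuming the standard
 * FreeBSD TCP_LOGID socket option) might look like:
 *
 *	char id[TCP_LOG_ID_LEN] = "my-service";
 *	socklen_t len = sizeof(id);
 *
 *	setsockopt(s, IPPROTO_TCP, TCP_LOGID, id, strlen(id) + 1);
 *	...
 *	getsockopt(s, IPPROTO_TCP, TCP_LOGID, id, &len);
 */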

#ifdef TCPLOG_DEBUG_RINGBUF
/*
 * Functions/macros to increment/decrement reference count for a log
 * entry. This should catch when we do a double-free/double-remove or
 * a double-add.
 */
static inline void
_tcp_log_entry_refcnt_add(struct tcp_log_mem *log_entry, const char *func,
    int line)
{
	int refcnt;

	refcnt = atomic_fetchadd_int(&log_entry->tlm_refcnt, 1);
	if (refcnt != 0)
		panic("%s:%d: log_entry(%p)->tlm_refcnt is %d (expected 0)",
		    func, line, log_entry, refcnt);
}
#define	tcp_log_entry_refcnt_add(l)	\
    _tcp_log_entry_refcnt_add((l), __func__, __LINE__)

static inline void
_tcp_log_entry_refcnt_rem(struct tcp_log_mem *log_entry, const char *func,
    int line)
{
	int refcnt;

	refcnt = atomic_fetchadd_int(&log_entry->tlm_refcnt, -1);
	if (refcnt != 1)
		panic("%s:%d: log_entry(%p)->tlm_refcnt is %d (expected 1)",
		    func, line, log_entry, refcnt);
}
#define	tcp_log_entry_refcnt_rem(l)	\
    _tcp_log_entry_refcnt_rem((l), __func__, __LINE__)

#else /* !TCPLOG_DEBUG_RINGBUF */

#define	tcp_log_entry_refcnt_add(l)
#define	tcp_log_entry_refcnt_rem(l)

#endif

/*
 * Cleanup after removing a log entry, but only decrement the count if we
 * are running INVARIANTS.
 */
static inline void
tcp_log_free_log_common(struct tcp_log_mem *log_entry, int *count __unused)
{

	uma_zfree(tcp_log_zone, log_entry);
#ifdef INVARIANTS
	(*count)--;
	KASSERT(*count >= 0,
	    ("%s: count unexpectedly negative", __func__));
#endif
}

static void
tcp_log_free_entries(struct tcp_log_stailq *head, int *count)
{
	struct tcp_log_mem *log_entry;

	/* Free the entries. */
	while ((log_entry = STAILQ_FIRST(head)) != NULL) {
		STAILQ_REMOVE_HEAD(head, tlm_queue);
		tcp_log_entry_refcnt_rem(log_entry);
		tcp_log_free_log_common(log_entry, count);
	}
}

/* Cleanup after removing a log entry. */
static inline void
tcp_log_remove_log_cleanup(struct tcpcb *tp, struct tcp_log_mem *log_entry)
{
	uma_zfree(tcp_log_zone, log_entry);
	tp->t_lognum--;
	KASSERT(tp->t_lognum >= 0,
	    ("%s: tp->t_lognum unexpectedly negative", __func__));
}

/* Remove a log entry from the head of a list. */
static inline void
tcp_log_remove_log_head(struct tcpcb *tp, struct tcp_log_mem *log_entry)
{

	KASSERT(log_entry == STAILQ_FIRST(&tp->t_logs),
	    ("%s: attempt to remove non-HEAD log entry", __func__));
	STAILQ_REMOVE_HEAD(&tp->t_logs, tlm_queue);
	tcp_log_entry_refcnt_rem(log_entry);
	tcp_log_remove_log_cleanup(tp, log_entry);
}

#ifdef TCPLOG_DEBUG_RINGBUF
/*
 * Initialize the log entry's reference count, which we want to
 * survive allocations.
 */
static int
tcp_log_zone_init(void *mem, int size, int flags __unused)
{
	struct tcp_log_mem *tlm;

	KASSERT(size >= sizeof(struct tcp_log_mem),
	    ("%s: unexpectedly short (%d) allocation", __func__, size));
	tlm = (struct tcp_log_mem *)mem;
	tlm->tlm_refcnt = 0;
	return (0);
}

/*
 * Double check that the refcnt is zero on allocation and return.
 */
static int
tcp_log_zone_ctor(void *mem, int size, void *args __unused, int flags __unused)
{
	struct tcp_log_mem *tlm;

	KASSERT(size >= sizeof(struct tcp_log_mem),
	    ("%s: unexpectedly short (%d) allocation", __func__, size));
	tlm = (struct tcp_log_mem *)mem;
	if (tlm->tlm_refcnt != 0)
		panic("%s:%d: tlm(%p)->tlm_refcnt is %d (expected 0)",
		    __func__, __LINE__, tlm, tlm->tlm_refcnt);
	return (0);
}

static void
tcp_log_zone_dtor(void *mem, int size, void *args __unused)
{
	struct tcp_log_mem *tlm;

	KASSERT(size >= sizeof(struct tcp_log_mem),
	    ("%s: unexpectedly short (%d) allocation", __func__, size));
	tlm = (struct tcp_log_mem *)mem;
	if (tlm->tlm_refcnt != 0)
		panic("%s:%d: tlm(%p)->tlm_refcnt is %d (expected 0)",
		    __func__, __LINE__, tlm, tlm->tlm_refcnt);
}
#endif /* TCPLOG_DEBUG_RINGBUF */

/* Do global initialization. */
void
tcp_log_init(void)
{

	tcp_log_zone = uma_zcreate("tcp_log", sizeof(struct tcp_log_mem),
#ifdef TCPLOG_DEBUG_RINGBUF
	    tcp_log_zone_ctor, tcp_log_zone_dtor, tcp_log_zone_init,
#else
	    NULL, NULL, NULL,
#endif
	    NULL, UMA_ALIGN_PTR, 0);
	(void)uma_zone_set_max(tcp_log_zone, TCP_LOG_BUF_DEFAULT_GLOBAL_LIMIT);
	tcp_log_bucket_zone = uma_zcreate("tcp_log_bucket",
	    sizeof(struct tcp_log_id_bucket), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);
	tcp_log_node_zone = uma_zcreate("tcp_log_node",
	    sizeof(struct tcp_log_id_node), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);
#ifdef TCPLOG_DEBUG_COUNTERS
	tcp_log_queued = counter_u64_alloc(M_WAITOK);
	tcp_log_que_fail1 = counter_u64_alloc(M_WAITOK);
	tcp_log_que_fail2 = counter_u64_alloc(M_WAITOK);
	tcp_log_que_fail3 = counter_u64_alloc(M_WAITOK);
	tcp_log_que_fail4 = counter_u64_alloc(M_WAITOK);
	tcp_log_que_fail5 = counter_u64_alloc(M_WAITOK);
	tcp_log_que_copyout = counter_u64_alloc(M_WAITOK);
	tcp_log_que_read = counter_u64_alloc(M_WAITOK);
	tcp_log_que_freed = counter_u64_alloc(M_WAITOK);
#endif
	tcp_log_pcb_ids_cur = counter_u64_alloc(M_WAITOK);
	tcp_log_pcb_ids_tot = counter_u64_alloc(M_WAITOK);

	rw_init_flags(&tcp_id_tree_lock, "TCP ID tree", RW_NEW);
	mtx_init(&tcp_log_expireq_mtx, "TCP log expireq", NULL, MTX_DEF);
	callout_init(&tcp_log_expireq_callout, 1);
}

/* Do per-TCPCB initialization. */
void
tcp_log_tcpcbinit(struct tcpcb *tp)
{

	/* A new TCPCB should start out zero-initialized. */
	STAILQ_INIT(&tp->t_logs);

	/*
	 * If we are doing auto-capturing, figure out whether we will capture
	 * this session.
	 */
	tp->t_loglimit = tcp_log_session_limit;
	if ((tcp_log_auto_all == true) &&
	    tcp_log_auto_mode &&
	    tcp_log_selectauto()) {
		tp->t_logstate = tcp_log_auto_mode;
		tp->t_flags2 |= TF2_LOG_AUTO;
	}
}


/* Remove entries */
static void
tcp_log_expire(void *unused __unused)
{
	struct tcp_log_id_bucket *tlb;
	struct tcp_log_id_node *tln;
	sbintime_t expiry_limit;
	int tree_locked;

	TCPLOG_EXPIREQ_LOCK();
	if (callout_pending(&tcp_log_expireq_callout)) {
		/* Callout was reset. */
		TCPLOG_EXPIREQ_UNLOCK();
		return;
	}

	/*
	 * Process entries until we reach one that expires too far in the
	 * future. Look one second in the future.
	 */
	expiry_limit = getsbinuptime() + SBT_1S;
	tree_locked = TREE_UNLOCKED;

	while ((tln = STAILQ_FIRST(&tcp_log_expireq_head)) != NULL &&
	    tln->tln_expiretime <= expiry_limit) {
		if (!callout_active(&tcp_log_expireq_callout)) {
			/*
			 * Callout was stopped. I guess we should
			 * just quit at this point.
			 */
			TCPLOG_EXPIREQ_UNLOCK();
			return;
		}

		/*
		 * Remove the node from the head of the list and unlock
		 * the list. Change the expiry time to SBT_MAX as a signal
		 * to other threads that we now own this.
		 */
		STAILQ_REMOVE_HEAD(&tcp_log_expireq_head, tln_expireq);
		tln->tln_expiretime = SBT_MAX;
		TCPLOG_EXPIREQ_UNLOCK();

		/*
		 * Remove the node from the bucket.
		 */
		tlb = tln->tln_bucket;
		TCPID_BUCKET_LOCK(tlb);
		if (tcp_log_remove_id_node(NULL, NULL, tlb, tln, &tree_locked)) {
			tcp_log_id_validate_tree_lock(tree_locked);
			if (tree_locked == TREE_WLOCKED)
				TCPID_TREE_WUNLOCK();
			else
				TCPID_TREE_RUNLOCK();
			tree_locked = TREE_UNLOCKED;
		}

		/* Drop the INP reference. */
		INP_WLOCK(tln->tln_inp);
		if (!in_pcbrele_wlocked(tln->tln_inp))
			INP_WUNLOCK(tln->tln_inp);

		/* Free the log records. */
		tcp_log_free_entries(&tln->tln_entries, &tln->tln_count);

		/* Free the node. */
		uma_zfree(tcp_log_node_zone, tln);

		/* Relock the expiry queue. */
		TCPLOG_EXPIREQ_LOCK();
	}

	/*
	 * We've expired all the entries we can. Do we need to reschedule
	 * ourselves?
	 */
	callout_deactivate(&tcp_log_expireq_callout);
	if (tln != NULL) {
		/*
		 * Get max(now + TCP_LOG_EXPIRE_INTVL, tln->tln_expiretime) and
		 * set the next callout to that. (This helps ensure we generally
		 * run the callout no more often than desired.)
		 */
		expiry_limit = getsbinuptime() + TCP_LOG_EXPIRE_INTVL;
		if (expiry_limit < tln->tln_expiretime)
			expiry_limit = tln->tln_expiretime;
		callout_reset_sbt(&tcp_log_expireq_callout, expiry_limit,
		    SBT_1S, tcp_log_expire, NULL, C_ABSOLUTE);
	}

	/* We're done. */
	TCPLOG_EXPIREQ_UNLOCK();
	return;
}

/*
 * Move log data from the TCPCB to a new node. This will reset the TCPCB log
 * entries and log count; however, it will not touch other things from the
 * TCPCB (e.g. t_lin, t_lib).
 *
 * NOTE: Must hold a lock on the INP.
 */
static void
tcp_log_move_tp_to_node(struct tcpcb *tp, struct tcp_log_id_node *tln)
{

	INP_WLOCK_ASSERT(tp->t_inpcb);

	tln->tln_ie = tp->t_inpcb->inp_inc.inc_ie;
	if (tp->t_inpcb->inp_inc.inc_flags & INC_ISIPV6)
		tln->tln_af = AF_INET6;
	else
		tln->tln_af = AF_INET;
	tln->tln_entries = tp->t_logs;
	tln->tln_count = tp->t_lognum;
	tln->tln_bucket = tp->t_lib;

	/* Clear information from the PCB. */
	STAILQ_INIT(&tp->t_logs);
	tp->t_lognum = 0;
}

/* Do per-TCPCB cleanup */
void
tcp_log_tcpcbfini(struct tcpcb *tp)
{
	struct tcp_log_id_node *tln, *tln_first;
	struct tcp_log_mem *log_entry;
	sbintime_t callouttime;

	INP_WLOCK_ASSERT(tp->t_inpcb);

	TCP_LOG_EVENT(tp, NULL, NULL, NULL, TCP_LOG_CONNEND, 0, 0, NULL, false);

	/*
	 * If we were gathering packets to be automatically dumped, try to do
	 * it now. If this succeeds, the log information in the TCPCB will be
	 * cleared.
	 * Otherwise, we'll handle the log information as we do
	 * for other states.
	 */
	switch(tp->t_logstate) {
	case TCP_LOG_STATE_HEAD_AUTO:
		(void)tcp_log_dump_tp_logbuf(tp, "auto-dumped from head",
		    M_NOWAIT, false);
		break;
	case TCP_LOG_STATE_TAIL_AUTO:
		(void)tcp_log_dump_tp_logbuf(tp, "auto-dumped from tail",
		    M_NOWAIT, false);
		break;
	case TCP_LOG_STATE_CONTINUAL:
		(void)tcp_log_dump_tp_logbuf(tp, "auto-dumped from continual",
		    M_NOWAIT, false);
		break;
	}

	/*
	 * There are two ways we could keep logs: per-socket or per-ID. If
	 * we are tracking logs with an ID, then the logs survive the
	 * destruction of the TCPCB.
	 *
	 * If the TCPCB is associated with an ID node, move the logs from the
	 * TCPCB to the ID node. In theory, this is safe, for reasons which I
	 * will now explain for my own benefit when I next need to figure out
	 * this code. :-)
	 *
	 * We own the INP lock. Therefore, no one else can change the contents
	 * of this node (Rule C). Further, no one can remove this node from
	 * the bucket while we hold the lock (Rule D). Basically, no one can
	 * mess with this node. That leaves two states in which we could be:
	 *
	 * 1. Another thread is currently waiting to acquire the INP lock, with
	 *    plans to do something with this node. When we drop the INP lock,
	 *    they will have a chance to do that. They will recheck the
	 *    tln_closed field (see note to Rule C) and then acquire the
	 *    bucket lock before proceeding further.
	 *
	 * 2. Another thread will try to acquire a lock at some point in the
	 *    future. If they try to acquire a lock before we set the
	 *    tln_closed field, they will follow state #1. If they try to
	 *    acquire a lock after we set the tln_closed field, they will be
	 *    able to make changes to the node, at will, following Rule C.
	 *
	 * Therefore, we currently own this node and can make any changes
	 * we want. But, as soon as we set the tln_closed field to true, we
	 * have effectively dropped our lock on the node. (For this reason, we
	 * also need to make sure our writes are ordered correctly. An atomic
	 * operation with "release" semantics should be sufficient.)
	 */

	if (tp->t_lin != NULL) {
		/* Copy the relevant information to the log entry. */
		tln = tp->t_lin;
		KASSERT(tln->tln_inp == tp->t_inpcb,
		    ("%s: Mismatched inp (tln->tln_inp=%p, tp->t_inpcb=%p)",
		    __func__, tln->tln_inp, tp->t_inpcb));
		tcp_log_move_tp_to_node(tp, tln);

		/* Clear information from the PCB. */
		tp->t_lin = NULL;
		tp->t_lib = NULL;

		/*
		 * Take a reference on the INP. This ensures that the INP
		 * remains valid while the node is on the expiry queue. This
		 * ensures the INP is valid for other threads that may be
		 * racing to lock this node when we move it to the expire
		 * queue.
		 */
		in_pcbref(tp->t_inpcb);

		/*
		 * Store the entry on the expiry list. The exact behavior
		 * depends on whether we have entries to keep. If so, we
		 * put the entry at the tail of the list and expire in
		 * TCP_LOG_EXPIRE_TIME. Otherwise, we expire "now" and put
		 * the entry at the head of the list. (Handling the cleanup
		 * via the expiry timer lets us avoid locking messiness here.)
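		 * Concretely: a node that still has saved log entries lingers
		 * for TCP_LOG_EXPIRE_TIME (60 seconds by default) so they can
		 * still be collected, while a node with no entries is queued
		 * to be reaped on the next timer run.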
		 */
		tln->tln_expiretime = getsbinuptime();
		TCPLOG_EXPIREQ_LOCK();
		if (tln->tln_count) {
			tln->tln_expiretime += TCP_LOG_EXPIRE_TIME;
			if (STAILQ_EMPTY(&tcp_log_expireq_head) &&
			    !callout_active(&tcp_log_expireq_callout)) {
				/*
				 * We are adding the first entry and a callout
				 * is not currently scheduled; therefore, we
				 * need to schedule one.
				 */
				callout_reset_sbt(&tcp_log_expireq_callout,
				    tln->tln_expiretime, SBT_1S, tcp_log_expire,
				    NULL, C_ABSOLUTE);
			}
			STAILQ_INSERT_TAIL(&tcp_log_expireq_head, tln,
			    tln_expireq);
		} else {
			callouttime = tln->tln_expiretime +
			    TCP_LOG_EXPIRE_INTVL;
			tln_first = STAILQ_FIRST(&tcp_log_expireq_head);

			if ((tln_first == NULL ||
			    callouttime < tln_first->tln_expiretime) &&
			    (callout_pending(&tcp_log_expireq_callout) ||
			    !callout_active(&tcp_log_expireq_callout))) {
				/*
				 * The list is empty, or we want to run the
				 * expire code before the first entry's timer
				 * fires. Also, we are in a case where a callout
				 * is not actively running. We want to reset
				 * the callout to occur sooner.
				 */
				callout_reset_sbt(&tcp_log_expireq_callout,
				    callouttime, SBT_1S, tcp_log_expire, NULL,
				    C_ABSOLUTE);
			}

			/*
			 * Insert to the head, or just after the head, as
			 * appropriate. (This might result in small
			 * mis-orderings as a bunch of "expire now" entries
			 * gather at the start of the list, but that should
			 * not produce big problems, since the expire timer
			 * will walk through all of them.)
			 */
			if (tln_first == NULL ||
			    tln->tln_expiretime < tln_first->tln_expiretime)
				STAILQ_INSERT_HEAD(&tcp_log_expireq_head, tln,
				    tln_expireq);
			else
				STAILQ_INSERT_AFTER(&tcp_log_expireq_head,
				    tln_first, tln, tln_expireq);
		}
		TCPLOG_EXPIREQ_UNLOCK();

		/*
		 * We are done messing with the tln. After this point, we
		 * can't touch it. (Note that the "release" semantics should
		 * be included with the TCPLOG_EXPIREQ_UNLOCK() call above.
		 * Therefore, they should be unnecessary here. However, it
		 * seems like a good idea to include them anyway, since we
		 * really are releasing a lock here.)
		 */
		atomic_store_rel_int(&tln->tln_closed, 1);
	} else {
		/* Remove log entries. */
		while ((log_entry = STAILQ_FIRST(&tp->t_logs)) != NULL)
			tcp_log_remove_log_head(tp, log_entry);
		KASSERT(tp->t_lognum == 0,
		    ("%s: After freeing entries, tp->t_lognum=%d (expected 0)",
		    __func__, tp->t_lognum));
	}

	/*
	 * Change the log state to off (just in case anything tries to sneak
	 * in a last-minute log).
	 */
	tp->t_logstate = TCP_LOG_STATE_OFF;
}

static void
tcp_log_purge_tp_logbuf(struct tcpcb *tp)
{
	struct tcp_log_mem *log_entry;
	struct inpcb *inp;

	inp = tp->t_inpcb;
	INP_WLOCK_ASSERT(inp);
	if (tp->t_lognum == 0)
		return;

	while ((log_entry = STAILQ_FIRST(&tp->t_logs)) != NULL)
		tcp_log_remove_log_head(tp, log_entry);
	KASSERT(tp->t_lognum == 0,
	    ("%s: After freeing entries, tp->t_lognum=%d (expected 0)",
	    __func__, tp->t_lognum));
	tp->t_logstate = TCP_LOG_STATE_OFF;
}

/*
 * This logs an event for a TCP socket. Normally, this is called via
 * TCP_LOG_EVENT or TCP_LOG_EVENT_VERBOSE. See the documentation for
 * TCP_LOG_EVENT().
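 *
 * A typical call from a TCP stack (illustrative only; the argument values
 * shown are hypothetical) looks something like
 *
 *	TCP_LOG_EVENT(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_IN, 0,
 *	    tlen, NULL, true);
 *
 * which expands into a call to this function when logging is enabled for
 * the connection.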
 */

struct tcp_log_buffer *
tcp_log_event_(struct tcpcb *tp, struct tcphdr *th, struct sockbuf *rxbuf,
    struct sockbuf *txbuf, uint8_t eventid, int errornum, uint32_t len,
    union tcp_log_stackspecific *stackinfo, int th_hostorder,
    const char *output_caller, const char *func, int line, const struct timeval *itv)
{
	struct tcp_log_mem *log_entry;
	struct tcp_log_buffer *log_buf;
	int attempt_count = 0;
	struct tcp_log_verbose *log_verbose;
	uint32_t logsn;

	KASSERT((func == NULL && line == 0) || (func != NULL && line > 0),
	    ("%s called with inconsistent func (%p) and line (%d) arguments",
	    __func__, func, line));

	INP_WLOCK_ASSERT(tp->t_inpcb);
	if (tcp_disable_all_bb_logs) {
		/*
		 * The global shutdown logging switch has been thrown.
		 * Call the purge function, which frees the logs and
		 * turns off logging.
		 */
		tcp_log_purge_tp_logbuf(tp);
		return (NULL);
	}
	KASSERT(tp->t_logstate == TCP_LOG_STATE_HEAD ||
	    tp->t_logstate == TCP_LOG_STATE_TAIL ||
	    tp->t_logstate == TCP_LOG_STATE_CONTINUAL ||
	    tp->t_logstate == TCP_LOG_STATE_HEAD_AUTO ||
	    tp->t_logstate == TCP_LOG_STATE_TAIL_AUTO,
	    ("%s called with unexpected tp->t_logstate (%d)", __func__,
	    tp->t_logstate));

	/*
	 * Get the serial number. We do this early so it will
	 * increment even if we end up skipping the log entry for some
	 * reason.
	 */
	logsn = tp->t_logsn++;

	/*
	 * Can we get a new log entry? If so, increment the lognum counter
	 * here.
	 */
retry:
	if (tp->t_lognum < tp->t_loglimit) {
		if ((log_entry = uma_zalloc(tcp_log_zone, M_NOWAIT)) != NULL)
			tp->t_lognum++;
	} else
		log_entry = NULL;

	/* Do we need to try to reuse? */
	if (log_entry == NULL) {
		/*
		 * Sacrifice auto-logged sessions without a log ID if
		 * tcp_log_auto_all is false. (If they don't have a log
		 * ID by now, it is probable that either they won't get one
		 * or we are resource-constrained.)
		 */
		if (tp->t_lib == NULL && (tp->t_flags2 & TF2_LOG_AUTO) &&
		    !tcp_log_auto_all) {
			if (tcp_log_state_change(tp, TCP_LOG_STATE_CLEAR)) {
#ifdef INVARIANTS
				panic("%s:%d: tcp_log_state_change() failed "
				    "to set tp %p to TCP_LOG_STATE_CLEAR",
				    __func__, __LINE__, tp);
#endif
				tp->t_logstate = TCP_LOG_STATE_OFF;
			}
			return (NULL);
		}
		/*
		 * If we are in TCP_LOG_STATE_HEAD_AUTO state, try to dump
		 * the buffers. If successful, deactivate tracing. Otherwise,
		 * leave it active so we will retry.
		 */
		if (tp->t_logstate == TCP_LOG_STATE_HEAD_AUTO &&
		    !tcp_log_dump_tp_logbuf(tp, "auto-dumped from head",
		    M_NOWAIT, false)) {
			tp->t_logstate = TCP_LOG_STATE_OFF;
			return(NULL);
		} else if ((tp->t_logstate == TCP_LOG_STATE_CONTINUAL) &&
		    !tcp_log_dump_tp_logbuf(tp, "auto-dumped from continual",
		    M_NOWAIT, false)) {
			if (attempt_count == 0) {
				attempt_count++;
				goto retry;
			}
#ifdef TCPLOG_DEBUG_COUNTERS
			counter_u64_add(tcp_log_que_fail4, 1);
#endif
			return(NULL);
		} else if (tp->t_logstate == TCP_LOG_STATE_HEAD_AUTO)
			return(NULL);

		/* If in HEAD state, just deactivate the tracing and return. */
		if (tp->t_logstate == TCP_LOG_STATE_HEAD) {
			tp->t_logstate = TCP_LOG_STATE_OFF;
			return(NULL);
		}

		/*
		 * Get a buffer to reuse.  If that fails, just give up.
		 * (We can't log anything without a buffer in which to
		 * put it.)
		 *
		 * Note that we don't change the t_lognum counter
		 * here. Because we are re-using the buffer, the total
		 * number won't change.
		 */
		if ((log_entry = STAILQ_FIRST(&tp->t_logs)) == NULL)
			return(NULL);
		STAILQ_REMOVE_HEAD(&tp->t_logs, tlm_queue);
		tcp_log_entry_refcnt_rem(log_entry);
	}

	KASSERT(log_entry != NULL,
	    ("%s: log_entry unexpectedly NULL", __func__));

	/* Extract the log buffer and verbose buffer pointers. */
	log_buf = &log_entry->tlm_buf;
	log_verbose = &log_entry->tlm_v;

	/* Basic entries. */
	if (itv == NULL)
		getmicrouptime(&log_buf->tlb_tv);
	else
		memcpy(&log_buf->tlb_tv, itv, sizeof(struct timeval));
	log_buf->tlb_ticks = ticks;
	log_buf->tlb_sn = logsn;
	log_buf->tlb_stackid = tp->t_fb->tfb_id;
	log_buf->tlb_eventid = eventid;
	log_buf->tlb_eventflags = 0;
	log_buf->tlb_errno = errornum;

	/* Socket buffers */
	if (rxbuf != NULL) {
		log_buf->tlb_eventflags |= TLB_FLAG_RXBUF;
		log_buf->tlb_rxbuf.tls_sb_acc = rxbuf->sb_acc;
		log_buf->tlb_rxbuf.tls_sb_ccc = rxbuf->sb_ccc;
		log_buf->tlb_rxbuf.tls_sb_spare = 0;
	}
	if (txbuf != NULL) {
		log_buf->tlb_eventflags |= TLB_FLAG_TXBUF;
		log_buf->tlb_txbuf.tls_sb_acc = txbuf->sb_acc;
		log_buf->tlb_txbuf.tls_sb_ccc = txbuf->sb_ccc;
		log_buf->tlb_txbuf.tls_sb_spare = 0;
	}
	/* Copy values from tp to the log entry. */
#define	COPY_STAT(f)	log_buf->tlb_ ## f = tp->f
#define	COPY_STAT_T(f)	log_buf->tlb_ ## f = tp->t_ ## f
	COPY_STAT_T(state);
	COPY_STAT_T(starttime);
	COPY_STAT(iss);
	COPY_STAT_T(flags);
	COPY_STAT(snd_una);
	COPY_STAT(snd_max);
	COPY_STAT(snd_cwnd);
	COPY_STAT(snd_nxt);
	COPY_STAT(snd_recover);
	COPY_STAT(snd_wnd);
	COPY_STAT(snd_ssthresh);
	COPY_STAT_T(srtt);
	COPY_STAT_T(rttvar);
	COPY_STAT(rcv_up);
	COPY_STAT(rcv_adv);
	COPY_STAT(rcv_nxt);
	COPY_STAT(sack_newdata);
	COPY_STAT(rcv_wnd);
	COPY_STAT_T(dupacks);
	COPY_STAT_T(segqlen);
	COPY_STAT(snd_numholes);
	COPY_STAT(snd_scale);
	COPY_STAT(rcv_scale);
#undef COPY_STAT
#undef COPY_STAT_T
	log_buf->tlb_flex1 = 0;
	log_buf->tlb_flex2 = 0;
	/* Copy stack-specific info. */
	if (stackinfo != NULL) {
		memcpy(&log_buf->tlb_stackinfo, stackinfo,
		    sizeof(log_buf->tlb_stackinfo));
		log_buf->tlb_eventflags |= TLB_FLAG_STACKINFO;
	}

	/* The packet */
	log_buf->tlb_len = len;
	if (th) {
		int optlen;

		log_buf->tlb_eventflags |= TLB_FLAG_HDR;
		log_buf->tlb_th = *th;
		if (th_hostorder)
			tcp_fields_to_net(&log_buf->tlb_th);
		optlen = (th->th_off << 2) - sizeof (struct tcphdr);
		if (optlen > 0)
			memcpy(log_buf->tlb_opts, th + 1, optlen);
	}

	/* Verbose information */
	if (func != NULL) {
		log_buf->tlb_eventflags |= TLB_FLAG_VERBOSE;
		if (output_caller != NULL)
			strlcpy(log_verbose->tlv_snd_frm, output_caller,
			    TCP_FUNC_LEN);
		else
			*log_verbose->tlv_snd_frm = 0;
		strlcpy(log_verbose->tlv_trace_func, func, TCP_FUNC_LEN);
		log_verbose->tlv_trace_line = line;
	}

	/* Insert the new log at the tail. */
	STAILQ_INSERT_TAIL(&tp->t_logs, log_entry, tlm_queue);
	tcp_log_entry_refcnt_add(log_entry);
	return (log_buf);
}

/*
 * Change the logging state for a TCPCB. Returns 0 on success or an
 * error code on failure.
 */
int
tcp_log_state_change(struct tcpcb *tp, int state)
{
	struct tcp_log_mem *log_entry;

	INP_WLOCK_ASSERT(tp->t_inpcb);
	switch(state) {
	case TCP_LOG_STATE_CLEAR:
		while ((log_entry = STAILQ_FIRST(&tp->t_logs)) != NULL)
			tcp_log_remove_log_head(tp, log_entry);
		/* Fall through */

	case TCP_LOG_STATE_OFF:
		tp->t_logstate = TCP_LOG_STATE_OFF;
		break;

	case TCP_LOG_STATE_TAIL:
	case TCP_LOG_STATE_HEAD:
	case TCP_LOG_STATE_CONTINUAL:
	case TCP_LOG_STATE_HEAD_AUTO:
	case TCP_LOG_STATE_TAIL_AUTO:
		tp->t_logstate = state;
		break;

	default:
		return (EINVAL);
	}
	if (tcp_disable_all_bb_logs) {
		/* We are prohibited from doing any logs */
		tp->t_logstate = TCP_LOG_STATE_OFF;
	}
	tp->t_flags2 &= ~(TF2_LOG_AUTO);

	return (0);
}

/* If tcp_drain() is called, flush half the log entries. */
void
tcp_log_drain(struct tcpcb *tp)
{
	struct tcp_log_mem *log_entry, *next;
	int target, skip;

	INP_WLOCK_ASSERT(tp->t_inpcb);
	if ((target = tp->t_lognum / 2) == 0)
		return;

	/*
	 * If we are logging the "head" packets, we want to discard
	 * from the tail of the queue. Otherwise, we want to discard
	 * from the head.
	 */
	if (tp->t_logstate == TCP_LOG_STATE_HEAD ||
	    tp->t_logstate == TCP_LOG_STATE_HEAD_AUTO) {
		skip = tp->t_lognum - target;
		STAILQ_FOREACH(log_entry, &tp->t_logs, tlm_queue)
			if (!--skip)
				break;
		KASSERT(log_entry != NULL,
		    ("%s: skipped through all entries!", __func__));
		if (log_entry == NULL)
			return;
		while ((next = STAILQ_NEXT(log_entry, tlm_queue)) != NULL) {
			STAILQ_REMOVE_AFTER(&tp->t_logs, log_entry, tlm_queue);
			tcp_log_entry_refcnt_rem(next);
			tcp_log_remove_log_cleanup(tp, next);
#ifdef INVARIANTS
			target--;
#endif
		}
		KASSERT(target == 0,
		    ("%s: After removing from tail, target was %d", __func__,
		    target));
	} else if (tp->t_logstate == TCP_LOG_STATE_CONTINUAL) {
		(void)tcp_log_dump_tp_logbuf(tp, "auto-dumped from continual",
		    M_NOWAIT, false);
	} else {
		while ((log_entry = STAILQ_FIRST(&tp->t_logs)) != NULL &&
		    target--)
			tcp_log_remove_log_head(tp, log_entry);
		KASSERT(target <= 0,
		    ("%s: After removing from head, target was %d", __func__,
		    target));
		KASSERT(tp->t_lognum > 0,
		    ("%s: After removing from head, tp->t_lognum was %d",
		    __func__, tp->t_lognum));
		KASSERT(log_entry != NULL,
		    ("%s: After removing from head, the tailq was empty",
		    __func__));
	}
}

static inline int
tcp_log_copyout(struct sockopt *sopt, void *src, void *dst, size_t len)
{

	if (sopt->sopt_td != NULL)
		return (copyout(src, dst, len));
	bcopy(src, dst, len);
	return (0);
}

static int
tcp_log_logs_to_buf(struct sockopt *sopt, struct tcp_log_stailq *log_tailqp,
    struct tcp_log_buffer **end, int count)
{
	struct tcp_log_buffer *out_entry;
	struct tcp_log_mem *log_entry;
	size_t entrysize;
	int error;
#ifdef INVARIANTS
	int orig_count = count;
#endif

	/* Copy the data out. */
*/ 1858 error = 0; 1859 out_entry = (struct tcp_log_buffer *) sopt->sopt_val; 1860 STAILQ_FOREACH(log_entry, log_tailqp, tlm_queue) { 1861 count--; 1862 KASSERT(count >= 0, 1863 ("%s:%d: Exceeded expected count (%d) processing list %p", 1864 __func__, __LINE__, orig_count, log_tailqp)); 1865 1866 #ifdef TCPLOG_DEBUG_COUNTERS 1867 counter_u64_add(tcp_log_que_copyout, 1); 1868 #endif 1869 1870 /* 1871 * Skip copying out the header if it isn't present. 1872 * Instead, copy out zeros (to ensure we don't leak info). 1873 * TODO: Make sure we truly do zero everything we don't 1874 * explicitly set. 1875 */ 1876 if (log_entry->tlm_buf.tlb_eventflags & TLB_FLAG_HDR) 1877 entrysize = sizeof(struct tcp_log_buffer); 1878 else 1879 entrysize = offsetof(struct tcp_log_buffer, tlb_th); 1880 error = tcp_log_copyout(sopt, &log_entry->tlm_buf, out_entry, 1881 entrysize); 1882 if (error) 1883 break; 1884 if (!(log_entry->tlm_buf.tlb_eventflags & TLB_FLAG_HDR)) { 1885 error = tcp_log_copyout(sopt, zerobuf, 1886 ((uint8_t *)out_entry) + entrysize, 1887 sizeof(struct tcp_log_buffer) - entrysize); 1888 } 1889 1890 /* 1891 * Copy out the verbose bit, if needed. Either way, 1892 * increment the output pointer the correct amount. 1893 */ 1894 if (log_entry->tlm_buf.tlb_eventflags & TLB_FLAG_VERBOSE) { 1895 error = tcp_log_copyout(sopt, &log_entry->tlm_v, 1896 out_entry->tlb_verbose, 1897 sizeof(struct tcp_log_verbose)); 1898 if (error) 1899 break; 1900 out_entry = (struct tcp_log_buffer *) 1901 (((uint8_t *) (out_entry + 1)) + 1902 sizeof(struct tcp_log_verbose)); 1903 } else 1904 out_entry++; 1905 } 1906 *end = out_entry; 1907 KASSERT(error || count == 0, 1908 ("%s:%d: Less than expected count (%d) processing list %p" 1909 " (%d remain)", __func__, __LINE__, orig_count, 1910 log_tailqp, count)); 1911 1912 return (error); 1913 } 1914 1915 /* 1916 * Copy out the buffer. Note that we do incremental copying, so 1917 * sooptcopyout() won't work. However, the goal is to produce the same 1918 * end result as if we copied in the entire user buffer, updated it, 1919 * and then used sooptcopyout() to copy it out. 1920 * 1921 * NOTE: This should be called with a write lock on the PCB; however, 1922 * the function will drop it after it extracts the data from the TCPCB. 1923 */ 1924 int 1925 tcp_log_getlogbuf(struct sockopt *sopt, struct tcpcb *tp) 1926 { 1927 struct tcp_log_stailq log_tailq; 1928 struct tcp_log_mem *log_entry, *log_next; 1929 struct tcp_log_buffer *out_entry; 1930 struct inpcb *inp; 1931 size_t outsize, entrysize; 1932 int error, outnum; 1933 1934 INP_WLOCK_ASSERT(tp->t_inpcb); 1935 inp = tp->t_inpcb; 1936 1937 /* 1938 * Determine which log entries will fit in the buffer. As an 1939 * optimization, skip this if all the entries will clearly fit 1940 * in the buffer. (However, get an exact size if we are using 1941 * INVARIANTS.) 
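 * (Worst case, an entry needs sizeof(struct tcp_log_buffer) +
 * sizeof(struct tcp_log_verbose) bytes of output.  So, if
 * sopt->sopt_valsize divided by that per-entry maximum already covers
 * t_lognum entries, everything is guaranteed to fit and the walk
 * below is unnecessary.)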
1942 */ 1943 #ifndef INVARIANTS 1944 if (sopt->sopt_valsize / (sizeof(struct tcp_log_buffer) + 1945 sizeof(struct tcp_log_verbose)) >= tp->t_lognum) { 1946 log_entry = STAILQ_LAST(&tp->t_logs, tcp_log_mem, tlm_queue); 1947 log_next = NULL; 1948 outsize = 0; 1949 outnum = tp->t_lognum; 1950 } else { 1951 #endif 1952 outsize = outnum = 0; 1953 log_entry = NULL; 1954 STAILQ_FOREACH(log_next, &tp->t_logs, tlm_queue) { 1955 entrysize = sizeof(struct tcp_log_buffer); 1956 if (log_next->tlm_buf.tlb_eventflags & 1957 TLB_FLAG_VERBOSE) 1958 entrysize += sizeof(struct tcp_log_verbose); 1959 if ((sopt->sopt_valsize - outsize) < entrysize) 1960 break; 1961 outsize += entrysize; 1962 outnum++; 1963 log_entry = log_next; 1964 } 1965 KASSERT(outsize <= sopt->sopt_valsize, 1966 ("%s: calculated output size (%zu) greater than available" 1967 "space (%zu)", __func__, outsize, sopt->sopt_valsize)); 1968 #ifndef INVARIANTS 1969 } 1970 #endif 1971 1972 /* 1973 * Copy traditional sooptcopyout() behavior: if sopt->sopt_val 1974 * is NULL, silently skip the copy. However, in this case, we 1975 * will leave the list alone and return. Functionally, this 1976 * gives userspace a way to poll for an approximate buffer 1977 * size they will need to get the log entries. 1978 */ 1979 if (sopt->sopt_val == NULL) { 1980 INP_WUNLOCK(inp); 1981 if (outsize == 0) { 1982 outsize = outnum * (sizeof(struct tcp_log_buffer) + 1983 sizeof(struct tcp_log_verbose)); 1984 } 1985 if (sopt->sopt_valsize > outsize) 1986 sopt->sopt_valsize = outsize; 1987 return (0); 1988 } 1989 1990 /* 1991 * Break apart the list. We'll save the ones we want to copy 1992 * out locally and remove them from the TCPCB list. We can 1993 * then drop the INPCB lock while we do the copyout. 1994 * 1995 * There are roughly three cases: 1996 * 1. There was nothing to copy out. That's easy: drop the 1997 * lock and return. 1998 * 2. We are copying out the entire list. Again, that's easy: 1999 * move the whole list. 2000 * 3. We are copying out a partial list. That's harder. We 2001 * need to update the list book-keeping entries. 2002 */ 2003 if (log_entry != NULL && log_next == NULL) { 2004 /* Move entire list. */ 2005 KASSERT(outnum == tp->t_lognum, 2006 ("%s:%d: outnum (%d) should match tp->t_lognum (%d)", 2007 __func__, __LINE__, outnum, tp->t_lognum)); 2008 log_tailq = tp->t_logs; 2009 tp->t_lognum = 0; 2010 STAILQ_INIT(&tp->t_logs); 2011 } else if (log_entry != NULL) { 2012 /* Move partial list. */ 2013 KASSERT(outnum < tp->t_lognum, 2014 ("%s:%d: outnum (%d) not less than tp->t_lognum (%d)", 2015 __func__, __LINE__, outnum, tp->t_lognum)); 2016 STAILQ_FIRST(&log_tailq) = STAILQ_FIRST(&tp->t_logs); 2017 STAILQ_FIRST(&tp->t_logs) = STAILQ_NEXT(log_entry, tlm_queue); 2018 KASSERT(STAILQ_NEXT(log_entry, tlm_queue) != NULL, 2019 ("%s:%d: tp->t_logs is unexpectedly shorter than expected" 2020 "(tp: %p, log_tailq: %p, outnum: %d, tp->t_lognum: %d)", 2021 __func__, __LINE__, tp, &log_tailq, outnum, tp->t_lognum)); 2022 STAILQ_NEXT(log_entry, tlm_queue) = NULL; 2023 log_tailq.stqh_last = &STAILQ_NEXT(log_entry, tlm_queue); 2024 tp->t_lognum -= outnum; 2025 } else 2026 STAILQ_INIT(&log_tailq); 2027 2028 /* Drop the PCB lock. */ 2029 INP_WUNLOCK(inp); 2030 2031 /* Copy the data out. */ 2032 error = tcp_log_logs_to_buf(sopt, &log_tailq, &out_entry, outnum); 2033 2034 if (error) { 2035 /* Restore list */ 2036 INP_WLOCK(inp); 2037 if ((inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) == 0) { 2038 tp = intotcpcb(inp); 2039 2040 /* Merge the two lists. 
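 * Note that STAILQ_CONCAT() appends anything that was logged while the
 * lock was dropped after the entries we saved in log_tailq, so the
 * merged list stays in chronological order; t_lognum is then restored
 * by adding outnum back.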
*/ 2041 STAILQ_CONCAT(&log_tailq, &tp->t_logs); 2042 tp->t_logs = log_tailq; 2043 tp->t_lognum += outnum; 2044 } 2045 INP_WUNLOCK(inp); 2046 } else { 2047 /* Sanity check entries */ 2048 KASSERT(((caddr_t)out_entry - (caddr_t)sopt->sopt_val) == 2049 outsize, ("%s: Actual output size (%zu) != " 2050 "calculated output size (%zu)", __func__, 2051 (size_t)((caddr_t)out_entry - (caddr_t)sopt->sopt_val), 2052 outsize)); 2053 2054 /* Free the entries we just copied out. */ 2055 STAILQ_FOREACH_SAFE(log_entry, &log_tailq, tlm_queue, log_next) { 2056 tcp_log_entry_refcnt_rem(log_entry); 2057 uma_zfree(tcp_log_zone, log_entry); 2058 } 2059 } 2060 2061 sopt->sopt_valsize = (size_t)((caddr_t)out_entry - 2062 (caddr_t)sopt->sopt_val); 2063 return (error); 2064 } 2065 2066 static void 2067 tcp_log_free_queue(struct tcp_log_dev_queue *param) 2068 { 2069 struct tcp_log_dev_log_queue *entry; 2070 2071 KASSERT(param != NULL, ("%s: called with NULL param", __func__)); 2072 if (param == NULL) 2073 return; 2074 2075 entry = (struct tcp_log_dev_log_queue *)param; 2076 2077 /* Free the entries. */ 2078 tcp_log_free_entries(&entry->tldl_entries, &entry->tldl_count); 2079 2080 /* Free the buffer, if it is allocated. */ 2081 if (entry->tldl_common.tldq_buf != NULL) 2082 free(entry->tldl_common.tldq_buf, M_TCPLOGDEV); 2083 2084 /* Free the queue entry. */ 2085 free(entry, M_TCPLOGDEV); 2086 } 2087 2088 static struct tcp_log_common_header * 2089 tcp_log_expandlogbuf(struct tcp_log_dev_queue *param) 2090 { 2091 struct tcp_log_dev_log_queue *entry; 2092 struct tcp_log_header *hdr; 2093 uint8_t *end; 2094 struct sockopt sopt; 2095 int error; 2096 2097 entry = (struct tcp_log_dev_log_queue *)param; 2098 2099 /* Take a worst-case guess at space needs. */ 2100 sopt.sopt_valsize = sizeof(struct tcp_log_header) + 2101 entry->tldl_count * (sizeof(struct tcp_log_buffer) + 2102 sizeof(struct tcp_log_verbose)); 2103 hdr = malloc(sopt.sopt_valsize, M_TCPLOGDEV, M_NOWAIT); 2104 if (hdr == NULL) { 2105 #ifdef TCPLOG_DEBUG_COUNTERS 2106 counter_u64_add(tcp_log_que_fail5, entry->tldl_count); 2107 #endif 2108 return (NULL); 2109 } 2110 sopt.sopt_val = hdr + 1; 2111 sopt.sopt_valsize -= sizeof(struct tcp_log_header); 2112 sopt.sopt_td = NULL; 2113 2114 error = tcp_log_logs_to_buf(&sopt, &entry->tldl_entries, 2115 (struct tcp_log_buffer **)&end, entry->tldl_count); 2116 if (error) { 2117 free(hdr, M_TCPLOGDEV); 2118 return (NULL); 2119 } 2120 2121 /* Free the entries. */ 2122 tcp_log_free_entries(&entry->tldl_entries, &entry->tldl_count); 2123 entry->tldl_count = 0; 2124 2125 memset(hdr, 0, sizeof(struct tcp_log_header)); 2126 hdr->tlh_version = TCP_LOG_BUF_VER; 2127 hdr->tlh_type = TCP_LOG_DEV_TYPE_BBR; 2128 hdr->tlh_length = end - (uint8_t *)hdr; 2129 hdr->tlh_ie = entry->tldl_ie; 2130 hdr->tlh_af = entry->tldl_af; 2131 getboottime(&hdr->tlh_offset); 2132 strlcpy(hdr->tlh_id, entry->tldl_id, TCP_LOG_ID_LEN); 2133 strlcpy(hdr->tlh_tag, entry->tldl_tag, TCP_LOG_TAG_LEN); 2134 strlcpy(hdr->tlh_reason, entry->tldl_reason, TCP_LOG_REASON_LEN); 2135 return ((struct tcp_log_common_header *)hdr); 2136 } 2137 2138 /* 2139 * Queue the tcpcb's log buffer for transmission via the log buffer facility. 2140 * 2141 * NOTE: This should be called with a write lock on the PCB. 2142 * 2143 * how should be M_WAITOK or M_NOWAIT. If M_WAITOK, the function will drop 2144 * and reacquire the INP lock if it needs to do so. 
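 * For illustration only, a stack that wants to push a session's trace
 * to the log device without sleeping might use something like
 *
 *	(void)tcp_log_dump_tp_logbuf(tp, "example reason", M_NOWAIT,
 *	    false);
 *
 * (the reason string is made up; see the M_NOWAIT call in
 * tcp_log_drain() above for an in-tree use).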
2145 * 2146 * If force is false, this will only dump auto-logged sessions if 2147 * tcp_log_auto_all is true or if there is a log ID defined for the session. 2148 */ 2149 int 2150 tcp_log_dump_tp_logbuf(struct tcpcb *tp, char *reason, int how, bool force) 2151 { 2152 struct tcp_log_dev_log_queue *entry; 2153 struct inpcb *inp; 2154 #ifdef TCPLOG_DEBUG_COUNTERS 2155 int num_entries; 2156 #endif 2157 2158 inp = tp->t_inpcb; 2159 INP_WLOCK_ASSERT(inp); 2160 2161 /* If there are no log entries, there is nothing to do. */ 2162 if (tp->t_lognum == 0) 2163 return (0); 2164 2165 /* Check for a log ID. */ 2166 if (tp->t_lib == NULL && (tp->t_flags2 & TF2_LOG_AUTO) && 2167 !tcp_log_auto_all && !force) { 2168 struct tcp_log_mem *log_entry; 2169 2170 /* 2171 * We needed a log ID and none was found. Free the log entries 2172 * and return success. Also, cancel further logging. If the 2173 * session doesn't have a log ID by now, we'll assume it isn't 2174 * going to get one. 2175 */ 2176 while ((log_entry = STAILQ_FIRST(&tp->t_logs)) != NULL) 2177 tcp_log_remove_log_head(tp, log_entry); 2178 KASSERT(tp->t_lognum == 0, 2179 ("%s: After freeing entries, tp->t_lognum=%d (expected 0)", 2180 __func__, tp->t_lognum)); 2181 tp->t_logstate = TCP_LOG_STATE_OFF; 2182 return (0); 2183 } 2184 2185 /* 2186 * Allocate memory. If we must wait, we'll need to drop the locks 2187 * and reacquire them (and do all the related business that goes 2188 * along with that). 2189 */ 2190 entry = malloc(sizeof(struct tcp_log_dev_log_queue), M_TCPLOGDEV, 2191 M_NOWAIT); 2192 if (entry == NULL && (how & M_NOWAIT)) { 2193 #ifdef TCPLOG_DEBUG_COUNTERS 2194 counter_u64_add(tcp_log_que_fail3, 1); 2195 #endif 2196 return (ENOBUFS); 2197 } 2198 if (entry == NULL) { 2199 INP_WUNLOCK(inp); 2200 entry = malloc(sizeof(struct tcp_log_dev_log_queue), 2201 M_TCPLOGDEV, M_WAITOK); 2202 INP_WLOCK(inp); 2203 /* 2204 * Note that this check is slightly overly-restrictive in 2205 * that the TCB can survive either of these events. 2206 * However, there is currently not a good way to ensure 2207 * that is the case. So, if we hit this M_WAIT path, we 2208 * may end up dropping some entries. That seems like a 2209 * small price to pay for safety. 2210 */ 2211 if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) { 2212 free(entry, M_TCPLOGDEV); 2213 #ifdef TCPLOG_DEBUG_COUNTERS 2214 counter_u64_add(tcp_log_que_fail2, 1); 2215 #endif 2216 return (ECONNRESET); 2217 } 2218 tp = intotcpcb(inp); 2219 if (tp->t_lognum == 0) { 2220 free(entry, M_TCPLOGDEV); 2221 return (0); 2222 } 2223 } 2224 2225 /* Fill in the unique parts of the queue entry. */ 2226 if (tp->t_lib != NULL) { 2227 strlcpy(entry->tldl_id, tp->t_lib->tlb_id, TCP_LOG_ID_LEN); 2228 strlcpy(entry->tldl_tag, tp->t_lib->tlb_tag, TCP_LOG_TAG_LEN); 2229 } else { 2230 strlcpy(entry->tldl_id, "UNKNOWN", TCP_LOG_ID_LEN); 2231 strlcpy(entry->tldl_tag, "UNKNOWN", TCP_LOG_TAG_LEN); 2232 } 2233 if (reason != NULL) 2234 strlcpy(entry->tldl_reason, reason, TCP_LOG_REASON_LEN); 2235 else 2236 strlcpy(entry->tldl_reason, "UNKNOWN", TCP_LOG_ID_LEN); 2237 entry->tldl_ie = inp->inp_inc.inc_ie; 2238 if (inp->inp_inc.inc_flags & INC_ISIPV6) 2239 entry->tldl_af = AF_INET6; 2240 else 2241 entry->tldl_af = AF_INET; 2242 entry->tldl_entries = tp->t_logs; 2243 entry->tldl_count = tp->t_lognum; 2244 2245 /* Fill in the common parts of the queue entry. 
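 * (tldq_xform and tldq_dtor point at tcp_log_expandlogbuf() and
 * tcp_log_free_queue() above: the former flattens the queued entries
 * into a single buffer headed by a struct tcp_log_header when the log
 * device code needs the data, and the latter frees the entries, the
 * buffer, and the queue entry itself if the data is dropped instead.)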
*/ 2246 entry->tldl_common.tldq_buf = NULL; 2247 entry->tldl_common.tldq_xform = tcp_log_expandlogbuf; 2248 entry->tldl_common.tldq_dtor = tcp_log_free_queue; 2249 2250 /* Clear the log data from the TCPCB. */ 2251 #ifdef TCPLOG_DEBUG_COUNTERS 2252 num_entries = tp->t_lognum; 2253 #endif 2254 tp->t_lognum = 0; 2255 STAILQ_INIT(&tp->t_logs); 2256 2257 /* Add the entry. If no one is listening, free the entry. */ 2258 if (tcp_log_dev_add_log((struct tcp_log_dev_queue *)entry)) { 2259 tcp_log_free_queue((struct tcp_log_dev_queue *)entry); 2260 #ifdef TCPLOG_DEBUG_COUNTERS 2261 counter_u64_add(tcp_log_que_fail1, num_entries); 2262 } else { 2263 counter_u64_add(tcp_log_queued, num_entries); 2264 #endif 2265 } 2266 return (0); 2267 } 2268 2269 /* 2270 * Queue the log_id_node's log buffers for transmission via the log buffer 2271 * facility. 2272 * 2273 * NOTE: This should be called with the bucket locked and referenced. 2274 * 2275 * how should be M_WAITOK or M_NOWAIT. If M_WAITOK, the function will drop 2276 * and reacquire the bucket lock if it needs to do so. (The caller must 2277 * ensure that the tln is no longer on any lists so no one else will mess 2278 * with this while the lock is dropped!) 2279 */ 2280 static int 2281 tcp_log_dump_node_logbuf(struct tcp_log_id_node *tln, char *reason, int how) 2282 { 2283 struct tcp_log_dev_log_queue *entry; 2284 struct tcp_log_id_bucket *tlb; 2285 2286 tlb = tln->tln_bucket; 2287 TCPID_BUCKET_LOCK_ASSERT(tlb); 2288 KASSERT(tlb->tlb_refcnt > 0, 2289 ("%s:%d: Called with unreferenced bucket (tln=%p, tlb=%p)", 2290 __func__, __LINE__, tln, tlb)); 2291 KASSERT(tln->tln_closed, 2292 ("%s:%d: Called for node with tln_closed==false (tln=%p)", 2293 __func__, __LINE__, tln)); 2294 2295 /* If there are no log entries, there is nothing to do. */ 2296 if (tln->tln_count == 0) 2297 return (0); 2298 2299 /* 2300 * Allocate memory. If we must wait, we'll need to drop the locks 2301 * and reacquire them (and do all the related business that goes 2302 * along with that). 2303 */ 2304 entry = malloc(sizeof(struct tcp_log_dev_log_queue), M_TCPLOGDEV, 2305 M_NOWAIT); 2306 if (entry == NULL && (how & M_NOWAIT)) 2307 return (ENOBUFS); 2308 if (entry == NULL) { 2309 TCPID_BUCKET_UNLOCK(tlb); 2310 entry = malloc(sizeof(struct tcp_log_dev_log_queue), 2311 M_TCPLOGDEV, M_WAITOK); 2312 TCPID_BUCKET_LOCK(tlb); 2313 } 2314 2315 /* Fill in the common parts of the queue entry.. */ 2316 entry->tldl_common.tldq_buf = NULL; 2317 entry->tldl_common.tldq_xform = tcp_log_expandlogbuf; 2318 entry->tldl_common.tldq_dtor = tcp_log_free_queue; 2319 2320 /* Fill in the unique parts of the queue entry. */ 2321 strlcpy(entry->tldl_id, tlb->tlb_id, TCP_LOG_ID_LEN); 2322 strlcpy(entry->tldl_tag, tlb->tlb_tag, TCP_LOG_TAG_LEN); 2323 if (reason != NULL) 2324 strlcpy(entry->tldl_reason, reason, TCP_LOG_REASON_LEN); 2325 else 2326 strlcpy(entry->tldl_reason, "UNKNOWN", TCP_LOG_ID_LEN); 2327 entry->tldl_ie = tln->tln_ie; 2328 entry->tldl_entries = tln->tln_entries; 2329 entry->tldl_count = tln->tln_count; 2330 entry->tldl_af = tln->tln_af; 2331 2332 /* Add the entry. If no one is listening, free the entry. */ 2333 if (tcp_log_dev_add_log((struct tcp_log_dev_queue *)entry)) 2334 tcp_log_free_queue((struct tcp_log_dev_queue *)entry); 2335 2336 return (0); 2337 } 2338 2339 2340 /* 2341 * Queue the log buffers for all sessions in a bucket for transmissions via 2342 * the log buffer facility. 2343 * 2344 * NOTE: This should be called with a locked bucket; however, the function 2345 * will drop the lock. 
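 * (This is the backend for dumping every session that shares a log ID:
 * tcp_log_dump_tp_bucket_logbufs() below resolves a tcpcb to its
 * bucket and then hands that bucket to this function.)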
2346 */ 2347 #define LOCAL_SAVE 10 2348 static void 2349 tcp_log_dumpbucketlogs(struct tcp_log_id_bucket *tlb, char *reason) 2350 { 2351 struct tcp_log_id_node local_entries[LOCAL_SAVE]; 2352 struct inpcb *inp; 2353 struct tcpcb *tp; 2354 struct tcp_log_id_node *cur_tln, *prev_tln, *tmp_tln; 2355 int i, num_local_entries, tree_locked; 2356 bool expireq_locked; 2357 2358 TCPID_BUCKET_LOCK_ASSERT(tlb); 2359 2360 /* 2361 * Take a reference on the bucket to keep it from disappearing until 2362 * we are done. 2363 */ 2364 TCPID_BUCKET_REF(tlb); 2365 2366 /* 2367 * We'll try to create these without dropping locks. However, we 2368 * might very well need to drop locks to get memory. If that's the 2369 * case, we'll save up to 10 on the stack, and sacrifice the rest. 2370 * (Otherwise, we need to worry about finding our place again in a 2371 * potentially changed list. It just doesn't seem worth the trouble 2372 * to do that.) 2373 */ 2374 expireq_locked = false; 2375 num_local_entries = 0; 2376 prev_tln = NULL; 2377 tree_locked = TREE_UNLOCKED; 2378 SLIST_FOREACH_SAFE(cur_tln, &tlb->tlb_head, tln_list, tmp_tln) { 2379 /* 2380 * If this isn't associated with a TCPCB, we can pull it off 2381 * the list now. We need to be careful that the expire timer 2382 * hasn't already taken ownership (tln_expiretime == SBT_MAX). 2383 * If so, we let the expire timer code free the data. 2384 */ 2385 if (cur_tln->tln_closed) { 2386 no_inp: 2387 /* 2388 * Get the expireq lock so we can get a consistent 2389 * read of tln_expiretime and so we can remove this 2390 * from the expireq. 2391 */ 2392 if (!expireq_locked) { 2393 TCPLOG_EXPIREQ_LOCK(); 2394 expireq_locked = true; 2395 } 2396 2397 /* 2398 * We ignore entries with tln_expiretime == SBT_MAX. 2399 * The expire timer code already owns those. 2400 */ 2401 KASSERT(cur_tln->tln_expiretime > (sbintime_t) 0, 2402 ("%s:%d: node on the expire queue without positive " 2403 "expire time", __func__, __LINE__)); 2404 if (cur_tln->tln_expiretime == SBT_MAX) { 2405 prev_tln = cur_tln; 2406 continue; 2407 } 2408 2409 /* Remove the entry from the expireq. */ 2410 STAILQ_REMOVE(&tcp_log_expireq_head, cur_tln, 2411 tcp_log_id_node, tln_expireq); 2412 2413 /* Remove the entry from the bucket. */ 2414 if (prev_tln != NULL) 2415 SLIST_REMOVE_AFTER(prev_tln, tln_list); 2416 else 2417 SLIST_REMOVE_HEAD(&tlb->tlb_head, tln_list); 2418 2419 /* 2420 * Drop the INP and bucket reference counts. Due to 2421 * lock-ordering rules, we need to drop the expire 2422 * queue lock. 2423 */ 2424 TCPLOG_EXPIREQ_UNLOCK(); 2425 expireq_locked = false; 2426 2427 /* Drop the INP reference. */ 2428 INP_WLOCK(cur_tln->tln_inp); 2429 if (!in_pcbrele_wlocked(cur_tln->tln_inp)) 2430 INP_WUNLOCK(cur_tln->tln_inp); 2431 2432 if (tcp_log_unref_bucket(tlb, &tree_locked, NULL)) { 2433 #ifdef INVARIANTS 2434 panic("%s: Bucket refcount unexpectedly 0.", 2435 __func__); 2436 #endif 2437 /* 2438 * Recover as best we can: free the entry we 2439 * own. 2440 */ 2441 tcp_log_free_entries(&cur_tln->tln_entries, 2442 &cur_tln->tln_count); 2443 uma_zfree(tcp_log_node_zone, cur_tln); 2444 goto done; 2445 } 2446 2447 if (tcp_log_dump_node_logbuf(cur_tln, reason, 2448 M_NOWAIT)) { 2449 /* 2450 * If we have space, save the entries locally. 2451 * Otherwise, free them.
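 * (At most LOCAL_SAVE such nodes are parked in the on-stack
 * local_entries[] array; they are retried with M_WAITOK near the end
 * of this function, after the expire queue and tree locks have been
 * dropped.)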
2452 */ 2453 if (num_local_entries < LOCAL_SAVE) { 2454 local_entries[num_local_entries] = 2455 *cur_tln; 2456 num_local_entries++; 2457 } else { 2458 tcp_log_free_entries( 2459 &cur_tln->tln_entries, 2460 &cur_tln->tln_count); 2461 } 2462 } 2463 2464 /* No matter what, we are done with the node now. */ 2465 uma_zfree(tcp_log_node_zone, cur_tln); 2466 2467 /* 2468 * Because we removed this entry from the list, prev_tln 2469 * (which tracks the previous entry still on the tlb 2470 * list) remains unchanged. 2471 */ 2472 continue; 2473 } 2474 2475 /* 2476 * If we get to this point, the session data is still held in 2477 * the TCPCB. So, we need to pull the data out of that. 2478 * 2479 * We will need to drop the expireq lock so we can lock the INP. 2480 * We can then try to extract the data the "easy" way. If that 2481 * fails, we'll save the log entries for later. 2482 */ 2483 if (expireq_locked) { 2484 TCPLOG_EXPIREQ_UNLOCK(); 2485 expireq_locked = false; 2486 } 2487 2488 /* Lock the INP and then re-check the state. */ 2489 inp = cur_tln->tln_inp; 2490 INP_WLOCK(inp); 2491 /* 2492 * If we caught this while it was transitioning, the data 2493 * might have moved from the TCPCB to the tln (signified by 2494 * setting tln_closed to true. If so, treat this like an 2495 * inactive connection. 2496 */ 2497 if (cur_tln->tln_closed) { 2498 /* 2499 * It looks like we may have caught this connection 2500 * while it was transitioning from active to inactive. 2501 * Treat this like an inactive connection. 2502 */ 2503 INP_WUNLOCK(inp); 2504 goto no_inp; 2505 } 2506 2507 /* 2508 * Try to dump the data from the tp without dropping the lock. 2509 * If this fails, try to save off the data locally. 2510 */ 2511 tp = cur_tln->tln_tp; 2512 if (tcp_log_dump_tp_logbuf(tp, reason, M_NOWAIT, true) && 2513 num_local_entries < LOCAL_SAVE) { 2514 tcp_log_move_tp_to_node(tp, 2515 &local_entries[num_local_entries]); 2516 local_entries[num_local_entries].tln_closed = 1; 2517 KASSERT(local_entries[num_local_entries].tln_bucket == 2518 tlb, ("%s: %d: bucket mismatch for node %p", 2519 __func__, __LINE__, cur_tln)); 2520 num_local_entries++; 2521 } 2522 2523 INP_WUNLOCK(inp); 2524 2525 /* 2526 * We are goint to leave the current tln on the list. It will 2527 * become the previous tln. 2528 */ 2529 prev_tln = cur_tln; 2530 } 2531 2532 /* Drop our locks, if any. */ 2533 KASSERT(tree_locked == TREE_UNLOCKED, 2534 ("%s: %d: tree unexpectedly locked", __func__, __LINE__)); 2535 switch (tree_locked) { 2536 case TREE_WLOCKED: 2537 TCPID_TREE_WUNLOCK(); 2538 tree_locked = TREE_UNLOCKED; 2539 break; 2540 case TREE_RLOCKED: 2541 TCPID_TREE_RUNLOCK(); 2542 tree_locked = TREE_UNLOCKED; 2543 break; 2544 } 2545 if (expireq_locked) { 2546 TCPLOG_EXPIREQ_UNLOCK(); 2547 expireq_locked = false; 2548 } 2549 2550 /* 2551 * Try again for any saved entries. tcp_log_dump_node_logbuf() is 2552 * guaranteed to free the log entries within the node. And, since 2553 * the node itself is on our stack, we don't need to free it. 2554 */ 2555 for (i = 0; i < num_local_entries; i++) 2556 tcp_log_dump_node_logbuf(&local_entries[i], reason, M_WAITOK); 2557 2558 /* Drop our reference. */ 2559 if (!tcp_log_unref_bucket(tlb, &tree_locked, NULL)) 2560 TCPID_BUCKET_UNLOCK(tlb); 2561 2562 done: 2563 /* Drop our locks, if any. 
*/ 2564 switch (tree_locked) { 2565 case TREE_WLOCKED: 2566 TCPID_TREE_WUNLOCK(); 2567 break; 2568 case TREE_RLOCKED: 2569 TCPID_TREE_RUNLOCK(); 2570 break; 2571 } 2572 if (expireq_locked) 2573 TCPLOG_EXPIREQ_UNLOCK(); 2574 } 2575 #undef LOCAL_SAVE 2576 2577 2578 /* 2579 * Queue the log buffers for all sessions in a bucket for transmissions via 2580 * the log buffer facility. 2581 * 2582 * NOTE: This should be called with a locked INP; however, the function 2583 * will drop the lock. 2584 */ 2585 void 2586 tcp_log_dump_tp_bucket_logbufs(struct tcpcb *tp, char *reason) 2587 { 2588 struct tcp_log_id_bucket *tlb; 2589 int tree_locked; 2590 2591 /* Figure out our bucket and lock it. */ 2592 INP_WLOCK_ASSERT(tp->t_inpcb); 2593 tlb = tp->t_lib; 2594 if (tlb == NULL) { 2595 /* 2596 * No bucket; treat this like a request to dump a single 2597 * session's traces. 2598 */ 2599 (void)tcp_log_dump_tp_logbuf(tp, reason, M_WAITOK, true); 2600 INP_WUNLOCK(tp->t_inpcb); 2601 return; 2602 } 2603 TCPID_BUCKET_REF(tlb); 2604 INP_WUNLOCK(tp->t_inpcb); 2605 TCPID_BUCKET_LOCK(tlb); 2606 2607 /* If we are the last reference, we have nothing more to do here. */ 2608 tree_locked = TREE_UNLOCKED; 2609 if (tcp_log_unref_bucket(tlb, &tree_locked, NULL)) { 2610 switch (tree_locked) { 2611 case TREE_WLOCKED: 2612 TCPID_TREE_WUNLOCK(); 2613 break; 2614 case TREE_RLOCKED: 2615 TCPID_TREE_RUNLOCK(); 2616 break; 2617 } 2618 return; 2619 } 2620 2621 /* Turn this over to tcp_log_dumpbucketlogs() to finish the work. */ 2622 tcp_log_dumpbucketlogs(tlb, reason); 2623 } 2624 2625 /* 2626 * Mark the end of a flow with the current stack. A stack can add 2627 * stack-specific info to this trace event by overriding this 2628 * function (see bbr_log_flowend() for example). 2629 */ 2630 void 2631 tcp_log_flowend(struct tcpcb *tp) 2632 { 2633 if (tp->t_logstate != TCP_LOG_STATE_OFF) { 2634 struct socket *so = tp->t_inpcb->inp_socket; 2635 TCP_LOG_EVENT(tp, NULL, &so->so_rcv, &so->so_snd, 2636 TCP_LOG_FLOWEND, 0, 0, NULL, false); 2637 } 2638 } 2639 2640
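/*
 * Illustrative userspace sketch (not part of this file; TCP_LOG and
 * TCP_LOGBUF are assumed to be the socket options from netinet/tcp.h
 * that drive tcp_log_state_change() and tcp_log_getlogbuf() above):
 * enable tail logging on a connected socket and later retrieve the
 * accumulated records, which come back as packed tcp_log_buffer
 * records, each optionally followed by a tcp_log_verbose record.
 *
 *	int state = TCP_LOG_STATE_TAIL;
 *	(void)setsockopt(fd, IPPROTO_TCP, TCP_LOG, &state, sizeof(state));
 *	...
 *	char buf[256 * 1024];
 *	socklen_t len = sizeof(buf);
 *	if (getsockopt(fd, IPPROTO_TCP, TCP_LOGBUF, buf, &len) == 0) {
 *		// len now reflects the number of bytes actually copied out.
 *	}
 */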