/*-
 * Copyright (c) 2016-2018 Netflix, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_tcpdebug.h"
/**
 * Some notes about usage.
 *
 * The tcp_hpts system is designed to provide a high precision timer
 * system for tcp.  Its main purpose is to provide a mechanism for
 * pacing packets out onto the wire.  It can be used by a given TCP
 * stack in two ways (and the two methods can be used simultaneously).
 *
 * First, and probably the main thing it is used for by Rack and BBR,
 * it can be used to call tcp_output() of a transport stack at some
 * time in the future.  The normal way this is done is that tcp_output()
 * of the stack schedules itself to be called again by calling
 * tcp_hpts_insert(tcpcb, slot).  The slot is the time from now that the
 * stack wants to be called, but it must be converted to tcp_hpts's notion
 * of a slot.  This is done with one of the macros HPTS_MS_TO_SLOTS or
 * HPTS_USEC_TO_SLOTS.  So a typical call from the tcp_output() routine
 * might look like:
 *
 *	tcp_hpts_insert(tp, HPTS_USEC_TO_SLOTS(550));
 *
 * The above would schedule tcp_output() to be called in 550 microseconds.
 * Note that if using this mechanism the stack will want to add, near
 * its top, a check to prevent unwanted calls (from user land or the
 * arrival of incoming ACKs).  So it would add something like:
 *
 *	if (inp->inp_in_hpts)
 *		return;
 *
 * to prevent output processing until the time allotted has gone by.
 * Of course this is a bare-bones example and the stack will probably
 * have more considerations than just the above.
 *
 * The tcp_hpts system will call tcp_output() in one of two forms.
 * It first checks to see if the stack has defined a
 * tfb_tcp_output_wtime() function; if so, that is the routine it
 * calls.  If that function is not defined, it calls the
 * tfb_tcp_output() function instead.  The only difference between
 * these two calls is that the former passes the time in to the
 * function, so the function does not have to access the time (which
 * tcp_hpts already has).  What these functions do is of course totally
 * up to the individual tcp stack.
 *
 * The second function (actually two functions I guess :D)
 * the tcp_hpts system provides is the ability to either abort
 * a connection (later) or process input on a connection.
 * Why would you want to do this?  To keep processor locality.
 *
 * So in order to use the input redirection function, the
 * stack changes its tcp_do_segment() routine to, instead
 * of processing the data, call the function:
 *
 *	tcp_queue_pkt_to_input()
 *
 * You will note that the arguments to this function look
 * a lot like tcp_do_segment()'s arguments.  This function
 * will assure that the tcp_hpts system calls the stack's
 * tfb_tcp_hpts_do_segment() function from the correct CPU.
 * Note that multiple packets can get pushed into the tcp_hpts
 * system; this is indicated by the next-to-last argument to
 * tfb_tcp_hpts_do_segment() (nxt_pkt).  If nxt_pkt is 1, another
 * packet is coming.  If nxt_pkt is 0, this is the last call
 * that the tcp_hpts system has available for the tcp stack.
 *
 * The other point of the input system is to be able to safely
 * drop a tcp connection without worrying about the recursive
 * locking that may be occurring on the INP_WLOCK.  So if
 * a stack wants to drop a connection it calls:
 *
 *	tcp_set_inp_to_drop(tp, ETIMEDOUT)
 *
 * to schedule the tcp_hpts system to call
 *
 *	tcp_drop(tp, drop_reason)
 *
 * at a future point.  This is quite handy to prevent locking
 * issues when dropping connections.
 *
 */
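/*
 * Illustrative sketch (not part of the kernel sources): how a
 * hypothetical stack might combine the two mechanisms above.  The name
 * example_output() and the 550 usec pacing interval are invented for
 * this sketch; tcp_hpts_insert(), tcp_queue_pkt_to_input(),
 * tcp_queue_to_input() and tcp_set_inp_to_drop() are assumed to be the
 * usual __LINE__-recording macro wrappers around the __-prefixed entry
 * points defined below.
 *
 *	static int
 *	example_output(struct tcpcb *tp)
 *	{
 *		struct inpcb *inp = tp->t_inpcb;
 *
 *		if (inp->inp_in_hpts)
 *			return (0);	-- a pacing callback is already pending
 *		-- ... emit whatever the pacing budget currently allows ...
 *		-- ask to be called back in 550 microseconds
 *		tcp_hpts_insert(inp, HPTS_USEC_TO_SLOTS(550));
 *		return (0);
 *	}
 *
 * For input redirection, the stack's tcp_do_segment() simply queues the
 * segment and lets the hpts thread deliver it on the connection's CPU:
 *
 *	tcp_queue_to_input(tp, m, th, tlen, drop_hdrlen, iptos);
 *
 * and a connection can be torn down from the hpts thread (avoiding
 * recursive INP_WLOCK concerns) with:
 *
 *	tcp_set_inp_to_drop(inp, ETIMEDOUT);
 */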
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/module.h>
#include <sys/kernel.h>
#include <sys/hhook.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>		/* for proc0 declaration */
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/refcount.h>
#include <sys/sched.h>
#include <sys/queue.h>
#include <sys/smp.h>
#include <sys/counter.h>
#include <sys/time.h>
#include <sys/kthread.h>
#include <sys/kern_prefetch.h>

#include <vm/uma.h>
#include <vm/vm.h>

#include <net/route.h>
#include <net/vnet.h>

#define TCPSTATES		/* for logging */

#include <netinet/in.h>
#include <netinet/in_kdtrace.h>
#include <netinet/in_pcb.h>
#include <netinet/ip.h>
#include <netinet/ip_icmp.h>	/* required for icmp_var.h */
#include <netinet/icmp_var.h>	/* for ICMP_BANDLIM */
#include <netinet/ip_var.h>
#include <netinet/ip6.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/ip6_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcpip.h>
#include <netinet/cc/cc.h>
#include <netinet/tcp_hpts.h>

#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>
#endif				/* TCPDEBUG */
#ifdef TCP_OFFLOAD
#include <netinet/tcp_offload.h>
#endif

#include "opt_rss.h"

MALLOC_DEFINE(M_TCPHPTS, "tcp_hpts", "TCP hpts");
#ifdef RSS
#include <net/netisr.h>
#include <net/rss_config.h>
static int tcp_bind_threads = 1;
#else
static int tcp_bind_threads = 2;
#endif
TUNABLE_INT("net.inet.tcp.bind_hptss", &tcp_bind_threads);

static uint32_t tcp_hpts_logging_size = DEFAULT_HPTS_LOG;

TUNABLE_INT("net.inet.tcp.hpts_logging_sz", &tcp_hpts_logging_size);
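/*
 * Sketch: the two tunables above can be set from loader.conf; the
 * values shown are illustrative examples, not recommendations:
 *
 *	net.inet.tcp.bind_hptss=2
 *	net.inet.tcp.hpts_logging_sz=2048
 *
 * For bind_hptss (tcp_bind_threads), tcp_init_hptsi() below gives the
 * values their meaning: 0 = don't bind the hpts swi threads, 1 = bind
 * each one to its own CPU, 2 = bind each one to the cpuset of its
 * CPU's NUMA domain.
 */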
static struct tcp_hptsi tcp_pace;

static void tcp_wakehpts(struct tcp_hpts_entry *p);
static void tcp_wakeinput(struct tcp_hpts_entry *p);
static void tcp_input_data(struct tcp_hpts_entry *hpts, struct timeval *tv);
static void tcp_hptsi(struct tcp_hpts_entry *hpts, struct timeval *ctick);
static void tcp_hpts_thread(void *ctx);
static void tcp_init_hptsi(void *st);

int32_t tcp_min_hptsi_time = DEFAULT_MIN_SLEEP;
static int32_t tcp_hpts_callout_skip_swi = 0;

SYSCTL_NODE(_net_inet_tcp, OID_AUTO, hpts, CTLFLAG_RW, 0, "TCP Hpts controls");

#define	timersub(tvp, uvp, vvp)						\
	do {								\
		(vvp)->tv_sec = (tvp)->tv_sec - (uvp)->tv_sec;		\
		(vvp)->tv_usec = (tvp)->tv_usec - (uvp)->tv_usec;	\
		if ((vvp)->tv_usec < 0) {				\
			(vvp)->tv_sec--;				\
			(vvp)->tv_usec += 1000000;			\
		}							\
	} while (0)

static int32_t logging_on = 0;
static int32_t hpts_sleep_max = (NUM_OF_HPTSI_SLOTS - 2);
static int32_t tcp_hpts_precision = 120;

struct hpts_domain_info {
	int count;
	int cpu[MAXCPU];
};

struct hpts_domain_info hpts_domains[MAXMEMDOM];

SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, precision, CTLFLAG_RW,
    &tcp_hpts_precision, 120,
    "Value for PRE() precision of callout");

SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, logging, CTLFLAG_RW,
    &logging_on, 0,
    "Turn on logging if compiled in");

counter_u64_t hpts_loops;

SYSCTL_COUNTER_U64(_net_inet_tcp_hpts, OID_AUTO, loops, CTLFLAG_RD,
    &hpts_loops, "Number of times hpts had to loop to catch up");

counter_u64_t back_tosleep;

SYSCTL_COUNTER_U64(_net_inet_tcp_hpts, OID_AUTO, no_tcbsfound, CTLFLAG_RD,
    &back_tosleep, "Number of times hpts found no tcbs");

static int32_t in_newts_every_tcb = 0;

SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, in_tsperpcb, CTLFLAG_RW,
    &in_newts_every_tcb, 0,
    "Do we have a new cts every tcb we process for input");
static int32_t in_ts_percision = 0;

SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, in_tspercision, CTLFLAG_RW,
    &in_ts_percision, 0,
    "Do we use a precise timestamp for clients on input");
static int32_t out_newts_every_tcb = 0;

SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, out_tsperpcb, CTLFLAG_RW,
    &out_newts_every_tcb, 0,
    "Do we have a new cts every tcb we process for output");
static int32_t out_ts_percision = 0;

SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, out_tspercision, CTLFLAG_RW,
    &out_ts_percision, 0,
    "Do we use a precise timestamp for every output cts");

SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, maxsleep, CTLFLAG_RW,
    &hpts_sleep_max, 0,
    "The maximum time the hpts will sleep <1 - 254>");

SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, minsleep, CTLFLAG_RW,
    &tcp_min_hptsi_time, 0,
    "The minimum time the hpts must sleep before processing more slots");

SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, skip_swi, CTLFLAG_RW,
    &tcp_hpts_callout_skip_swi, 0,
    "Do we have the callout call directly to the hpts?");

static void
__tcp_hpts_log_it(struct tcp_hpts_entry *hpts, struct inpcb *inp, int event,
    uint32_t slot, uint32_t ticknow, int32_t line)
{
	struct hpts_log *pl;

	HPTS_MTX_ASSERT(hpts);
	if (hpts->p_log == NULL)
		return;
	pl = &hpts->p_log[hpts->p_log_at];
	hpts->p_log_at++;
	if (hpts->p_log_at >= hpts->p_logsize) {
		hpts->p_log_at = 0;
		hpts->p_log_wrapped = 1;
	}
	pl->inp = inp;
	if (inp) {
		pl->t_paceslot = inp->inp_hptsslot;
		pl->t_hptsreq = inp->inp_hpts_request;
		pl->p_onhpts = inp->inp_in_hpts;
		pl->p_oninput = inp->inp_in_input;
	} else {
		pl->t_paceslot = 0;
		pl->t_hptsreq = 0;
		pl->p_onhpts = 0;
		pl->p_oninput = 0;
	}
	pl->is_notempty = 1;
	pl->event = event;
	pl->line = line;
	pl->cts = tcp_get_usecs(NULL);
	pl->p_curtick = hpts->p_curtick;
	pl->p_prevtick = hpts->p_prevtick;
	pl->p_on_queue_cnt = hpts->p_on_queue_cnt;
	pl->ticknow = ticknow;
	pl->slot_req = slot;
	pl->p_nxt_slot = hpts->p_nxt_slot;
	pl->p_cur_slot = hpts->p_cur_slot;
	pl->p_hpts_sleep_time = hpts->p_hpts_sleep_time;
	pl->p_flags = (hpts->p_cpu & 0x7f);
	pl->p_flags <<= 7;
	pl->p_flags |= (hpts->p_num & 0x7f);
	pl->p_flags <<= 2;
	if (hpts->p_hpts_active) {
		pl->p_flags |= HPTS_HPTS_ACTIVE;
	}
}

#define tcp_hpts_log_it(a, b, c, d, e) __tcp_hpts_log_it(a, b, c, d, e, __LINE__)

static void
hpts_timeout_swi(void *arg)
{
	struct tcp_hpts_entry *hpts;

	hpts = (struct tcp_hpts_entry *)arg;
	swi_sched(hpts->ie_cookie, 0);
}

static void
hpts_timeout_dir(void *arg)
{
	tcp_hpts_thread(arg);
}

static inline void
hpts_sane_pace_remove(struct tcp_hpts_entry *hpts, struct inpcb *inp, struct hptsh *head, int clear)
{
#ifdef INVARIANTS
	if (mtx_owned(&hpts->p_mtx) == 0) {
		/* We don't own the mutex? */
		panic("%s: hpts:%p inp:%p no hpts mutex", __FUNCTION__, hpts, inp);
	}
	if (hpts->p_cpu != inp->inp_hpts_cpu) {
		/* It is not the right cpu/mutex? */
		panic("%s: hpts:%p inp:%p incorrect CPU", __FUNCTION__, hpts, inp);
	}
	if (inp->inp_in_hpts == 0) {
		/* We are not on the hpts? */
		panic("%s: hpts:%p inp:%p not on the hpts?", __FUNCTION__, hpts, inp);
	}
	if (TAILQ_EMPTY(head) &&
	    (hpts->p_on_queue_cnt != 0)) {
		/* We should not be empty with a queue count */
		panic("%s hpts:%p hpts bucket empty but cnt:%d",
		    __FUNCTION__, hpts, hpts->p_on_queue_cnt);
	}
#endif
	TAILQ_REMOVE(head, inp, inp_hpts);
	hpts->p_on_queue_cnt--;
	if (hpts->p_on_queue_cnt < 0) {
		/* Count should not go negative .. */
#ifdef INVARIANTS
		panic("Hpts goes negative inp:%p hpts:%p",
		    inp, hpts);
#endif
		hpts->p_on_queue_cnt = 0;
	}
	if (clear) {
		inp->inp_hpts_request = 0;
		inp->inp_in_hpts = 0;
	}
}

static inline void
hpts_sane_pace_insert(struct tcp_hpts_entry *hpts, struct inpcb *inp, struct hptsh *head, int line, int noref)
{
#ifdef INVARIANTS
	if (mtx_owned(&hpts->p_mtx) == 0) {
		/* We don't own the mutex? */
		panic("%s: hpts:%p inp:%p no hpts mutex", __FUNCTION__, hpts, inp);
	}
	if (hpts->p_cpu != inp->inp_hpts_cpu) {
		/* It is not the right cpu/mutex? */
		panic("%s: hpts:%p inp:%p incorrect CPU", __FUNCTION__, hpts, inp);
	}
	if ((noref == 0) && (inp->inp_in_hpts == 1)) {
		/* We are already on the hpts? */
		panic("%s: hpts:%p inp:%p already on the hpts?", __FUNCTION__, hpts, inp);
	}
#endif
	TAILQ_INSERT_TAIL(head, inp, inp_hpts);
	inp->inp_in_hpts = 1;
	hpts->p_on_queue_cnt++;
	if (noref == 0) {
		in_pcbref(inp);
	}
}

static inline void
hpts_sane_input_remove(struct tcp_hpts_entry *hpts, struct inpcb *inp, int clear)
{
#ifdef INVARIANTS
	if (mtx_owned(&hpts->p_mtx) == 0) {
		/* We don't own the mutex? */
		panic("%s: hpts:%p inp:%p no hpts mutex", __FUNCTION__, hpts, inp);
	}
	if (hpts->p_cpu != inp->inp_input_cpu) {
		/* It is not the right cpu/mutex? */
		panic("%s: hpts:%p inp:%p incorrect CPU", __FUNCTION__, hpts, inp);
	}
	if (inp->inp_in_input == 0) {
		/* We are not on the input hpts? */
		panic("%s: hpts:%p inp:%p not on the input hpts?", __FUNCTION__, hpts, inp);
	}
#endif
	TAILQ_REMOVE(&hpts->p_input, inp, inp_input);
	hpts->p_on_inqueue_cnt--;
	if (hpts->p_on_inqueue_cnt < 0) {
#ifdef INVARIANTS
		panic("Hpts in goes negative inp:%p hpts:%p",
		    inp, hpts);
#endif
		hpts->p_on_inqueue_cnt = 0;
	}
#ifdef INVARIANTS
	if (TAILQ_EMPTY(&hpts->p_input) &&
	    (hpts->p_on_inqueue_cnt != 0)) {
		/* We should not be empty with a queue count */
		panic("%s hpts:%p in_hpts input empty but cnt:%d",
		    __FUNCTION__, hpts, hpts->p_on_inqueue_cnt);
	}
#endif
	if (clear)
		inp->inp_in_input = 0;
}

static inline void
hpts_sane_input_insert(struct tcp_hpts_entry *hpts, struct inpcb *inp, int line)
{
#ifdef INVARIANTS
	if (mtx_owned(&hpts->p_mtx) == 0) {
		/* We don't own the mutex? */
		panic("%s: hpts:%p inp:%p no hpts mutex", __FUNCTION__, hpts, inp);
	}
	if (hpts->p_cpu != inp->inp_input_cpu) {
		/* It is not the right cpu/mutex? */
		panic("%s: hpts:%p inp:%p incorrect CPU", __FUNCTION__, hpts, inp);
	}
	if (inp->inp_in_input == 1) {
		/* We are already on the input hpts? */
		panic("%s: hpts:%p inp:%p already on the input hpts?", __FUNCTION__, hpts, inp);
	}
#endif
	TAILQ_INSERT_TAIL(&hpts->p_input, inp, inp_input);
	inp->inp_in_input = 1;
	hpts->p_on_inqueue_cnt++;
	in_pcbref(inp);
}

static int
sysctl_tcp_hpts_log(SYSCTL_HANDLER_ARGS)
{
	struct tcp_hpts_entry *hpts;
	size_t sz;
	int32_t logging_was, i;
	int32_t error = 0;

	/*
	 * HACK: Turn off logging so no locks are required; this really
	 * needs a memory barrier :)
	 */
	logging_was = logging_on;
	logging_on = 0;
	if (!req->oldptr) {
		/* How much? */
		sz = 0;
		for (i = 0; i < tcp_pace.rp_num_hptss; i++) {
			hpts = tcp_pace.rp_ent[i];
			if (hpts->p_log == NULL)
				continue;
			sz += (sizeof(struct hpts_log) * hpts->p_logsize);
		}
		error = SYSCTL_OUT(req, 0, sz);
	} else {
		for (i = 0; i < tcp_pace.rp_num_hptss; i++) {
			hpts = tcp_pace.rp_ent[i];
			if (hpts->p_log == NULL)
				continue;
			if (hpts->p_log_wrapped)
				sz = (sizeof(struct hpts_log) * hpts->p_logsize);
			else
				sz = (sizeof(struct hpts_log) * hpts->p_log_at);
			error = SYSCTL_OUT(req, hpts->p_log, sz);
		}
	}
	logging_on = logging_was;
	return error;
}

SYSCTL_PROC(_net_inet_tcp_hpts, OID_AUTO, log, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE,
    0, 0, sysctl_tcp_hpts_log, "A", "tcp hptsi log");

static void
tcp_wakehpts(struct tcp_hpts_entry *hpts)
{
	HPTS_MTX_ASSERT(hpts);
	swi_sched(hpts->ie_cookie, 0);
	if (hpts->p_hpts_active == 2) {
		/* Rare sleeping on an ENOBUF */
		wakeup_one(hpts);
	}
}

static void
tcp_wakeinput(struct tcp_hpts_entry *hpts)
{
	HPTS_MTX_ASSERT(hpts);
	swi_sched(hpts->ie_cookie, 0);
	if (hpts->p_hpts_active == 2) {
		/* Rare sleeping on an ENOBUF */
		wakeup_one(hpts);
	}
}

struct tcp_hpts_entry *
tcp_cur_hpts(struct inpcb *inp)
{
	int32_t hpts_num;
	struct tcp_hpts_entry *hpts;

	hpts_num = inp->inp_hpts_cpu;
	hpts = tcp_pace.rp_ent[hpts_num];
	return (hpts);
}

struct tcp_hpts_entry *
tcp_hpts_lock(struct inpcb *inp)
{
	struct tcp_hpts_entry *hpts;
	int32_t hpts_num;

again:
	hpts_num = inp->inp_hpts_cpu;
	hpts = tcp_pace.rp_ent[hpts_num];
#ifdef INVARIANTS
	if (mtx_owned(&hpts->p_mtx)) {
		panic("Hpts:%p owns mtx prior-to lock line:%d",
		    hpts, __LINE__);
	}
#endif
	mtx_lock(&hpts->p_mtx);
	if (hpts_num != inp->inp_hpts_cpu) {
		mtx_unlock(&hpts->p_mtx);
		goto again;
	}
	return (hpts);
}

struct tcp_hpts_entry *
tcp_input_lock(struct inpcb *inp)
{
	struct tcp_hpts_entry *hpts;
	int32_t hpts_num;

again:
	hpts_num = inp->inp_input_cpu;
	hpts = tcp_pace.rp_ent[hpts_num];
#ifdef INVARIANTS
	if (mtx_owned(&hpts->p_mtx)) {
		panic("Hpts:%p owns mtx prior-to lock line:%d",
		    hpts, __LINE__);
	}
#endif
	mtx_lock(&hpts->p_mtx);
	if (hpts_num != inp->inp_input_cpu) {
		mtx_unlock(&hpts->p_mtx);
		goto again;
	}
	return (hpts);
}

static void
tcp_remove_hpts_ref(struct inpcb *inp, struct tcp_hpts_entry *hpts, int line)
{
	int32_t add_freed;

	if (inp->inp_flags2 & INP_FREED) {
		/*
		 * Need to play a special trick so that in_pcbrele_wlocked
		 * does not return 1 when it really should have returned 0.
		 */
		add_freed = 1;
		inp->inp_flags2 &= ~INP_FREED;
	} else {
		add_freed = 0;
	}
#ifndef INP_REF_DEBUG
	if (in_pcbrele_wlocked(inp)) {
		/*
		 * This should not happen.  We have the inpcb referred to by
		 * the main socket (why we are called) and the hpts.  It
		 * should always return 0.
		 */
		panic("inpcb:%p release ret 1",
		    inp);
	}
#else
	if (__in_pcbrele_wlocked(inp, line)) {
		/*
		 * This should not happen.  We have the inpcb referred to by
		 * the main socket (why we are called) and the hpts.  It
		 * should always return 0.
		 */
613 */ 614 panic("inpcb:%p release ret 1", 615 inp); 616 } 617 #endif 618 if (add_freed) { 619 inp->inp_flags2 |= INP_FREED; 620 } 621 } 622 623 static void 624 tcp_hpts_remove_locked_output(struct tcp_hpts_entry *hpts, struct inpcb *inp, int32_t flags, int32_t line) 625 { 626 if (inp->inp_in_hpts) { 627 hpts_sane_pace_remove(hpts, inp, &hpts->p_hptss[inp->inp_hptsslot], 1); 628 tcp_remove_hpts_ref(inp, hpts, line); 629 } 630 } 631 632 static void 633 tcp_hpts_remove_locked_input(struct tcp_hpts_entry *hpts, struct inpcb *inp, int32_t flags, int32_t line) 634 { 635 HPTS_MTX_ASSERT(hpts); 636 if (inp->inp_in_input) { 637 hpts_sane_input_remove(hpts, inp, 1); 638 tcp_remove_hpts_ref(inp, hpts, line); 639 } 640 } 641 642 /* 643 * Called normally with the INP_LOCKED but it 644 * does not matter, the hpts lock is the key 645 * but the lock order allows us to hold the 646 * INP lock and then get the hpts lock. 647 * 648 * Valid values in the flags are 649 * HPTS_REMOVE_OUTPUT - remove from the output of the hpts. 650 * HPTS_REMOVE_INPUT - remove from the input of the hpts. 651 * Note that you can or both values together and get two 652 * actions. 653 */ 654 void 655 __tcp_hpts_remove(struct inpcb *inp, int32_t flags, int32_t line) 656 { 657 struct tcp_hpts_entry *hpts; 658 659 INP_WLOCK_ASSERT(inp); 660 if (flags & HPTS_REMOVE_OUTPUT) { 661 hpts = tcp_hpts_lock(inp); 662 tcp_hpts_remove_locked_output(hpts, inp, flags, line); 663 mtx_unlock(&hpts->p_mtx); 664 } 665 if (flags & HPTS_REMOVE_INPUT) { 666 hpts = tcp_input_lock(inp); 667 tcp_hpts_remove_locked_input(hpts, inp, flags, line); 668 mtx_unlock(&hpts->p_mtx); 669 } 670 } 671 672 static inline int 673 hpts_tick(struct tcp_hpts_entry *hpts, int32_t plus) 674 { 675 return ((hpts->p_prevtick + plus) % NUM_OF_HPTSI_SLOTS); 676 } 677 678 static int 679 tcp_queue_to_hpts_immediate_locked(struct inpcb *inp, struct tcp_hpts_entry *hpts, int32_t line, int32_t noref) 680 { 681 int32_t need_wake = 0; 682 uint32_t ticknow = 0; 683 684 HPTS_MTX_ASSERT(hpts); 685 if (inp->inp_in_hpts == 0) { 686 /* Ok we need to set it on the hpts in the current slot */ 687 if (hpts->p_hpts_active == 0) { 688 /* A sleeping hpts we want in next slot to run */ 689 if (logging_on) { 690 tcp_hpts_log_it(hpts, inp, HPTSLOG_INSERT_SLEEPER, 0, 691 hpts_tick(hpts, 1)); 692 } 693 inp->inp_hptsslot = hpts_tick(hpts, 1); 694 inp->inp_hpts_request = 0; 695 if (logging_on) { 696 tcp_hpts_log_it(hpts, inp, HPTSLOG_SLEEP_BEFORE, 1, ticknow); 697 } 698 need_wake = 1; 699 } else if ((void *)inp == hpts->p_inp) { 700 /* 701 * We can't allow you to go into the same slot we 702 * are in. We must put you out. 703 */ 704 inp->inp_hptsslot = hpts->p_nxt_slot; 705 } else 706 inp->inp_hptsslot = hpts->p_cur_slot; 707 hpts_sane_pace_insert(hpts, inp, &hpts->p_hptss[inp->inp_hptsslot], line, noref); 708 inp->inp_hpts_request = 0; 709 if (logging_on) { 710 tcp_hpts_log_it(hpts, inp, HPTSLOG_IMMEDIATE, 0, 0); 711 } 712 if (need_wake) { 713 /* 714 * Activate the hpts if it is sleeping and its 715 * timeout is not 1. 
716 */ 717 if (logging_on) { 718 tcp_hpts_log_it(hpts, inp, HPTSLOG_WAKEUP_HPTS, 0, ticknow); 719 } 720 hpts->p_direct_wake = 1; 721 tcp_wakehpts(hpts); 722 } 723 } 724 return (need_wake); 725 } 726 727 int 728 __tcp_queue_to_hpts_immediate(struct inpcb *inp, int32_t line) 729 { 730 int32_t ret; 731 struct tcp_hpts_entry *hpts; 732 733 INP_WLOCK_ASSERT(inp); 734 hpts = tcp_hpts_lock(inp); 735 ret = tcp_queue_to_hpts_immediate_locked(inp, hpts, line, 0); 736 mtx_unlock(&hpts->p_mtx); 737 return (ret); 738 } 739 740 static void 741 tcp_hpts_insert_locked(struct tcp_hpts_entry *hpts, struct inpcb *inp, uint32_t slot, uint32_t cts, int32_t line, 742 struct hpts_diag *diag, int32_t noref) 743 { 744 int32_t need_new_to = 0; 745 int32_t need_wakeup = 0; 746 uint32_t largest_slot; 747 uint32_t ticknow = 0; 748 uint32_t slot_calc; 749 750 HPTS_MTX_ASSERT(hpts); 751 if (diag) { 752 memset(diag, 0, sizeof(struct hpts_diag)); 753 diag->p_hpts_active = hpts->p_hpts_active; 754 diag->p_nxt_slot = hpts->p_nxt_slot; 755 diag->p_cur_slot = hpts->p_cur_slot; 756 diag->slot_req = slot; 757 } 758 if ((inp->inp_in_hpts == 0) || noref) { 759 inp->inp_hpts_request = slot; 760 if (slot == 0) { 761 /* Immediate */ 762 tcp_queue_to_hpts_immediate_locked(inp, hpts, line, noref); 763 return; 764 } 765 if (hpts->p_hpts_active) { 766 /* 767 * Its slot - 1 since nxt_slot is the next tick that 768 * will go off since the hpts is awake 769 */ 770 if (logging_on) { 771 tcp_hpts_log_it(hpts, inp, HPTSLOG_INSERT_NORMAL, slot, 0); 772 } 773 /* 774 * We want to make sure that we don't place a inp in 775 * the range of p_cur_slot <-> p_nxt_slot. If we 776 * take from p_nxt_slot to the end, plus p_cur_slot 777 * and then take away 2, we will know how many is 778 * the max slots we can use. 779 */ 780 if (hpts->p_nxt_slot > hpts->p_cur_slot) { 781 /* 782 * Non-wrap case nxt_slot <-> cur_slot we 783 * don't want to land in. So the diff gives 784 * us what is taken away from the number of 785 * slots. 786 */ 787 largest_slot = NUM_OF_HPTSI_SLOTS - (hpts->p_nxt_slot - hpts->p_cur_slot); 788 } else if (hpts->p_nxt_slot == hpts->p_cur_slot) { 789 largest_slot = NUM_OF_HPTSI_SLOTS - 2; 790 } else { 791 /* 792 * Wrap case so the diff gives us the number 793 * of slots that we can land in. 794 */ 795 largest_slot = hpts->p_cur_slot - hpts->p_nxt_slot; 796 } 797 /* 798 * We take away two so we never have a problem (20 799 * usec's) out of 1024000 usecs 800 */ 801 largest_slot -= 2; 802 if (inp->inp_hpts_request > largest_slot) { 803 /* 804 * Restrict max jump of slots and remember 805 * leftover 806 */ 807 slot = largest_slot; 808 inp->inp_hpts_request -= largest_slot; 809 } else { 810 /* This one will run when we hit it */ 811 inp->inp_hpts_request = 0; 812 } 813 if (hpts->p_nxt_slot == hpts->p_cur_slot) 814 slot_calc = (hpts->p_nxt_slot + slot) % NUM_OF_HPTSI_SLOTS; 815 else 816 slot_calc = (hpts->p_nxt_slot + slot - 1) % NUM_OF_HPTSI_SLOTS; 817 if (slot_calc == hpts->p_cur_slot) { 818 #ifdef INVARIANTS 819 /* TSNH */ 820 panic("Hpts:%p impossible slot calculation slot_calc:%u slot:%u largest:%u\n", 821 hpts, slot_calc, slot, largest_slot); 822 #endif 823 if (slot_calc) 824 slot_calc--; 825 else 826 slot_calc = NUM_OF_HPTSI_SLOTS - 1; 827 } 828 inp->inp_hptsslot = slot_calc; 829 if (diag) { 830 diag->inp_hptsslot = inp->inp_hptsslot; 831 } 832 } else { 833 /* 834 * The hpts is sleeping, we need to figure out where 835 * it will wake up at and if we need to reschedule 836 * its time-out. 
837 */ 838 uint32_t have_slept, yet_to_sleep; 839 uint32_t slot_now; 840 struct timeval tv; 841 842 ticknow = tcp_gethptstick(&tv); 843 slot_now = ticknow % NUM_OF_HPTSI_SLOTS; 844 /* 845 * The user wants to be inserted at (slot_now + 846 * slot) % NUM_OF_HPTSI_SLOTS, so lets set that up. 847 */ 848 largest_slot = NUM_OF_HPTSI_SLOTS - 2; 849 if (inp->inp_hpts_request > largest_slot) { 850 /* Adjust the residual in inp_hpts_request */ 851 slot = largest_slot; 852 inp->inp_hpts_request -= largest_slot; 853 } else { 854 /* No residual it all fits */ 855 inp->inp_hpts_request = 0; 856 } 857 inp->inp_hptsslot = (slot_now + slot) % NUM_OF_HPTSI_SLOTS; 858 if (diag) { 859 diag->slot_now = slot_now; 860 diag->inp_hptsslot = inp->inp_hptsslot; 861 diag->p_on_min_sleep = hpts->p_on_min_sleep; 862 } 863 if (logging_on) { 864 tcp_hpts_log_it(hpts, inp, HPTSLOG_INSERT_SLEEPER, slot, ticknow); 865 } 866 /* Now do we need to restart the hpts's timer? */ 867 if (TSTMP_GT(ticknow, hpts->p_curtick)) 868 have_slept = ticknow - hpts->p_curtick; 869 else 870 have_slept = 0; 871 if (have_slept < hpts->p_hpts_sleep_time) { 872 /* This should be what happens */ 873 yet_to_sleep = hpts->p_hpts_sleep_time - have_slept; 874 } else { 875 /* We are over-due */ 876 yet_to_sleep = 0; 877 need_wakeup = 1; 878 } 879 if (diag) { 880 diag->have_slept = have_slept; 881 diag->yet_to_sleep = yet_to_sleep; 882 diag->hpts_sleep_time = hpts->p_hpts_sleep_time; 883 } 884 if ((hpts->p_on_min_sleep == 0) && (yet_to_sleep > slot)) { 885 /* 886 * We need to reschedule the hptss time-out. 887 */ 888 hpts->p_hpts_sleep_time = slot; 889 need_new_to = slot * HPTS_TICKS_PER_USEC; 890 } 891 } 892 hpts_sane_pace_insert(hpts, inp, &hpts->p_hptss[inp->inp_hptsslot], line, noref); 893 if (logging_on) { 894 tcp_hpts_log_it(hpts, inp, HPTSLOG_INSERTED, slot, ticknow); 895 } 896 /* 897 * Now how far is the hpts sleeping to? if active is 1, its 898 * up and ticking we do nothing, otherwise we may need to 899 * reschedule its callout if need_new_to is set from above. 900 */ 901 if (need_wakeup) { 902 if (logging_on) { 903 tcp_hpts_log_it(hpts, inp, HPTSLOG_RESCHEDULE, 1, 0); 904 } 905 hpts->p_direct_wake = 1; 906 tcp_wakehpts(hpts); 907 if (diag) { 908 diag->need_new_to = 0; 909 diag->co_ret = 0xffff0000; 910 } 911 } else if (need_new_to) { 912 int32_t co_ret; 913 struct timeval tv; 914 sbintime_t sb; 915 916 tv.tv_sec = 0; 917 tv.tv_usec = 0; 918 while (need_new_to > HPTS_USEC_IN_SEC) { 919 tv.tv_sec++; 920 need_new_to -= HPTS_USEC_IN_SEC; 921 } 922 tv.tv_usec = need_new_to; 923 sb = tvtosbt(tv); 924 if (tcp_hpts_callout_skip_swi == 0) { 925 co_ret = callout_reset_sbt_on(&hpts->co, sb, 0, 926 hpts_timeout_swi, hpts, hpts->p_cpu, 927 (C_DIRECT_EXEC | C_PREL(tcp_hpts_precision))); 928 } else { 929 co_ret = callout_reset_sbt_on(&hpts->co, sb, 0, 930 hpts_timeout_dir, hpts, 931 hpts->p_cpu, 932 C_PREL(tcp_hpts_precision)); 933 } 934 if (diag) { 935 diag->need_new_to = need_new_to; 936 diag->co_ret = co_ret; 937 } 938 } 939 } else { 940 #ifdef INVARIANTS 941 panic("Hpts:%p tp:%p already on hpts and add?", hpts, inp); 942 #endif 943 } 944 } 945 946 uint32_t 947 tcp_hpts_insert_diag(struct inpcb *inp, uint32_t slot, int32_t line, struct hpts_diag *diag){ 948 struct tcp_hpts_entry *hpts; 949 uint32_t slot_on, cts; 950 struct timeval tv; 951 952 /* 953 * We now return the next-slot the hpts will be on, beyond its 954 * current run (if up) or where it was when it stopped if it is 955 * sleeping. 
956 */ 957 INP_WLOCK_ASSERT(inp); 958 hpts = tcp_hpts_lock(inp); 959 if (in_ts_percision) 960 microuptime(&tv); 961 else 962 getmicrouptime(&tv); 963 cts = tcp_tv_to_usectick(&tv); 964 tcp_hpts_insert_locked(hpts, inp, slot, cts, line, diag, 0); 965 slot_on = hpts->p_nxt_slot; 966 mtx_unlock(&hpts->p_mtx); 967 return (slot_on); 968 } 969 970 uint32_t 971 __tcp_hpts_insert(struct inpcb *inp, uint32_t slot, int32_t line){ 972 return (tcp_hpts_insert_diag(inp, slot, line, NULL)); 973 } 974 975 int 976 __tcp_queue_to_input_locked(struct inpcb *inp, struct tcp_hpts_entry *hpts, int32_t line) 977 { 978 int32_t retval = 0; 979 980 HPTS_MTX_ASSERT(hpts); 981 if (inp->inp_in_input == 0) { 982 /* Ok we need to set it on the hpts in the current slot */ 983 hpts_sane_input_insert(hpts, inp, line); 984 retval = 1; 985 if (hpts->p_hpts_active == 0) { 986 /* 987 * Activate the hpts if it is sleeping. 988 */ 989 if (logging_on) { 990 tcp_hpts_log_it(hpts, inp, HPTSLOG_WAKEUP_INPUT, 0, 0); 991 } 992 retval = 2; 993 hpts->p_direct_wake = 1; 994 tcp_wakeinput(hpts); 995 } 996 } else if (hpts->p_hpts_active == 0) { 997 retval = 4; 998 hpts->p_direct_wake = 1; 999 tcp_wakeinput(hpts); 1000 } 1001 return (retval); 1002 } 1003 1004 void 1005 tcp_queue_pkt_to_input(struct tcpcb *tp, struct mbuf *m, struct tcphdr *th, 1006 int32_t tlen, int32_t drop_hdrlen, uint8_t iptos) 1007 { 1008 /* Setup packet for input first */ 1009 INP_WLOCK_ASSERT(tp->t_inpcb); 1010 m->m_pkthdr.pace_thoff = (uint16_t) ((caddr_t)th - mtod(m, caddr_t)); 1011 m->m_pkthdr.pace_tlen = (uint16_t) tlen; 1012 m->m_pkthdr.pace_drphdrlen = drop_hdrlen; 1013 m->m_pkthdr.pace_tos = iptos; 1014 m->m_pkthdr.pace_lock = (curthread->td_epochnest != 0); 1015 if (tp->t_in_pkt == NULL) { 1016 tp->t_in_pkt = m; 1017 tp->t_tail_pkt = m; 1018 } else { 1019 tp->t_tail_pkt->m_nextpkt = m; 1020 tp->t_tail_pkt = m; 1021 } 1022 } 1023 1024 1025 int32_t 1026 __tcp_queue_to_input(struct tcpcb *tp, struct mbuf *m, struct tcphdr *th, 1027 int32_t tlen, int32_t drop_hdrlen, uint8_t iptos, int32_t line){ 1028 struct tcp_hpts_entry *hpts; 1029 int32_t ret; 1030 1031 tcp_queue_pkt_to_input(tp, m, th, tlen, drop_hdrlen, iptos); 1032 hpts = tcp_input_lock(tp->t_inpcb); 1033 ret = __tcp_queue_to_input_locked(tp->t_inpcb, hpts, line); 1034 mtx_unlock(&hpts->p_mtx); 1035 return (ret); 1036 } 1037 1038 void 1039 __tcp_set_inp_to_drop(struct inpcb *inp, uint16_t reason, int32_t line) 1040 { 1041 struct tcp_hpts_entry *hpts; 1042 struct tcpcb *tp; 1043 1044 tp = intotcpcb(inp); 1045 hpts = tcp_input_lock(tp->t_inpcb); 1046 if (inp->inp_in_input == 0) { 1047 /* Ok we need to set it on the hpts in the current slot */ 1048 hpts_sane_input_insert(hpts, inp, line); 1049 if (hpts->p_hpts_active == 0) { 1050 /* 1051 * Activate the hpts if it is sleeping. 1052 */ 1053 hpts->p_direct_wake = 1; 1054 tcp_wakeinput(hpts); 1055 } 1056 } else if (hpts->p_hpts_active == 0) { 1057 hpts->p_direct_wake = 1; 1058 tcp_wakeinput(hpts); 1059 } 1060 inp->inp_hpts_drop_reas = reason; 1061 mtx_unlock(&hpts->p_mtx); 1062 } 1063 1064 static uint16_t 1065 hpts_random_cpu(struct inpcb *inp){ 1066 /* 1067 * No flow type set distribute the load randomly. 1068 */ 1069 uint16_t cpuid; 1070 uint32_t ran; 1071 1072 /* 1073 * If one has been set use it i.e. we want both in and out on the 1074 * same hpts. 
1075 */ 1076 if (inp->inp_input_cpu_set) { 1077 return (inp->inp_input_cpu); 1078 } else if (inp->inp_hpts_cpu_set) { 1079 return (inp->inp_hpts_cpu); 1080 } 1081 /* Nothing set use a random number */ 1082 ran = arc4random(); 1083 cpuid = (ran & 0xffff) % mp_ncpus; 1084 return (cpuid); 1085 } 1086 1087 static uint16_t 1088 hpts_cpuid(struct inpcb *inp){ 1089 u_int cpuid; 1090 #ifdef NUMA 1091 struct hpts_domain_info *di; 1092 #endif 1093 1094 /* 1095 * If one has been set use it i.e. we want both in and out on the 1096 * same hpts. 1097 */ 1098 if (inp->inp_input_cpu_set) { 1099 return (inp->inp_input_cpu); 1100 } else if (inp->inp_hpts_cpu_set) { 1101 return (inp->inp_hpts_cpu); 1102 } 1103 /* If one is set the other must be the same */ 1104 #ifdef RSS 1105 cpuid = rss_hash2cpuid(inp->inp_flowid, inp->inp_flowtype); 1106 if (cpuid == NETISR_CPUID_NONE) 1107 return (hpts_random_cpu(inp)); 1108 else 1109 return (cpuid); 1110 #else 1111 /* 1112 * We don't have a flowid -> cpuid mapping, so cheat and just map 1113 * unknown cpuids to curcpu. Not the best, but apparently better 1114 * than defaulting to swi 0. 1115 */ 1116 1117 if (inp->inp_flowtype == M_HASHTYPE_NONE) 1118 return (hpts_random_cpu(inp)); 1119 /* 1120 * Hash to a thread based on the flowid. If we are using numa, 1121 * then restrict the hash to the numa domain where the inp lives. 1122 */ 1123 #ifdef NUMA 1124 if (tcp_bind_threads == 2 && inp->inp_numa_domain != M_NODOM) { 1125 di = &hpts_domains[inp->inp_numa_domain]; 1126 cpuid = di->cpu[inp->inp_flowid % di->count]; 1127 } else 1128 #endif 1129 cpuid = inp->inp_flowid % mp_ncpus; 1130 1131 return (cpuid); 1132 #endif 1133 } 1134 1135 /* 1136 * Do NOT try to optimize the processing of inp's 1137 * by first pulling off all the inp's into a temporary 1138 * list (e.g. TAILQ_CONCAT). If you do that the subtle 1139 * interactions of switching CPU's will kill because of 1140 * problems in the linked list manipulation. Basically 1141 * you would switch cpu's with the hpts mutex locked 1142 * but then while you were processing one of the inp's 1143 * some other one that you switch will get a new 1144 * packet on the different CPU. It will insert it 1145 * on the new hptss input list. Creating a temporary 1146 * link in the inp will not fix it either, since 1147 * the other hpts will be doing the same thing and 1148 * you will both end up using the temporary link. 1149 * 1150 * You will die in an ASSERT for tailq corruption if you 1151 * run INVARIANTS or you will die horribly without 1152 * INVARIANTS in some unknown way with a corrupt linked 1153 * list. 
1154 */ 1155 static void 1156 tcp_input_data(struct tcp_hpts_entry *hpts, struct timeval *tv) 1157 { 1158 struct mbuf *m, *n; 1159 struct tcpcb *tp; 1160 struct inpcb *inp; 1161 uint16_t drop_reason; 1162 int16_t set_cpu; 1163 uint32_t did_prefetch = 0; 1164 int32_t ti_locked = TI_UNLOCKED; 1165 struct epoch_tracker et; 1166 1167 HPTS_MTX_ASSERT(hpts); 1168 while ((inp = TAILQ_FIRST(&hpts->p_input)) != NULL) { 1169 HPTS_MTX_ASSERT(hpts); 1170 hpts_sane_input_remove(hpts, inp, 0); 1171 if (inp->inp_input_cpu_set == 0) { 1172 set_cpu = 1; 1173 } else { 1174 set_cpu = 0; 1175 } 1176 hpts->p_inp = inp; 1177 drop_reason = inp->inp_hpts_drop_reas; 1178 inp->inp_in_input = 0; 1179 mtx_unlock(&hpts->p_mtx); 1180 CURVNET_SET(inp->inp_vnet); 1181 if (drop_reason) { 1182 INP_INFO_RLOCK_ET(&V_tcbinfo, et); 1183 ti_locked = TI_RLOCKED; 1184 } else { 1185 ti_locked = TI_UNLOCKED; 1186 } 1187 INP_WLOCK(inp); 1188 if ((inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) || 1189 (inp->inp_flags2 & INP_FREED)) { 1190 out: 1191 hpts->p_inp = NULL; 1192 if (ti_locked == TI_RLOCKED) { 1193 INP_INFO_RUNLOCK_ET(&V_tcbinfo, et); 1194 } 1195 if (in_pcbrele_wlocked(inp) == 0) { 1196 INP_WUNLOCK(inp); 1197 } 1198 ti_locked = TI_UNLOCKED; 1199 CURVNET_RESTORE(); 1200 mtx_lock(&hpts->p_mtx); 1201 continue; 1202 } 1203 tp = intotcpcb(inp); 1204 if ((tp == NULL) || (tp->t_inpcb == NULL)) { 1205 goto out; 1206 } 1207 if (drop_reason) { 1208 /* This tcb is being destroyed for drop_reason */ 1209 m = tp->t_in_pkt; 1210 if (m) 1211 n = m->m_nextpkt; 1212 else 1213 n = NULL; 1214 tp->t_in_pkt = NULL; 1215 while (m) { 1216 m_freem(m); 1217 m = n; 1218 if (m) 1219 n = m->m_nextpkt; 1220 } 1221 tp = tcp_drop(tp, drop_reason); 1222 INP_INFO_RUNLOCK_ET(&V_tcbinfo, et); 1223 if (tp == NULL) { 1224 INP_WLOCK(inp); 1225 } 1226 if (in_pcbrele_wlocked(inp) == 0) 1227 INP_WUNLOCK(inp); 1228 CURVNET_RESTORE(); 1229 mtx_lock(&hpts->p_mtx); 1230 continue; 1231 } 1232 if (set_cpu) { 1233 /* 1234 * Setup so the next time we will move to the right 1235 * CPU. This should be a rare event. It will 1236 * sometimes happens when we are the client side 1237 * (usually not the server). Somehow tcp_output() 1238 * gets called before the tcp_do_segment() sets the 1239 * intial state. This means the r_cpu and r_hpts_cpu 1240 * is 0. We get on the hpts, and then tcp_input() 1241 * gets called setting up the r_cpu to the correct 1242 * value. The hpts goes off and sees the mis-match. 
		if (set_cpu) {
			/*
			 * Setup so the next time we will move to the right
			 * CPU.  This should be a rare event.  It will
			 * sometimes happen when we are the client side
			 * (usually not the server).  Somehow tcp_output()
			 * gets called before tcp_do_segment() sets the
			 * initial state.  This means the r_cpu and
			 * r_hpts_cpu are 0.  We get on the hpts, and then
			 * tcp_input() gets called setting up the r_cpu to
			 * the correct value.  The hpts goes off and sees
			 * the mis-match.  We simply correct it here and
			 * the CPU will switch to the new hpts the next time
			 * the tcb gets added to the hpts (not this time) :-)
			 */
			tcp_set_hpts(inp);
		}
		m = tp->t_in_pkt;
		n = NULL;
		if (m != NULL &&
		    (m->m_pkthdr.pace_lock == TI_RLOCKED ||
		    tp->t_state != TCPS_ESTABLISHED)) {
			ti_locked = TI_RLOCKED;
			INP_INFO_RLOCK_ET(&V_tcbinfo, et);
			m = tp->t_in_pkt;
		}
		if (in_newts_every_tcb) {
			if (in_ts_percision)
				microuptime(tv);
			else
				getmicrouptime(tv);
		}
		if (tp->t_fb_ptr != NULL) {
			kern_prefetch(tp->t_fb_ptr, &did_prefetch);
			did_prefetch = 1;
		}
		/* Any input work to do?  If so, do it first */
		if ((m != NULL) && (m == tp->t_in_pkt)) {
			struct tcphdr *th;
			int32_t tlen, drop_hdrlen, nxt_pkt;
			uint8_t iptos;

			n = m->m_nextpkt;
			tp->t_in_pkt = tp->t_tail_pkt = NULL;
			while (m) {
				th = (struct tcphdr *)(mtod(m, caddr_t) + m->m_pkthdr.pace_thoff);
				tlen = m->m_pkthdr.pace_tlen;
				drop_hdrlen = m->m_pkthdr.pace_drphdrlen;
				iptos = m->m_pkthdr.pace_tos;
				m->m_nextpkt = NULL;
				if (n)
					nxt_pkt = 1;
				else
					nxt_pkt = 0;
				inp->inp_input_calls = 1;
				if (tp->t_fb->tfb_tcp_hpts_do_segment) {
					/* Use the hpts specific do_segment */
					(*tp->t_fb->tfb_tcp_hpts_do_segment) (m, th, inp->inp_socket,
					    tp, drop_hdrlen,
					    tlen, iptos, nxt_pkt, tv);
				} else {
					/* Use the default do_segment */
					(*tp->t_fb->tfb_tcp_do_segment) (m, th, inp->inp_socket,
					    tp, drop_hdrlen,
					    tlen, iptos);
				}
				if (ti_locked == TI_RLOCKED)
					INP_INFO_RUNLOCK_ET(&V_tcbinfo, et);
				/*
				 * Do segment returns unlocked; we need the
				 * lock again but we also need some kasserts
				 * here.
				 */
				INP_INFO_WUNLOCK_ASSERT(&V_tcbinfo);
				INP_UNLOCK_ASSERT(inp);
				m = n;
				if (m)
					n = m->m_nextpkt;
				if (m != NULL &&
				    m->m_pkthdr.pace_lock == TI_RLOCKED) {
					INP_INFO_RLOCK_ET(&V_tcbinfo, et);
					ti_locked = TI_RLOCKED;
				} else
					ti_locked = TI_UNLOCKED;
				INP_WLOCK(inp);
				/*
				 * Since we have an opening here we must
				 * re-check if the tcb went away while we
				 * were getting the lock(s).
				 */
				if ((inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) ||
				    (inp->inp_flags2 & INP_FREED)) {
					while (m) {
						m_freem(m);
						m = n;
						if (m)
							n = m->m_nextpkt;
					}
					goto out;
				}
				/*
				 * Now that we hold the INP lock, check if
				 * we need to upgrade our lock.
				 */
				if (ti_locked == TI_UNLOCKED &&
				    (tp->t_state != TCPS_ESTABLISHED)) {
					ti_locked = TI_RLOCKED;
					INP_INFO_RLOCK_ET(&V_tcbinfo, et);
				}
			}	/** end while(m) */
		}		/** end if ((m != NULL) && (m == tp->t_in_pkt)) */
		if (in_pcbrele_wlocked(inp) == 0)
			INP_WUNLOCK(inp);
		if (ti_locked == TI_RLOCKED)
			INP_INFO_RUNLOCK_ET(&V_tcbinfo, et);
		INP_INFO_WUNLOCK_ASSERT(&V_tcbinfo);
		INP_UNLOCK_ASSERT(inp);
		ti_locked = TI_UNLOCKED;
		mtx_lock(&hpts->p_mtx);
		hpts->p_inp = NULL;
		CURVNET_RESTORE();
	}
}
1335 */ 1336 if (ti_locked == TI_UNLOCKED && 1337 (tp->t_state != TCPS_ESTABLISHED)) { 1338 ti_locked = TI_RLOCKED; 1339 INP_INFO_RLOCK_ET(&V_tcbinfo, et); 1340 } 1341 } /** end while(m) */ 1342 } /** end if ((m != NULL) && (m == tp->t_in_pkt)) */ 1343 if (in_pcbrele_wlocked(inp) == 0) 1344 INP_WUNLOCK(inp); 1345 if (ti_locked == TI_RLOCKED) 1346 INP_INFO_RUNLOCK_ET(&V_tcbinfo, et); 1347 INP_INFO_WUNLOCK_ASSERT(&V_tcbinfo); 1348 INP_UNLOCK_ASSERT(inp); 1349 ti_locked = TI_UNLOCKED; 1350 mtx_lock(&hpts->p_mtx); 1351 hpts->p_inp = NULL; 1352 CURVNET_RESTORE(); 1353 } 1354 } 1355 1356 static int 1357 tcp_hpts_est_run(struct tcp_hpts_entry *hpts) 1358 { 1359 int32_t ticks_to_run; 1360 1361 if (hpts->p_prevtick && (SEQ_GT(hpts->p_curtick, hpts->p_prevtick))) { 1362 ticks_to_run = hpts->p_curtick - hpts->p_prevtick; 1363 if (ticks_to_run >= (NUM_OF_HPTSI_SLOTS - 1)) { 1364 ticks_to_run = NUM_OF_HPTSI_SLOTS - 2; 1365 } 1366 } else { 1367 if (hpts->p_prevtick == hpts->p_curtick) { 1368 /* This happens when we get woken up right away */ 1369 return (-1); 1370 } 1371 ticks_to_run = 1; 1372 } 1373 /* Set in where we will be when we catch up */ 1374 hpts->p_nxt_slot = (hpts->p_cur_slot + ticks_to_run) % NUM_OF_HPTSI_SLOTS; 1375 if (hpts->p_nxt_slot == hpts->p_cur_slot) { 1376 panic("Impossible math -- hpts:%p p_nxt_slot:%d p_cur_slot:%d ticks_to_run:%d", 1377 hpts, hpts->p_nxt_slot, hpts->p_cur_slot, ticks_to_run); 1378 } 1379 return (ticks_to_run); 1380 } 1381 1382 static void 1383 tcp_hptsi(struct tcp_hpts_entry *hpts, struct timeval *ctick) 1384 { 1385 struct tcpcb *tp; 1386 struct inpcb *inp = NULL, *ninp; 1387 struct timeval tv; 1388 int32_t ticks_to_run, i, error, tick_now, interum_tick; 1389 int32_t paced_cnt = 0; 1390 int32_t did_prefetch = 0; 1391 int32_t prefetch_ninp = 0; 1392 int32_t prefetch_tp = 0; 1393 uint32_t cts; 1394 int16_t set_cpu; 1395 1396 HPTS_MTX_ASSERT(hpts); 1397 hpts->p_curtick = tcp_tv_to_hptstick(ctick); 1398 cts = tcp_tv_to_usectick(ctick); 1399 memcpy(&tv, ctick, sizeof(struct timeval)); 1400 hpts->p_cur_slot = hpts_tick(hpts, 1); 1401 1402 /* Figure out if we had missed ticks */ 1403 again: 1404 HPTS_MTX_ASSERT(hpts); 1405 ticks_to_run = tcp_hpts_est_run(hpts); 1406 if (!TAILQ_EMPTY(&hpts->p_input)) { 1407 tcp_input_data(hpts, &tv); 1408 } 1409 #ifdef INVARIANTS 1410 if (TAILQ_EMPTY(&hpts->p_input) && 1411 (hpts->p_on_inqueue_cnt != 0)) { 1412 panic("tp:%p in_hpts input empty but cnt:%d", 1413 hpts, hpts->p_on_inqueue_cnt); 1414 } 1415 #endif 1416 HPTS_MTX_ASSERT(hpts); 1417 /* Reset the ticks to run and time if we need too */ 1418 interum_tick = tcp_gethptstick(&tv); 1419 if (interum_tick != hpts->p_curtick) { 1420 /* Save off the new time we execute to */ 1421 *ctick = tv; 1422 hpts->p_curtick = interum_tick; 1423 cts = tcp_tv_to_usectick(&tv); 1424 hpts->p_cur_slot = hpts_tick(hpts, 1); 1425 ticks_to_run = tcp_hpts_est_run(hpts); 1426 } 1427 if (ticks_to_run == -1) { 1428 goto no_run; 1429 } 1430 if (logging_on) { 1431 tcp_hpts_log_it(hpts, inp, HPTSLOG_SETTORUN, ticks_to_run, 0); 1432 } 1433 if (hpts->p_on_queue_cnt == 0) { 1434 goto no_one; 1435 } 1436 HPTS_MTX_ASSERT(hpts); 1437 for (i = 0; i < ticks_to_run; i++) { 1438 /* 1439 * Calculate our delay, if there are no extra ticks there 1440 * was not any 1441 */ 1442 hpts->p_delayed_by = (ticks_to_run - (i + 1)) * HPTS_TICKS_PER_USEC; 1443 HPTS_MTX_ASSERT(hpts); 1444 while ((inp = TAILQ_FIRST(&hpts->p_hptss[hpts->p_cur_slot])) != NULL) { 1445 /* For debugging */ 1446 if (logging_on) { 1447 tcp_hpts_log_it(hpts, 
static void
tcp_hptsi(struct tcp_hpts_entry *hpts, struct timeval *ctick)
{
	struct tcpcb *tp;
	struct inpcb *inp = NULL, *ninp;
	struct timeval tv;
	int32_t ticks_to_run, i, error, tick_now, interum_tick;
	int32_t paced_cnt = 0;
	int32_t did_prefetch = 0;
	int32_t prefetch_ninp = 0;
	int32_t prefetch_tp = 0;
	uint32_t cts;
	int16_t set_cpu;

	HPTS_MTX_ASSERT(hpts);
	hpts->p_curtick = tcp_tv_to_hptstick(ctick);
	cts = tcp_tv_to_usectick(ctick);
	memcpy(&tv, ctick, sizeof(struct timeval));
	hpts->p_cur_slot = hpts_tick(hpts, 1);

	/* Figure out if we had missed ticks */
again:
	HPTS_MTX_ASSERT(hpts);
	ticks_to_run = tcp_hpts_est_run(hpts);
	if (!TAILQ_EMPTY(&hpts->p_input)) {
		tcp_input_data(hpts, &tv);
	}
#ifdef INVARIANTS
	if (TAILQ_EMPTY(&hpts->p_input) &&
	    (hpts->p_on_inqueue_cnt != 0)) {
		panic("tp:%p in_hpts input empty but cnt:%d",
		    hpts, hpts->p_on_inqueue_cnt);
	}
#endif
	HPTS_MTX_ASSERT(hpts);
	/* Reset the ticks to run and the time if we need to */
	interum_tick = tcp_gethptstick(&tv);
	if (interum_tick != hpts->p_curtick) {
		/* Save off the new time we execute to */
		*ctick = tv;
		hpts->p_curtick = interum_tick;
		cts = tcp_tv_to_usectick(&tv);
		hpts->p_cur_slot = hpts_tick(hpts, 1);
		ticks_to_run = tcp_hpts_est_run(hpts);
	}
	if (ticks_to_run == -1) {
		goto no_run;
	}
	if (logging_on) {
		tcp_hpts_log_it(hpts, inp, HPTSLOG_SETTORUN, ticks_to_run, 0);
	}
	if (hpts->p_on_queue_cnt == 0) {
		goto no_one;
	}
	HPTS_MTX_ASSERT(hpts);
	for (i = 0; i < ticks_to_run; i++) {
		/*
		 * Calculate our delay; if there are no extra ticks there
		 * was not any.
		 */
		hpts->p_delayed_by = (ticks_to_run - (i + 1)) * HPTS_TICKS_PER_USEC;
		HPTS_MTX_ASSERT(hpts);
		while ((inp = TAILQ_FIRST(&hpts->p_hptss[hpts->p_cur_slot])) != NULL) {
			/* For debugging */
			if (logging_on) {
				tcp_hpts_log_it(hpts, inp, HPTSLOG_HPTSI, ticks_to_run, i);
			}
			hpts->p_inp = inp;
			paced_cnt++;
			if (hpts->p_cur_slot != inp->inp_hptsslot) {
				panic("Hpts:%p inp:%p slot mis-aligned %u vs %u",
				    hpts, inp, hpts->p_cur_slot, inp->inp_hptsslot);
			}
			/* Now pull it */
			if (inp->inp_hpts_cpu_set == 0) {
				set_cpu = 1;
			} else {
				set_cpu = 0;
			}
			hpts_sane_pace_remove(hpts, inp, &hpts->p_hptss[hpts->p_cur_slot], 0);
			if ((ninp = TAILQ_FIRST(&hpts->p_hptss[hpts->p_cur_slot])) != NULL) {
				/* We prefetch the next inp if possible */
				kern_prefetch(ninp, &prefetch_ninp);
				prefetch_ninp = 1;
			}
			if (inp->inp_hpts_request) {
				/*
				 * This guy is deferred out further in time
				 * than our wheel had available on it.
				 * Push him back on the wheel.
				 */
				int32_t remaining_slots;

				remaining_slots = ticks_to_run - (i + 1);
				if (inp->inp_hpts_request > remaining_slots) {
					/*
					 * Keep INVARIANTS happy by clearing
					 * the flag
					 */
					tcp_hpts_insert_locked(hpts, inp, inp->inp_hpts_request, cts, __LINE__, NULL, 1);
					hpts->p_inp = NULL;
					continue;
				}
				inp->inp_hpts_request = 0;
			}
1559 */ 1560 #ifdef INVARIANTS 1561 if (mtx_owned(&hpts->p_mtx)) { 1562 panic("Hpts:%p owns mtx before tcp-output:%d", 1563 hpts, __LINE__); 1564 } 1565 #endif 1566 if (tp->t_fb_ptr != NULL) { 1567 kern_prefetch(tp->t_fb_ptr, &did_prefetch); 1568 did_prefetch = 1; 1569 } 1570 inp->inp_hpts_calls = 1; 1571 if (tp->t_fb->tfb_tcp_output_wtime != NULL) { 1572 error = (*tp->t_fb->tfb_tcp_output_wtime) (tp, &tv); 1573 } else { 1574 error = tp->t_fb->tfb_tcp_output(tp); 1575 } 1576 if (ninp && ninp->inp_ppcb) { 1577 /* 1578 * If we have a nxt inp, see if we can 1579 * prefetch its ppcb. Note this may seem 1580 * "risky" since we have no locks (other 1581 * than the previous inp) and there no 1582 * assurance that ninp was not pulled while 1583 * we were processing inp and freed. If this 1584 * occured it could mean that either: 1585 * 1586 * a) Its NULL (which is fine we won't go 1587 * here) <or> b) Its valid (which is cool we 1588 * will prefetch it) <or> c) The inp got 1589 * freed back to the slab which was 1590 * reallocated. Then the piece of memory was 1591 * re-used and something else (not an 1592 * address) is in inp_ppcb. If that occurs 1593 * we don't crash, but take a TLB shootdown 1594 * performance hit (same as if it was NULL 1595 * and we tried to pre-fetch it). 1596 * 1597 * Considering that the likelyhood of <c> is 1598 * quite rare we will take a risk on doing 1599 * this. If performance drops after testing 1600 * we can always take this out. NB: the 1601 * kern_prefetch on amd64 actually has 1602 * protection against a bad address now via 1603 * the DMAP_() tests. This will prevent the 1604 * TLB hit, and instead if <c> occurs just 1605 * cause us to load cache with a useless 1606 * address (to us). 1607 */ 1608 kern_prefetch(ninp->inp_ppcb, &prefetch_tp); 1609 prefetch_tp = 1; 1610 } 1611 INP_WUNLOCK(inp); 1612 INP_UNLOCK_ASSERT(inp); 1613 CURVNET_RESTORE(); 1614 #ifdef INVARIANTS 1615 if (mtx_owned(&hpts->p_mtx)) { 1616 panic("Hpts:%p owns mtx prior-to lock line:%d", 1617 hpts, __LINE__); 1618 } 1619 #endif 1620 mtx_lock(&hpts->p_mtx); 1621 if (logging_on) 1622 tcp_hpts_log_it(hpts, hpts->p_inp, HPTSLOG_INP_DONE, 0, 4); 1623 hpts->p_inp = NULL; 1624 } 1625 HPTS_MTX_ASSERT(hpts); 1626 hpts->p_inp = NULL; 1627 hpts->p_cur_slot++; 1628 if (hpts->p_cur_slot >= NUM_OF_HPTSI_SLOTS) { 1629 hpts->p_cur_slot = 0; 1630 } 1631 } 1632 no_one: 1633 HPTS_MTX_ASSERT(hpts); 1634 hpts->p_prevtick = hpts->p_curtick; 1635 hpts->p_delayed_by = 0; 1636 /* 1637 * Check to see if we took an excess amount of time and need to run 1638 * more ticks (if we did not hit eno-bufs). 1639 */ 1640 /* Re-run any input that may be there */ 1641 (void)tcp_gethptstick(&tv); 1642 if (!TAILQ_EMPTY(&hpts->p_input)) { 1643 tcp_input_data(hpts, &tv); 1644 } 1645 #ifdef INVARIANTS 1646 if (TAILQ_EMPTY(&hpts->p_input) && 1647 (hpts->p_on_inqueue_cnt != 0)) { 1648 panic("tp:%p in_hpts input empty but cnt:%d", 1649 hpts, hpts->p_on_inqueue_cnt); 1650 } 1651 #endif 1652 tick_now = tcp_gethptstick(&tv); 1653 if (SEQ_GT(tick_now, hpts->p_prevtick)) { 1654 struct timeval res; 1655 1656 /* Did we really spend a full tick or more in here? 
no_one:
	HPTS_MTX_ASSERT(hpts);
	hpts->p_prevtick = hpts->p_curtick;
	hpts->p_delayed_by = 0;
	/*
	 * Check to see if we took an excess amount of time and need to run
	 * more ticks (if we did not hit ENOBUFS).
	 */
	/* Re-run any input that may be there */
	(void)tcp_gethptstick(&tv);
	if (!TAILQ_EMPTY(&hpts->p_input)) {
		tcp_input_data(hpts, &tv);
	}
#ifdef INVARIANTS
	if (TAILQ_EMPTY(&hpts->p_input) &&
	    (hpts->p_on_inqueue_cnt != 0)) {
		panic("tp:%p in_hpts input empty but cnt:%d",
		    hpts, hpts->p_on_inqueue_cnt);
	}
#endif
	tick_now = tcp_gethptstick(&tv);
	if (SEQ_GT(tick_now, hpts->p_prevtick)) {
		struct timeval res;

		/* Did we really spend a full tick or more in here? */
		timersub(&tv, ctick, &res);
		if (res.tv_sec || (res.tv_usec >= HPTS_TICKS_PER_USEC)) {
			counter_u64_add(hpts_loops, 1);
			if (logging_on) {
				tcp_hpts_log_it(hpts, inp, HPTSLOG_TOLONG, (uint32_t) res.tv_usec, tick_now);
			}
			*ctick = res;
			hpts->p_curtick = tick_now;
			goto again;
		}
	}
no_run:
	{
		uint32_t t = 0, i, fnd = 0;

		if (hpts->p_on_queue_cnt) {
			/*
			 * Find the next slot that is occupied and use that
			 * for the sleep time.
			 */
			for (i = 1, t = hpts->p_nxt_slot; i < NUM_OF_HPTSI_SLOTS; i++) {
				if (TAILQ_EMPTY(&hpts->p_hptss[t]) == 0) {
					fnd = 1;
					break;
				}
				t = (t + 1) % NUM_OF_HPTSI_SLOTS;
			}
			if (fnd) {
				hpts->p_hpts_sleep_time = i;
			} else {
				counter_u64_add(back_tosleep, 1);
#ifdef INVARIANTS
				panic("Hpts:%p cnt:%d but none found", hpts, hpts->p_on_queue_cnt);
#endif
				hpts->p_on_queue_cnt = 0;
				goto non_found;
			}
			t++;
		} else {
			/* No one on the wheel; sleep for all but 2 slots */
non_found:
			if (hpts_sleep_max == 0)
				hpts_sleep_max = 1;
			hpts->p_hpts_sleep_time = min((NUM_OF_HPTSI_SLOTS - 2), hpts_sleep_max);
			t = 0;
		}
		if (logging_on) {
			tcp_hpts_log_it(hpts, inp, HPTSLOG_SLEEPSET, t, (hpts->p_hpts_sleep_time * HPTS_TICKS_PER_USEC));
		}
	}
}

void
__tcp_set_hpts(struct inpcb *inp, int32_t line)
{
	struct tcp_hpts_entry *hpts;

	INP_WLOCK_ASSERT(inp);
	hpts = tcp_hpts_lock(inp);
	if ((inp->inp_in_hpts == 0) &&
	    (inp->inp_hpts_cpu_set == 0)) {
		inp->inp_hpts_cpu = hpts_cpuid(inp);
		inp->inp_hpts_cpu_set = 1;
	}
	mtx_unlock(&hpts->p_mtx);
	hpts = tcp_input_lock(inp);
	if ((inp->inp_input_cpu_set == 0) &&
	    (inp->inp_in_input == 0)) {
		inp->inp_input_cpu = hpts_cpuid(inp);
		inp->inp_input_cpu_set = 1;
	}
	mtx_unlock(&hpts->p_mtx);
}

uint16_t
tcp_hpts_delayedby(struct inpcb *inp)
{
	return (tcp_pace.rp_ent[inp->inp_hpts_cpu]->p_delayed_by);
}
static void
tcp_hpts_thread(void *ctx)
{
	struct tcp_hpts_entry *hpts;
	struct timeval tv;
	sbintime_t sb;

	hpts = (struct tcp_hpts_entry *)ctx;
	mtx_lock(&hpts->p_mtx);
	if (hpts->p_direct_wake) {
		/* Signaled by input */
		if (logging_on)
			tcp_hpts_log_it(hpts, NULL, HPTSLOG_AWAKE, 1, 1);
		callout_stop(&hpts->co);
	} else {
		/* Timed out */
		if (callout_pending(&hpts->co) ||
		    !callout_active(&hpts->co)) {
			if (logging_on)
				tcp_hpts_log_it(hpts, NULL, HPTSLOG_AWAKE, 2, 2);
			mtx_unlock(&hpts->p_mtx);
			return;
		}
		callout_deactivate(&hpts->co);
		if (logging_on)
			tcp_hpts_log_it(hpts, NULL, HPTSLOG_AWAKE, 3, 3);
	}
	hpts->p_hpts_active = 1;
	(void)tcp_gethptstick(&tv);
	tcp_hptsi(hpts, &tv);
	HPTS_MTX_ASSERT(hpts);
	tv.tv_sec = 0;
	tv.tv_usec = hpts->p_hpts_sleep_time * HPTS_TICKS_PER_USEC;
	if (tcp_min_hptsi_time && (tv.tv_usec < tcp_min_hptsi_time)) {
		tv.tv_usec = tcp_min_hptsi_time;
		hpts->p_on_min_sleep = 1;
	} else {
		/* Clear the min sleep flag */
		hpts->p_on_min_sleep = 0;
	}
	hpts->p_hpts_active = 0;
	sb = tvtosbt(tv);
	if (tcp_hpts_callout_skip_swi == 0) {
		callout_reset_sbt_on(&hpts->co, sb, 0,
		    hpts_timeout_swi, hpts, hpts->p_cpu,
		    (C_DIRECT_EXEC | C_PREL(tcp_hpts_precision)));
	} else {
		callout_reset_sbt_on(&hpts->co, sb, 0,
		    hpts_timeout_dir, hpts,
		    hpts->p_cpu,
		    C_PREL(tcp_hpts_precision));
	}
	hpts->p_direct_wake = 0;
	mtx_unlock(&hpts->p_mtx);
}

#undef timersub
0; 1891 1892 /* 1893 * Now lets start ithreads to handle the hptss. 1894 */ 1895 CPU_FOREACH(i) { 1896 hpts = tcp_pace.rp_ent[i]; 1897 hpts->p_cpu = i; 1898 error = swi_add(&hpts->ie, "hpts", 1899 tcp_hpts_thread, (void *)hpts, 1900 SWI_NET, INTR_MPSAFE, &hpts->ie_cookie); 1901 if (error) { 1902 panic("Can't add hpts:%p i:%d err:%d", 1903 hpts, i, error); 1904 } 1905 created++; 1906 if (tcp_bind_threads == 1) { 1907 if (intr_event_bind(hpts->ie, i) == 0) 1908 bound++; 1909 } else if (tcp_bind_threads == 2) { 1910 pc = pcpu_find(i); 1911 domain = pc->pc_domain; 1912 CPU_COPY(&cpuset_domain[domain], &cs); 1913 if (intr_event_bind_ithread_cpuset(hpts->ie, &cs) 1914 == 0) { 1915 bound++; 1916 count = hpts_domains[domain].count; 1917 hpts_domains[domain].cpu[count] = i; 1918 hpts_domains[domain].count++; 1919 } 1920 } 1921 tv.tv_sec = 0; 1922 tv.tv_usec = hpts->p_hpts_sleep_time * HPTS_TICKS_PER_USEC; 1923 sb = tvtosbt(tv); 1924 if (tcp_hpts_callout_skip_swi == 0) { 1925 callout_reset_sbt_on(&hpts->co, sb, 0, 1926 hpts_timeout_swi, hpts, hpts->p_cpu, 1927 (C_DIRECT_EXEC | C_PREL(tcp_hpts_precision))); 1928 } else { 1929 callout_reset_sbt_on(&hpts->co, sb, 0, 1930 hpts_timeout_dir, hpts, 1931 hpts->p_cpu, 1932 C_PREL(tcp_hpts_precision)); 1933 } 1934 } 1935 /* 1936 * If we somehow have an empty domain, fall back to choosing 1937 * among all htps threads. 1938 */ 1939 for (i = 0; i < vm_ndomains; i++) { 1940 if (hpts_domains[i].count == 0) { 1941 tcp_bind_threads = 0; 1942 break; 1943 } 1944 } 1945 1946 printf("TCP Hpts created %d swi interrupt threads and bound %d to %s\n", 1947 created, bound, 1948 tcp_bind_threads == 2 ? "NUMA domains" : "cpus"); 1949 } 1950 1951 SYSINIT(tcphptsi, SI_SUB_KTHREAD_IDLE, SI_ORDER_ANY, tcp_init_hptsi, NULL); 1952 MODULE_VERSION(tcphpts, 1); 1953