1 /*-
2 * Copyright (c) 2016-2018 Netflix, Inc.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23 * SUCH DAMAGE.
24 *
25 */
26 #include <sys/cdefs.h>
27 #include "opt_inet.h"
28 #include "opt_inet6.h"
29 #include "opt_rss.h"
30
31 /**
32 * Some notes about usage.
33 *
34 * The tcp_hpts system is designed to provide a high precision timer
35 * system for tcp. Its main purpose is to provide a mechanism for
36 * pacing packets out onto the wire. It can be used in two ways
37 * by a given TCP stack (and those two methods can be used simultaneously).
38 *
39 * First, and probably the main way it is used by Rack and BBR, it can
40 * be used to call tcp_output() of a transport stack at some time in the future.
41 * The normal way this is done is that tcp_output() of the stack schedules
42 * itself to be called again by calling tcp_hpts_insert(tcpcb, slot). The
43 * slot is the time from now that the stack wants to be called but it
44 * must be converted to tcp_hpts's notion of slot. This is done with
45 * one of the macros HPTS_MS_TO_SLOTS or HPTS_USEC_TO_SLOTS. So a typical
46 * call from the tcp_output() routine might look like:
47 *
48 * tcp_hpts_insert(tp, HPTS_USEC_TO_SLOTS(550));
49 *
50 * The above would schedule tcp_output() to be called in 550 microseconds.
51 * Note that if using this mechanism the stack will want to add, near
52 * its top, a check to prevent unwanted calls (from user land or the
53 * arrival of incoming ACKs). So it would add something like:
54 *
55 * if (tcp_in_hpts(inp))
56 * return;
57 *
58 * to prevent output processing until the allotted time has gone by.
59 * Of course this is a bare-bones example and the stack will probably
60 * have more considerations than just the above.
61 *
62 * In order to run input queued segments from the HPTS context the
63 * tcp stack must define an input function for
64 * tfb_do_queued_segments(). This function understands
65 * how to dequeue an array of packets that were input and
66 * knows how to call the correct processing routine.
67 *
68 * Locking is important here as well, so most likely the
69 * stack will need to define tfb_do_segment_nounlock(),
70 * splitting tfb_do_segment() into two parts: a main processing
71 * part that does not unlock the INP and returns a value of 1 or 0.
72 * It returns 0 if all is well and the lock was not released. It
73 * returns 1 if we had to destroy the TCB (a reset received, etc.).
74 * The remainder of tfb_do_segment() then becomes just a simple call
75 * to the tfb_do_segment_nounlock() function, a check of the return
76 * code, and possibly an unlock; a sketch follows this comment block.
77 *
78 * The stack must also set the flag on the INP that it supports this
79 * feature, i.e. INP_SUPPORTS_MBUFQ. The LRO code recognizes
80 * this flag as well and will queue packets when it is set.
81 * There are other flags as well, INP_MBUF_QUEUE_READY and
82 * INP_DONT_SACK_QUEUE. The first flag tells the LRO code
83 * that we are in the pacer for output so there is no
84 * need to wake up the hpts system to get immediate
85 * input. The second tells the LRO code that it is okay,
86 * if a SACK arrives, to still defer input and let
87 * the current hpts timer run (this is usually set when
88 * a rack timer is up so we know SACKs are happening
89 * on the connection already and we don't want a wakeup yet).
90 *
91 * There is a common function within the rack_bbr_common code,
92 * ctf_do_queued_segments(). This function
93 * knows how to take the input queue of packets from tp->t_inqueue
94 * and process them, digging out all the arguments, calling any bpf tap and
95 * calling into tfb_do_segment_nounlock(). The common
96 * function (ctf_do_queued_segments()) requires that
97 * you have defined tfb_do_segment_nounlock() as
98 * described above.
99 */
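/*
 * A minimal sketch of the tfb_do_segment() split described above. The
 * stack name "foo" and the elided argument lists are illustrative
 * assumptions, not the actual KPI of any stack:
 *
 * static int
 * foo_do_segment_nounlock(struct mbuf *m, struct tcpcb *tp, ...)
 * {
 *	(main processing here; the INP stays write-locked)
 *	return (0);	(0 == all well, INP lock still held;
 *			 1 == TCB destroyed, lock was released)
 * }
 *
 * static void
 * foo_do_segment(struct mbuf *m, struct tcpcb *tp, ...)
 * {
 *	if (foo_do_segment_nounlock(m, tp, ...) == 0)
 *		INP_WUNLOCK(tptoinpcb(tp));
 * }
 */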
100
101 #include <sys/param.h>
102 #include <sys/bus.h>
103 #include <sys/interrupt.h>
104 #include <sys/module.h>
105 #include <sys/kernel.h>
106 #include <sys/hhook.h>
107 #include <sys/malloc.h>
108 #include <sys/mbuf.h>
109 #include <sys/proc.h> /* for proc0 declaration */
110 #include <sys/socket.h>
111 #include <sys/socketvar.h>
112 #include <sys/sysctl.h>
113 #include <sys/systm.h>
114 #include <sys/refcount.h>
115 #include <sys/sched.h>
116 #include <sys/queue.h>
117 #include <sys/smp.h>
118 #include <sys/counter.h>
119 #include <sys/time.h>
120 #include <sys/kthread.h>
121 #include <sys/kern_prefetch.h>
122
123 #include <vm/uma.h>
124 #include <vm/vm.h>
125
126 #include <net/route.h>
127 #include <net/vnet.h>
128
129 #ifdef RSS
130 #include <net/netisr.h>
131 #include <net/rss_config.h>
132 #endif
133
134 #define TCPSTATES /* for logging */
135
136 #include <netinet/in.h>
137 #include <netinet/in_kdtrace.h>
138 #include <netinet/in_pcb.h>
139 #include <netinet/ip.h>
140 #include <netinet/ip_icmp.h> /* required for icmp_var.h */
141 #include <netinet/icmp_var.h> /* for ICMP_BANDLIM */
142 #include <netinet/ip_var.h>
143 #include <netinet/ip6.h>
144 #include <netinet6/in6_pcb.h>
145 #include <netinet6/ip6_var.h>
146 #include <netinet/tcp.h>
147 #include <netinet/tcp_fsm.h>
148 #include <netinet/tcp_seq.h>
149 #include <netinet/tcp_timer.h>
150 #include <netinet/tcp_var.h>
151 #include <netinet/tcpip.h>
152 #include <netinet/cc/cc.h>
153 #include <netinet/tcp_hpts.h>
154 #include <netinet/tcp_log_buf.h>
155
156 #ifdef TCP_OFFLOAD
157 #include <netinet/tcp_offload.h>
158 #endif
159
160 /*
161 * The hpts uses a 102400-slot wheel. The wheel
162 * defines the time in 10 usec increments (102400 x 10).
163 * This gives a range of 10 usec - 1024 ms to place
164 * an entry within. If the user requests more than
165 * 1.024 seconds, a remainder is attached and the hpts,
166 * when seeing the remainder, will re-insert the
167 * inpcb forward in time from where it is until
168 * the remainder is zero. (A worked example follows below.)
169 */
170
171 #define NUM_OF_HPTSI_SLOTS 102400
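/*
 * Worked example of the wheel arithmetic above (a sketch, assuming the
 * 10 usec per-slot granularity just described):
 *
 *	NUM_OF_HPTSI_SLOTS * 10 usec = 102400 * 10 = 1024000 usec = 1.024 s
 *
 * A request of 2.5 seconds is 250000 slots. Only the wheel's worth of
 * those fit, so the entry is placed at the furthest slot available and
 * the excess is kept in t_hpts_request; each time the wheel reaches the
 * entry it is pushed forward again until the remainder hits zero.
 */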
172
173 /* Each hpts has its own p_mtx which is used for locking */
174 #define HPTS_MTX_ASSERT(hpts) mtx_assert(&(hpts)->p_mtx, MA_OWNED)
175 #define HPTS_LOCK(hpts) mtx_lock(&(hpts)->p_mtx)
176 #define HPTS_TRYLOCK(hpts) mtx_trylock(&(hpts)->p_mtx)
177 #define HPTS_UNLOCK(hpts) mtx_unlock(&(hpts)->p_mtx)
178 struct tcp_hpts_entry {
179 /* Cache line 0x00 */
180 struct mtx p_mtx; /* Mutex for hpts */
181 struct timeval p_mysleep; /* Our min sleep time */
182 uint64_t syscall_cnt;
183 uint64_t sleeping; /* What the actual sleep was (if sleeping) */
184 uint16_t p_hpts_active; /* Flag that says hpts is awake */
185 uint8_t p_wheel_complete; /* have we completed the wheel arc walk? */
186 uint32_t p_curtick; /* Tick in 10 us the hpts is going to */
187 uint32_t p_runningslot; /* Current tick we are at if we are running */
188 uint32_t p_prev_slot; /* Previous slot we were on */
189 uint32_t p_cur_slot; /* Current slot in wheel hpts is draining */
190 uint32_t p_nxt_slot; /* The next slot outside the current range of
191 * slots that the hpts is running on. */
192 int32_t p_on_queue_cnt; /* Count on queue in this hpts */
193 uint32_t p_lasttick; /* Last tick before the current one */
194 uint8_t p_direct_wake :1, /* boolean */
195 p_on_min_sleep:1, /* boolean */
196 p_hpts_wake_scheduled:1, /* boolean */
197 hit_callout_thresh:1,
198 p_avail:4;
199 uint8_t p_fill[3]; /* Fill to 32 bits */
200 /* Cache line 0x40 */
201 struct hptsh {
202 TAILQ_HEAD(, tcpcb) head;
203 uint32_t count;
204 uint32_t gencnt;
205 } *p_hptss; /* Hptsi wheel */
206 uint32_t p_hpts_sleep_time; /* Current sleep interval having a max
207 * of 255ms */
208 uint32_t overidden_sleep; /* what was overridden by min-sleep, for logging */
209 uint32_t saved_lasttick; /* for logging */
210 uint32_t saved_curtick; /* for logging */
211 uint32_t saved_curslot; /* for logging */
212 uint32_t saved_prev_slot; /* for logging */
213 uint32_t p_delayed_by; /* How much were we delayed by */
214 /* Cache line 0x80 */
215 struct sysctl_ctx_list hpts_ctx;
216 struct sysctl_oid *hpts_root;
217 struct intr_event *ie;
218 void *ie_cookie;
219 uint16_t p_num; /* The hpts number one per cpu */
220 uint16_t p_cpu; /* The hpts CPU */
221 /* There is extra space in here */
222 /* Cache line 0x100 */
223 struct callout co __aligned(CACHE_LINE_SIZE);
224 } __aligned(CACHE_LINE_SIZE);
225
226 static struct tcp_hptsi {
227 struct cpu_group **grps;
228 struct tcp_hpts_entry **rp_ent; /* Array of hptss */
229 uint32_t *cts_last_ran;
230 uint32_t grp_cnt;
231 uint32_t rp_num_hptss; /* Number of hpts threads */
232 } tcp_pace;
233
234 static MALLOC_DEFINE(M_TCPHPTS, "tcp_hpts", "TCP hpts");
235 #ifdef RSS
236 static int tcp_bind_threads = 1;
237 #else
238 static int tcp_bind_threads = 2;
239 #endif
240 static int tcp_use_irq_cpu = 0;
241 static int hpts_does_tp_logging = 0;
242
243 static int32_t tcp_hptsi(struct tcp_hpts_entry *hpts, bool from_callout);
244 static void tcp_hpts_thread(void *ctx);
245
246 int32_t tcp_min_hptsi_time = DEFAULT_MIN_SLEEP;
247 static int conn_cnt_thresh = DEFAULT_CONNECTION_THESHOLD;
/* Count of hpts threads above conn_cnt_thresh; used by tcp_hpts_thread(). */
static int hpts_that_need_softclock = 0;
248 static int32_t dynamic_min_sleep = DYNAMIC_MIN_SLEEP;
249 static int32_t dynamic_max_sleep = DYNAMIC_MAX_SLEEP;
250
251
252 SYSCTL_NODE(_net_inet_tcp, OID_AUTO, hpts, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
253 "TCP Hpts controls");
254 SYSCTL_NODE(_net_inet_tcp_hpts, OID_AUTO, stats, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
255 "TCP Hpts statistics");
256
257 #define timersub(tvp, uvp, vvp) \
258 do { \
259 (vvp)->tv_sec = (tvp)->tv_sec - (uvp)->tv_sec; \
260 (vvp)->tv_usec = (tvp)->tv_usec - (uvp)->tv_usec; \
261 if ((vvp)->tv_usec < 0) { \
262 (vvp)->tv_sec--; \
263 (vvp)->tv_usec += 1000000; \
264 } \
265 } while (0)
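/*
 * Example use of timersub() (a sketch; "woke" and "slept_at" are
 * hypothetical locals): compute how long we actually slept.
 *
 *	struct timeval woke, slept_at, delta;
 *
 *	microuptime(&woke);
 *	timersub(&woke, &slept_at, &delta);
 */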
266
267 static int32_t tcp_hpts_precision = 120;
268
269 static struct hpts_domain_info {
270 int count;
271 int cpu[MAXCPU];
272 } hpts_domains[MAXMEMDOM];
273
274 counter_u64_t hpts_hopelessly_behind;
275
276 SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, hopeless, CTLFLAG_RD,
277 &hpts_hopelessly_behind,
278 "Number of times hpts could not catch up and was behind hopelessly");
279
280 counter_u64_t hpts_loops;
281
282 SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, loops, CTLFLAG_RD,
283 &hpts_loops, "Number of times hpts had to loop to catch up");
284
285 counter_u64_t back_tosleep;
286
287 SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, no_tcbsfound, CTLFLAG_RD,
288 &back_tosleep, "Number of times hpts found no tcbs");
289
290 counter_u64_t combined_wheel_wrap;
291
292 SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, comb_wheel_wrap, CTLFLAG_RD,
293 &combined_wheel_wrap, "Number of times the wheel lagged enough to have an insert see wrap");
294
295 counter_u64_t wheel_wrap;
296
297 SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, wheel_wrap, CTLFLAG_RD,
298 &wheel_wrap, "Number of times the pacer was so far behind it had to run the entire wheel");
299
300 counter_u64_t hpts_direct_call;
301 SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, direct_call, CTLFLAG_RD,
302 &hpts_direct_call, "Number of times hpts was called by syscall/trap or other entry");
303
304 counter_u64_t hpts_wake_timeout;
305
306 SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, timeout_wakeup, CTLFLAG_RD,
307 &hpts_wake_timeout, "Number of times hpts threads woke up via the callout expiring");
308
309 counter_u64_t hpts_direct_awakening;
310
311 SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, direct_awakening, CTLFLAG_RD,
312 &hpts_direct_awakening, "Number of times hpts threads woke up via a direct wakeup");
313
314 counter_u64_t hpts_back_tosleep;
315
316 SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, back_tosleep, CTLFLAG_RD,
317 &hpts_back_tosleep, "Number of times hpts threads woke up via the callout expiring and went back to sleep with no work");
318
319 counter_u64_t cpu_uses_flowid;
320 counter_u64_t cpu_uses_random;
321
322 SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, cpusel_flowid, CTLFLAG_RD,
323 &cpu_uses_flowid, "Number of times when setting cpuid we used the flowid field");
324 SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, cpusel_random, CTLFLAG_RD,
325 &cpu_uses_random, "Number of times when setting cpuid we used a random value");
326
327 TUNABLE_INT("net.inet.tcp.bind_hptss", &tcp_bind_threads);
328 TUNABLE_INT("net.inet.tcp.use_irq", &tcp_use_irq_cpu);
329 SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, bind_hptss, CTLFLAG_RD,
330 &tcp_bind_threads, 2,
331 "Thread Binding tunable");
332 SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, use_irq, CTLFLAG_RD,
333 &tcp_use_irq_cpu, 0,
334 "Use of irq CPU tunable");
335 SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, precision, CTLFLAG_RW,
336 &tcp_hpts_precision, 120,
337 "Value for PRE() precision of callout");
338 SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, cnt_thresh, CTLFLAG_RW,
339 &conn_cnt_thresh, 0,
340 "How many connections (below) make us use the callout based mechanism");
341 SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, logging, CTLFLAG_RW,
342 &hpts_does_tp_logging, 0,
343 "Do we add to any tp that has logging on pacer logs");
344 SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, dyn_minsleep, CTLFLAG_RW,
345 &dynamic_min_sleep, 250,
346 "What is the dynamic minsleep value?");
347 SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, dyn_maxsleep, CTLFLAG_RW,
348 &dynamic_max_sleep, 5000,
349 "What is the dynamic maxsleep value?");
350
351 static int32_t max_pacer_loops = 10;
352 SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, loopmax, CTLFLAG_RW,
353 &max_pacer_loops, 10,
354 "What is the maximum number of times the pacer will loop trying to catch up");
355
356 #define HPTS_MAX_SLEEP_ALLOWED (NUM_OF_HPTSI_SLOTS/2)
357
358 static uint32_t hpts_sleep_max = HPTS_MAX_SLEEP_ALLOWED;
359
360 static int
361 sysctl_net_inet_tcp_hpts_max_sleep(SYSCTL_HANDLER_ARGS)
362 {
363 int error;
364 uint32_t new;
365
366 new = hpts_sleep_max;
367 error = sysctl_handle_int(oidp, &new, 0, req);
368 if (error == 0 && req->newptr) {
369 if ((new < (dynamic_min_sleep/HPTS_TICKS_PER_SLOT)) ||
370 (new > HPTS_MAX_SLEEP_ALLOWED))
371 error = EINVAL;
372 else
373 hpts_sleep_max = new;
374 }
375 return (error);
376 }
377
378 static int
379 sysctl_net_inet_tcp_hpts_min_sleep(SYSCTL_HANDLER_ARGS)
380 {
381 int error;
382 uint32_t new;
383
384 new = tcp_min_hptsi_time;
385 error = sysctl_handle_int(oidp, &new, 0, req);
386 if (error == 0 && req->newptr) {
387 if (new < LOWEST_SLEEP_ALLOWED)
388 error = EINVAL;
389 else
390 tcp_min_hptsi_time = new;
391 }
392 return (error);
393 }
394
395 SYSCTL_PROC(_net_inet_tcp_hpts, OID_AUTO, maxsleep,
396 CTLTYPE_UINT | CTLFLAG_RW,
397 &hpts_sleep_max, 0,
398 &sysctl_net_inet_tcp_hpts_max_sleep, "IU",
399 "Maximum time hpts will sleep in slots");
400
401 SYSCTL_PROC(_net_inet_tcp_hpts, OID_AUTO, minsleep,
402 CTLTYPE_UINT | CTLFLAG_RW,
403 &tcp_min_hptsi_time, 0,
404 &sysctl_net_inet_tcp_hpts_min_sleep, "IU",
405 "The minimum time the hpts must sleep before processing more slots");
406
407 static int ticks_indicate_more_sleep = TICKS_INDICATE_MORE_SLEEP;
408 static int ticks_indicate_less_sleep = TICKS_INDICATE_LESS_SLEEP;
409 static int tcp_hpts_no_wake_over_thresh = 1;
410
411 SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, more_sleep, CTLFLAG_RW,
412 &ticks_indicate_more_sleep, 0,
413 "If we only process this many or less on a timeout, we need longer sleep on the next callout");
414 SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, less_sleep, CTLFLAG_RW,
415 &ticks_indicate_less_sleep, 0,
416 "If we process this many or more on a timeout, we need less sleep on the next callout");
417 SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, nowake_over_thresh, CTLFLAG_RW,
418 &tcp_hpts_no_wake_over_thresh, 0,
419 "When we are over the threshold on the pacer do we prohibit wakeups?");
420
421 static uint16_t
422 hpts_random_cpu(void)
423 {
424 uint16_t cpuid;
425 uint32_t ran;
426
427 ran = arc4random();
428 cpuid = (((ran & 0xffff) % mp_ncpus) % tcp_pace.rp_num_hptss);
429 return (cpuid);
430 }
431
432 static void
433 tcp_hpts_log(struct tcp_hpts_entry *hpts, struct tcpcb *tp, struct timeval *tv,
434 int slots_to_run, int idx, bool from_callout)
435 {
436 union tcp_log_stackspecific log;
437 /*
438 * Unused logs are
439 * 64 bit - delRate, rttProp, bw_inuse
440 * 16 bit - cwnd_gain
441 * 8 bit - bbr_state, bbr_substate, inhpts;
442 */
443 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
444 log.u_bbr.flex1 = hpts->p_nxt_slot;
445 log.u_bbr.flex2 = hpts->p_cur_slot;
446 log.u_bbr.flex3 = hpts->p_prev_slot;
447 log.u_bbr.flex4 = idx;
448 log.u_bbr.flex5 = hpts->p_curtick;
449 log.u_bbr.flex6 = hpts->p_on_queue_cnt;
450 log.u_bbr.flex7 = hpts->p_cpu;
451 log.u_bbr.flex8 = (uint8_t)from_callout;
452 log.u_bbr.inflight = slots_to_run;
453 log.u_bbr.applimited = hpts->overidden_sleep;
454 log.u_bbr.delivered = hpts->saved_curtick;
455 log.u_bbr.timeStamp = tcp_tv_to_usectick(tv);
456 log.u_bbr.epoch = hpts->saved_curslot;
457 log.u_bbr.lt_epoch = hpts->saved_prev_slot;
458 log.u_bbr.pkts_out = hpts->p_delayed_by;
459 log.u_bbr.lost = hpts->p_hpts_sleep_time;
460 log.u_bbr.pacing_gain = hpts->p_cpu;
461 log.u_bbr.pkt_epoch = hpts->p_runningslot;
462 log.u_bbr.use_lt_bw = 1;
463 TCP_LOG_EVENTP(tp, NULL,
464 &tptosocket(tp)->so_rcv,
465 &tptosocket(tp)->so_snd,
466 BBR_LOG_HPTSDIAG, 0,
467 0, &log, false, tv);
468 }
469
470 static void
471 tcp_wakehpts(struct tcp_hpts_entry *hpts)
472 {
473 HPTS_MTX_ASSERT(hpts);
474
475 if (tcp_hpts_no_wake_over_thresh && (hpts->p_on_queue_cnt >= conn_cnt_thresh)) {
476 hpts->p_direct_wake = 0;
477 return;
478 }
479 if (hpts->p_hpts_wake_scheduled == 0) {
480 hpts->p_hpts_wake_scheduled = 1;
481 swi_sched(hpts->ie_cookie, 0);
482 }
483 }
484
485 static void
486 hpts_timeout_swi(void *arg)
487 {
488 struct tcp_hpts_entry *hpts;
489
490 hpts = (struct tcp_hpts_entry *)arg;
491 swi_sched(hpts->ie_cookie, 0);
492 }
493
494 static void
495 tcp_hpts_insert_internal(struct tcpcb *tp, struct tcp_hpts_entry *hpts)
496 {
497 struct inpcb *inp = tptoinpcb(tp);
498 struct hptsh *hptsh;
499
500 INP_WLOCK_ASSERT(inp);
501 HPTS_MTX_ASSERT(hpts);
502 MPASS(hpts->p_cpu == tp->t_hpts_cpu);
503 MPASS(!(inp->inp_flags & INP_DROPPED));
504
505 hptsh = &hpts->p_hptss[tp->t_hpts_slot];
506
507 if (tp->t_in_hpts == IHPTS_NONE) {
508 tp->t_in_hpts = IHPTS_ONQUEUE;
509 in_pcbref(inp);
510 } else if (tp->t_in_hpts == IHPTS_MOVING) {
511 tp->t_in_hpts = IHPTS_ONQUEUE;
512 } else
513 MPASS(tp->t_in_hpts == IHPTS_ONQUEUE);
514 tp->t_hpts_gencnt = hptsh->gencnt;
515
516 TAILQ_INSERT_TAIL(&hptsh->head, tp, t_hpts);
517 hptsh->count++;
518 hpts->p_on_queue_cnt++;
519 }
520
521 static struct tcp_hpts_entry *
522 tcp_hpts_lock(struct tcpcb *tp)
523 {
524 struct tcp_hpts_entry *hpts;
525
526 INP_LOCK_ASSERT(tptoinpcb(tp));
527
528 hpts = tcp_pace.rp_ent[tp->t_hpts_cpu];
529 HPTS_LOCK(hpts);
530
531 return (hpts);
532 }
533
534 static void
535 tcp_hpts_release(struct tcpcb *tp)
536 {
537 bool released __diagused;
538
539 tp->t_in_hpts = IHPTS_NONE;
540 released = in_pcbrele_wlocked(tptoinpcb(tp));
541 MPASS(released == false);
542 }
543
544 /*
545 * Initialize tcpcb to get ready for use with HPTS. We will know which CPU
546 * is preferred on the first incoming packet. Before that avoid crowding
547 * a single CPU with newborn connections and use a random one.
548 * This initialization is normally called on a newborn tcpcb, but potentially
549 * can be called once again if the stack is switched. In that case we inherit the CPU
550 * that the previous stack has set, be it random or not. In extreme cases,
551 * e.g. syzkaller fuzzing, a tcpcb can already be in HPTS in IHPTS_MOVING state
552 * and has never received a first packet.
553 */
554 void
555 tcp_hpts_init(struct tcpcb *tp)
556 {
557
558 if (__predict_true(tp->t_hpts_cpu == HPTS_CPU_NONE)) {
559 tp->t_hpts_cpu = hpts_random_cpu();
560 MPASS(!(tp->t_flags2 & TF2_HPTS_CPU_SET));
561 }
562 }
563
564 /*
565 * Called normally with the INP locked, but it
566 * does not matter; the hpts lock is the key,
567 * but the lock order allows us to hold the
568 * INP lock and then get the hpts lock.
569 */
570 void
571 tcp_hpts_remove(struct tcpcb *tp)
572 {
573 struct tcp_hpts_entry *hpts;
574 struct hptsh *hptsh;
575
576 INP_WLOCK_ASSERT(tptoinpcb(tp));
577
578 hpts = tcp_hpts_lock(tp);
579 if (tp->t_in_hpts == IHPTS_ONQUEUE) {
580 hptsh = &hpts->p_hptss[tp->t_hpts_slot];
581 tp->t_hpts_request = 0;
582 if (__predict_true(tp->t_hpts_gencnt == hptsh->gencnt)) {
583 TAILQ_REMOVE(&hptsh->head, tp, t_hpts);
584 MPASS(hptsh->count > 0);
585 hptsh->count--;
586 MPASS(hpts->p_on_queue_cnt > 0);
587 hpts->p_on_queue_cnt--;
588 tcp_hpts_release(tp);
589 } else {
590 /*
591 * tcp_hptsi() now owns the TAILQ head of this inp.
592 * Can't TAILQ_REMOVE, just mark it.
593 */
594 #ifdef INVARIANTS
595 struct tcpcb *tmp;
596
597 TAILQ_FOREACH(tmp, &hptsh->head, t_hpts)
598 MPASS(tmp != tp);
599 #endif
600 tp->t_in_hpts = IHPTS_MOVING;
601 tp->t_hpts_slot = -1;
602 }
603 } else if (tp->t_in_hpts == IHPTS_MOVING) {
604 /*
605 * Handle a special race condition:
606 * tcp_hptsi() moves inpcb to detached tailq
607 * tcp_hpts_remove() marks as IHPTS_MOVING, slot = -1
608 * tcp_hpts_insert() sets slot to a meaningful value
609 * tcp_hpts_remove() again (we are here!), then in_pcbdrop()
610 * tcp_hptsi() finds pcb with meaningful slot and INP_DROPPED
611 */
612 tp->t_hpts_slot = -1;
613 }
614 HPTS_UNLOCK(hpts);
615 }
616
617 static inline int
618 hpts_slot(uint32_t wheel_slot, uint32_t plus)
619 {
620 /*
621 * Given a slot on the wheel, what slot
622 * is that plus ticks out?
623 */
624 KASSERT(wheel_slot < NUM_OF_HPTSI_SLOTS, ("Invalid tick %u not on wheel", wheel_slot));
625 return ((wheel_slot + plus) % NUM_OF_HPTSI_SLOTS);
626 }
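/*
 * For example (the modular arithmetic above):
 *
 *	hpts_slot(100, 50) == 150
 *	hpts_slot(102399, 1) == 0	(wraps back to the start)
 */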
627
628 static inline int
629 tick_to_wheel(uint32_t cts_in_wticks)
630 {
631 /*
632 * Given a timestamp in ticks (so by
633 * default, to get it to real time one
634 * would multiply by 10, i.e. the number
635 * of ticks in a slot), map it onto our
636 * limited-space wheel.
637 */
638 return (cts_in_wticks % NUM_OF_HPTSI_SLOTS);
639 }
640
641 static inline int
642 hpts_slots_diff(int prev_slot, int slot_now)
643 {
644 /*
645 * Given two slots that are somewhere
646 * on our wheel, how far apart are they?
647 */
648 if (slot_now > prev_slot)
649 return (slot_now - prev_slot);
650 else if (slot_now == prev_slot)
651 /*
652 * Special case, same means we can go all of our
653 * wheel less one slot.
654 */
655 return (NUM_OF_HPTSI_SLOTS - 1);
656 else
657 return ((NUM_OF_HPTSI_SLOTS - prev_slot) + slot_now);
658 }
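/*
 * For example: hpts_slots_diff(10, 15) == 5; hpts_slots_diff(15, 10)
 * wraps, giving (102400 - 15) + 10 == 102395; and
 * hpts_slots_diff(10, 10) == 102399, the whole wheel less one slot.
 */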
659
660 /*
661 * Given a slot on the wheel that is the current time
662 * mapped to the wheel (wheel_slot), what is the maximum
663 * distance forward that can be obtained without
664 * wrapping past either prev_slot or running_slot
665 * depending on the hpts state? Also if passed
666 * a uint32_t *, fill it with the slot location.
667 *
668 * Note if you do not give this function the current
669 * time (that you think it is) mapped to the wheel slot
670 * then the results will not be what you expect and
671 * could lead to invalid inserts.
672 */
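/*
 * A worked example (a sketch): suppose the pacer is asleep with
 * p_prev_slot == 100 and the caller's current time maps to
 * wheel_slot == 110. Then end_slot (and *target_slot) == 99,
 * dis_to_travel == hpts_slots_diff(100, 110) == 10, and the
 * function returns NUM_OF_HPTSI_SLOTS - 10 == 102390 slots.
 */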
673 static inline int32_t
674 max_slots_available(struct tcp_hpts_entry *hpts, uint32_t wheel_slot, uint32_t *target_slot)
675 {
676 uint32_t dis_to_travel, end_slot, pacer_to_now, avail_on_wheel;
677
678 if ((hpts->p_hpts_active == 1) &&
679 (hpts->p_wheel_complete == 0)) {
680 end_slot = hpts->p_runningslot;
681 /* Back up one tick */
682 if (end_slot == 0)
683 end_slot = NUM_OF_HPTSI_SLOTS - 1;
684 else
685 end_slot--;
686 if (target_slot)
687 *target_slot = end_slot;
688 } else {
689 /*
690 * For the case where we are
691 * not active, or we have
692 * completed the pass over
693 * the wheel, we can use the
694 * prev tick and subtract one from it. This puts us
695 * as far out as possible on the wheel.
696 */
697 end_slot = hpts->p_prev_slot;
698 if (end_slot == 0)
699 end_slot = NUM_OF_HPTSI_SLOTS - 1;
700 else
701 end_slot--;
702 if (target_slot)
703 *target_slot = end_slot;
704 /*
705 * Now we have close to the full wheel left minus the
706 * time it has been since the pacer went to sleep. Note
707 * that wheel_slot, passed in, should be the current time
708 * from the perspective of the caller, mapped to the wheel.
709 */
710 if (hpts->p_prev_slot != wheel_slot)
711 dis_to_travel = hpts_slots_diff(hpts->p_prev_slot, wheel_slot);
712 else
713 dis_to_travel = 1;
714 /*
715 * dis_to_travel in this case is the space from when the
716 * pacer stopped (p_prev_slot) and where our wheel_slot
717 * is now. To know how many slots we can put it in we
718 * subtract from the wheel size. We would not want
719 * to place something after p_prev_slot or it will
720 * get run too soon.
721 */
722 return (NUM_OF_HPTSI_SLOTS - dis_to_travel);
723 }
724 /*
725 * So how many slots are open between p_runningslot -> p_cur_slot
726 * that is what is currently un-available for insertion. Special
727 * case when we are at the last slot, this gets 1, so that
728 * the answer to how many slots are available is all but 1.
729 */
730 if (hpts->p_runningslot == hpts->p_cur_slot)
731 dis_to_travel = 1;
732 else
733 dis_to_travel = hpts_slots_diff(hpts->p_runningslot, hpts->p_cur_slot);
734 /*
735 * How long has the pacer been running?
736 */
737 if (hpts->p_cur_slot != wheel_slot) {
738 /* The pacer is a bit late */
739 pacer_to_now = hpts_slots_diff(hpts->p_cur_slot, wheel_slot);
740 } else {
741 /* The pacer is right on time, now == pacers start time */
742 pacer_to_now = 0;
743 }
744 /*
745 * To get the number left we can insert into we simply
746 * subtract the distance the pacer has to run from how
747 * many slots there are.
748 */
749 avail_on_wheel = NUM_OF_HPTSI_SLOTS - dis_to_travel;
750 /*
751 * Now how many of those we will eat due to the pacer's
752 * time (p_cur_slot) of start being behind the
753 * real time (wheel_slot)?
754 */
755 if (avail_on_wheel <= pacer_to_now) {
756 /*
757 * Wheel wrap: we can't fit on the wheel; that
758 * is unusual, the system must be way overloaded!
759 * Insert into the assured slot, and return special
760 * "0".
761 */
762 counter_u64_add(combined_wheel_wrap, 1);
763 if (target_slot)
764 *target_slot = hpts->p_nxt_slot;
765 return (0);
766 } else {
767 /*
768 * We know how many slots are open
769 * on the wheel (the reverse of what
770 * is left to run). Take away the time
771 * the pacer started to now (wheel_slot)
772 * and that tells you how many slots are
773 * open that can be inserted into that won't
774 * be touched by the pacer until later.
775 */
776 return (avail_on_wheel - pacer_to_now);
777 }
778 }
779
780
781 #ifdef INVARIANTS
782 static void
783 check_if_slot_would_be_wrong(struct tcp_hpts_entry *hpts, struct tcpcb *tp,
784 uint32_t hptsslot, int line)
785 {
786 /*
787 * Sanity checks for the pacer with invariants
788 * on insert.
789 */
790 KASSERT(hptsslot < NUM_OF_HPTSI_SLOTS,
791 ("hpts:%p tp:%p slot:%d > max", hpts, tp, hptsslot));
792 if ((hpts->p_hpts_active) &&
793 (hpts->p_wheel_complete == 0)) {
794 /*
795 * If the pacer is processing an arc
796 * of the wheel, we need to make
797 * sure we are not inserting within
798 * that arc.
799 */
800 int distance, yet_to_run;
801
802 distance = hpts_slots_diff(hpts->p_runningslot, hptsslot);
803 if (hpts->p_runningslot != hpts->p_cur_slot)
804 yet_to_run = hpts_slots_diff(hpts->p_runningslot, hpts->p_cur_slot);
805 else
806 yet_to_run = 0; /* processing last slot */
807 KASSERT(yet_to_run <= distance, ("hpts:%p tp:%p slot:%d "
808 "distance:%d yet_to_run:%d rs:%d cs:%d", hpts, tp,
809 hptsslot, distance, yet_to_run, hpts->p_runningslot,
810 hpts->p_cur_slot));
811 }
812 }
813 #endif
814
815 uint32_t
816 tcp_hpts_insert_diag(struct tcpcb *tp, uint32_t slot, int32_t line, struct hpts_diag *diag)
817 {
818 struct tcp_hpts_entry *hpts;
819 struct timeval tv;
820 uint32_t slot_on, wheel_cts, last_slot, need_new_to = 0;
821 int32_t wheel_slot, maxslots;
822 bool need_wakeup = false;
823
824 INP_WLOCK_ASSERT(tptoinpcb(tp));
825 MPASS(!(tptoinpcb(tp)->inp_flags & INP_DROPPED));
826 MPASS(!(tp->t_in_hpts == IHPTS_ONQUEUE));
827
828 /*
829 * We now return the next-slot the hpts will be on, beyond its
830 * current run (if up) or where it was when it stopped if it is
831 * sleeping.
832 */
833 hpts = tcp_hpts_lock(tp);
834 microuptime(&tv);
835 if (diag) {
836 memset(diag, 0, sizeof(struct hpts_diag));
837 diag->p_hpts_active = hpts->p_hpts_active;
838 diag->p_prev_slot = hpts->p_prev_slot;
839 diag->p_runningslot = hpts->p_runningslot;
840 diag->p_nxt_slot = hpts->p_nxt_slot;
841 diag->p_cur_slot = hpts->p_cur_slot;
842 diag->p_curtick = hpts->p_curtick;
843 diag->p_lasttick = hpts->p_lasttick;
844 diag->slot_req = slot;
845 diag->p_on_min_sleep = hpts->p_on_min_sleep;
846 diag->hpts_sleep_time = hpts->p_hpts_sleep_time;
847 }
848 if (slot == 0) {
849 /* Ok we need to set it on the hpts in the current slot */
850 tp->t_hpts_request = 0;
851 if ((hpts->p_hpts_active == 0) || (hpts->p_wheel_complete)) {
852 /*
853 * A sleeping hpts; we want it to run in the next slot.
854 * Note that in this state p_prev_slot == p_cur_slot.
855 */
856 tp->t_hpts_slot = hpts_slot(hpts->p_prev_slot, 1);
857 if ((hpts->p_on_min_sleep == 0) &&
858 (hpts->p_hpts_active == 0))
859 need_wakeup = true;
860 } else
861 tp->t_hpts_slot = hpts->p_runningslot;
862 if (__predict_true(tp->t_in_hpts != IHPTS_MOVING))
863 tcp_hpts_insert_internal(tp, hpts);
864 if (need_wakeup) {
865 /*
866 * Activate the hpts if it is sleeping and its
867 * timeout is not 1.
868 */
869 hpts->p_direct_wake = 1;
870 tcp_wakehpts(hpts);
871 }
872 slot_on = hpts->p_nxt_slot;
873 HPTS_UNLOCK(hpts);
874
875 return (slot_on);
876 }
877 /* Get the current time relative to the wheel */
878 wheel_cts = tcp_tv_to_hptstick(&tv);
879 /* Map it onto the wheel */
880 wheel_slot = tick_to_wheel(wheel_cts);
881 /* Now what's the max we can place it at? */
882 maxslots = max_slots_available(hpts, wheel_slot, &last_slot);
883 if (diag) {
884 diag->wheel_slot = wheel_slot;
885 diag->maxslots = maxslots;
886 diag->wheel_cts = wheel_cts;
887 }
888 if (maxslots == 0) {
889 /* The pacer is in a wheel wrap behind, yikes! */
890 if (slot > 1) {
891 /*
892 * Reduce by 1 to prevent a forever loop in
893 * case something else is wrong. Note this
894 * probably does not hurt because if the pacer
895 * truly is that far behind, we will be
896 * > 1 second late calling anyway.
897 */
898 slot--;
899 }
900 tp->t_hpts_slot = last_slot;
901 tp->t_hpts_request = slot;
902 } else if (maxslots >= slot) {
903 /* It all fits on the wheel */
904 tp->t_hpts_request = 0;
905 tp->t_hpts_slot = hpts_slot(wheel_slot, slot);
906 } else {
907 /* It does not fit */
908 tp->t_hpts_request = slot - maxslots;
909 tp->t_hpts_slot = last_slot;
910 }
911 if (diag) {
912 diag->slot_remaining = tp->t_hpts_request;
913 diag->inp_hptsslot = tp->t_hpts_slot;
914 }
915 #ifdef INVARIANTS
916 check_if_slot_would_be_wrong(hpts, tp, tp->t_hpts_slot, line);
917 #endif
918 if (__predict_true(tp->t_in_hpts != IHPTS_MOVING))
919 tcp_hpts_insert_internal(tp, hpts);
920 if ((hpts->p_hpts_active == 0) &&
921 (tp->t_hpts_request == 0) &&
922 (hpts->p_on_min_sleep == 0)) {
923 /*
924 * The hpts is sleeping and NOT on a minimum
925 * sleep time; we need to figure out where
926 * it will wake up and whether we need to reschedule
927 * its time-out.
928 */
929 uint32_t have_slept, yet_to_sleep;
930
931 /* Now do we need to restart the hpts's timer? */
932 have_slept = hpts_slots_diff(hpts->p_prev_slot, wheel_slot);
933 if (have_slept < hpts->p_hpts_sleep_time)
934 yet_to_sleep = hpts->p_hpts_sleep_time - have_slept;
935 else {
936 /* We are over-due */
937 yet_to_sleep = 0;
938 need_wakeup = 1;
939 }
940 if (diag) {
941 diag->have_slept = have_slept;
942 diag->yet_to_sleep = yet_to_sleep;
943 }
944 if (yet_to_sleep &&
945 (yet_to_sleep > slot)) {
946 /*
947 * We need to reschedule the hpts's time-out.
948 */
949 hpts->p_hpts_sleep_time = slot;
950 need_new_to = slot * HPTS_TICKS_PER_SLOT;
951 }
952 }
953 /*
954 * Now, how far out is the hpts sleeping? If active is 1, it's
955 * up and ticking and we do nothing; otherwise we may need to
956 * reschedule its callout if need_new_to is set from above.
957 */
958 if (need_wakeup) {
959 hpts->p_direct_wake = 1;
960 tcp_wakehpts(hpts);
961 if (diag) {
962 diag->need_new_to = 0;
963 diag->co_ret = 0xffff0000;
964 }
965 } else if (need_new_to) {
966 int32_t co_ret;
967 struct timeval tv;
968 sbintime_t sb;
969
970 tv.tv_sec = 0;
971 tv.tv_usec = 0;
972 while (need_new_to > HPTS_USEC_IN_SEC) {
973 tv.tv_sec++;
974 need_new_to -= HPTS_USEC_IN_SEC;
975 }
976 tv.tv_usec = need_new_to;
977 sb = tvtosbt(tv);
978 co_ret = callout_reset_sbt_on(&hpts->co, sb, 0,
979 hpts_timeout_swi, hpts, hpts->p_cpu,
980 (C_DIRECT_EXEC | C_PREL(tcp_hpts_precision)));
981 if (diag) {
982 diag->need_new_to = need_new_to;
983 diag->co_ret = co_ret;
984 }
985 }
986 slot_on = hpts->p_nxt_slot;
987 HPTS_UNLOCK(hpts);
988
989 return (slot_on);
990 }
991
992 static uint16_t
993 hpts_cpuid(struct tcpcb *tp, int *failed)
994 {
995 struct inpcb *inp = tptoinpcb(tp);
996 u_int cpuid;
997 #ifdef NUMA
998 struct hpts_domain_info *di;
999 #endif
1000
1001 *failed = 0;
1002 if (tp->t_flags2 & TF2_HPTS_CPU_SET) {
1003 return (tp->t_hpts_cpu);
1004 }
1005 /*
1006 * If we are using the irq cpu set by LRO or
1007 * the driver then it overrides all other domains.
1008 */
1009 if (tcp_use_irq_cpu) {
1010 if (tp->t_lro_cpu == HPTS_CPU_NONE) {
1011 *failed = 1;
1012 return (0);
1013 }
1014 return (tp->t_lro_cpu);
1015 }
1016 /* If one is set the other must be the same */
1017 #ifdef RSS
1018 cpuid = rss_hash2cpuid(inp->inp_flowid, inp->inp_flowtype);
1019 if (cpuid == NETISR_CPUID_NONE)
1020 return (hpts_random_cpu());
1021 else
1022 return (cpuid);
1023 #endif
1024 /*
1025 * We don't have a flowid -> cpuid mapping, so cheat and just map
1026 * unknown flowtypes to a random cpuid. Not the best, but apparently better
1027 * than defaulting to swi 0.
1028 */
1029 if (inp->inp_flowtype == M_HASHTYPE_NONE) {
1030 counter_u64_add(cpu_uses_random, 1);
1031 return (hpts_random_cpu());
1032 }
1033 /*
1034 * Hash to a thread based on the flowid. If we are using numa,
1035 * then restrict the hash to the numa domain where the inp lives.
1036 */
1037
1038 #ifdef NUMA
1039 if ((vm_ndomains == 1) ||
1040 (inp->inp_numa_domain == M_NODOM)) {
1041 #endif
1042 cpuid = inp->inp_flowid % mp_ncpus;
1043 #ifdef NUMA
1044 } else {
1045 /* Hash into the cpu's that use that domain */
1046 di = &hpts_domains[inp->inp_numa_domain];
1047 cpuid = di->cpu[inp->inp_flowid % di->count];
1048 }
1049 #endif
1050 counter_u64_add(cpu_uses_flowid, 1);
1051 return (cpuid);
1052 }
1053
1054 static void
1055 tcp_hpts_set_max_sleep(struct tcp_hpts_entry *hpts, int wrap_loop_cnt)
1056 {
1057 uint32_t t = 0, i;
1058
1059 if ((hpts->p_on_queue_cnt) && (wrap_loop_cnt < 2)) {
1060 /*
1061 * Find next slot that is occupied and use that to
1062 * be the sleep time.
1063 */
1064 for (i = 0, t = hpts_slot(hpts->p_cur_slot, 1); i < NUM_OF_HPTSI_SLOTS; i++) {
1065 if (TAILQ_EMPTY(&hpts->p_hptss[t].head) == 0) {
1066 break;
1067 }
1068 t = (t + 1) % NUM_OF_HPTSI_SLOTS;
1069 }
1070 KASSERT((i != NUM_OF_HPTSI_SLOTS), ("Hpts:%p cnt:%d but none found", hpts, hpts->p_on_queue_cnt));
1071 hpts->p_hpts_sleep_time = min((i + 1), hpts_sleep_max);
1072 } else {
1073 /* No one on the wheel; sleep for all but 400 slots, or the sleep max */
1074 hpts->p_hpts_sleep_time = hpts_sleep_max;
1075 }
1076 }
1077
1078 static int32_t
1079 tcp_hptsi(struct tcp_hpts_entry *hpts, bool from_callout)
1080 {
1081 struct tcpcb *tp;
1082 struct timeval tv;
1083 int32_t slots_to_run, i, error;
1084 int32_t loop_cnt = 0;
1085 int32_t did_prefetch = 0;
1086 int32_t prefetch_tp = 0;
1087 int32_t wrap_loop_cnt = 0;
1088 int32_t slot_pos_of_endpoint = 0;
1089 int32_t orig_exit_slot;
1090 bool completed_measure, seen_endpoint;
1091
1092 completed_measure = false;
1093 seen_endpoint = false;
1094
1095 HPTS_MTX_ASSERT(hpts);
1096 NET_EPOCH_ASSERT();
1097 /* record previous info for any logging */
1098 hpts->saved_lasttick = hpts->p_lasttick;
1099 hpts->saved_curtick = hpts->p_curtick;
1100 hpts->saved_curslot = hpts->p_cur_slot;
1101 hpts->saved_prev_slot = hpts->p_prev_slot;
1102
1103 hpts->p_lasttick = hpts->p_curtick;
1104 hpts->p_curtick = tcp_gethptstick(&tv);
1105 tcp_pace.cts_last_ran[hpts->p_num] = tcp_tv_to_usectick(&tv);
1106 orig_exit_slot = hpts->p_cur_slot = tick_to_wheel(hpts->p_curtick);
1107 if ((hpts->p_on_queue_cnt == 0) ||
1108 (hpts->p_lasttick == hpts->p_curtick)) {
1109 /*
1110 * No time has yet passed,
1111 * or nothing to do.
1112 */
1113 hpts->p_prev_slot = hpts->p_cur_slot;
1114 hpts->p_lasttick = hpts->p_curtick;
1115 goto no_run;
1116 }
1117 again:
1118 hpts->p_wheel_complete = 0;
1119 HPTS_MTX_ASSERT(hpts);
1120 slots_to_run = hpts_slots_diff(hpts->p_prev_slot, hpts->p_cur_slot);
1121 if (((hpts->p_curtick - hpts->p_lasttick) >
1122 ((NUM_OF_HPTSI_SLOTS-1) * HPTS_TICKS_PER_SLOT)) &&
1123 (hpts->p_on_queue_cnt != 0)) {
1124 /*
1125 * Wheel wrap is occurring: basically we
1126 * are behind and the distance between
1127 * runs has spread so much it has exceeded
1128 * the time on the wheel (1.024 seconds). This
1129 * is ugly and should NOT be happening. We
1130 * need to run the entire wheel. We last processed
1131 * p_prev_slot, so that needs to be the last slot
1132 * we run. The next slot after that should be our
1133 * reserved first slot for new entries, and after
1134 * that starts the running position. Now the problem
1135 * is that the reserved "not yet to run" place does not
1136 * exist and there may be inp's in there that need
1137 * running. We can merge those into the
1138 * first slot at the head.
1139 */
1140 wrap_loop_cnt++;
1141 hpts->p_nxt_slot = hpts_slot(hpts->p_prev_slot, 1);
1142 hpts->p_runningslot = hpts_slot(hpts->p_prev_slot, 2);
1143 /*
1144 * Adjust p_cur_slot to be where we are starting from;
1145 * hopefully we will catch up (fat chance if something
1146 * is this badly broken :( )
1147 */
1148 hpts->p_cur_slot = hpts->p_prev_slot;
1149 /*
1150 * The next slot has entries to run too, and that would
1151 * be where we would normally start; let's move them into
1152 * the next slot (p_prev_slot + 2) so that we will
1153 * run them. The extra 10 usec of lateness (from being
1154 * put behind) does not really matter in this situation.
1155 */
1156 TAILQ_FOREACH(tp, &hpts->p_hptss[hpts->p_nxt_slot].head,
1157 t_hpts) {
1158 MPASS(tp->t_hpts_slot == hpts->p_nxt_slot);
1159 MPASS(tp->t_hpts_gencnt ==
1160 hpts->p_hptss[hpts->p_nxt_slot].gencnt);
1161 MPASS(tp->t_in_hpts == IHPTS_ONQUEUE);
1162
1163 /*
1164 * Update gencnt and nextslot accordingly to match
1165 * the new location. This is safe since it takes both
1166 * the INP lock and the pacer mutex to change the
1167 * t_hptsslot and t_hpts_gencnt.
1168 */
1169 tp->t_hpts_gencnt =
1170 hpts->p_hptss[hpts->p_runningslot].gencnt;
1171 tp->t_hpts_slot = hpts->p_runningslot;
1172 }
1173 TAILQ_CONCAT(&hpts->p_hptss[hpts->p_runningslot].head,
1174 &hpts->p_hptss[hpts->p_nxt_slot].head, t_hpts);
1175 hpts->p_hptss[hpts->p_runningslot].count +=
1176 hpts->p_hptss[hpts->p_nxt_slot].count;
1177 hpts->p_hptss[hpts->p_nxt_slot].count = 0;
1178 hpts->p_hptss[hpts->p_nxt_slot].gencnt++;
1179 slots_to_run = NUM_OF_HPTSI_SLOTS - 1;
1180 counter_u64_add(wheel_wrap, 1);
1181 } else {
1182 /*
1183 * Nxt slot is always one after p_runningslot though
1184 * it's not usually used unless we are doing wheel wrap.
1185 */
1186 hpts->p_nxt_slot = hpts->p_prev_slot;
1187 hpts->p_runningslot = hpts_slot(hpts->p_prev_slot, 1);
1188 }
1189 if (hpts->p_on_queue_cnt == 0) {
1190 goto no_one;
1191 }
1192 for (i = 0; i < slots_to_run; i++) {
1193 struct tcpcb *tp, *ntp;
1194 TAILQ_HEAD(, tcpcb) head = TAILQ_HEAD_INITIALIZER(head);
1195 struct hptsh *hptsh;
1196 uint32_t runningslot;
1197
1198 /*
1199 * Calculate our delay, if there are no extra ticks there
1200 * was not any (i.e. if slots_to_run == 1, no delay).
1201 */
1202 hpts->p_delayed_by = (slots_to_run - (i + 1)) *
1203 HPTS_TICKS_PER_SLOT;
1204
1205 runningslot = hpts->p_runningslot;
1206 hptsh = &hpts->p_hptss[runningslot];
1207 TAILQ_SWAP(&head, &hptsh->head, tcpcb, t_hpts);
1208 hpts->p_on_queue_cnt -= hptsh->count;
1209 hptsh->count = 0;
1210 hptsh->gencnt++;
1211
1212 HPTS_UNLOCK(hpts);
1213
1214 TAILQ_FOREACH_SAFE(tp, &head, t_hpts, ntp) {
1215 struct inpcb *inp = tptoinpcb(tp);
1216 bool set_cpu;
1217
1218 if (ntp != NULL) {
1219 /*
1220 * If we have a next tcpcb, see if we can
1221 * prefetch it. Note this may seem
1222 * "risky" since we have no locks (other
1223 * than the previous inp) and there is no
1224 * assurance that ntp was not pulled while
1225 * we were processing tp and freed. If this
1226 * occurred it could mean that either:
1227 *
1228 * a) It's NULL (which is fine, we won't go
1229 * there); <or> b) It's valid (which is cool, we
1230 * will prefetch it); <or> c) The inp got
1231 * freed back to the slab which was
1232 * reallocated. Then the piece of memory was
1233 * re-used and something else (not an
1234 * address) is in inp_ppcb. If that occurs
1235 * we don't crash, but take a TLB shootdown
1236 * performance hit (same as if it was NULL
1237 * and we tried to pre-fetch it).
1238 *
1239 * Considering that the likelihood of <c> is
1240 * quite rare we will take a risk on doing
1241 * this. If performance drops after testing
1242 * we can always take this out. NB: the
1243 * kern_prefetch on amd64 actually has
1244 * protection against a bad address now via
1245 * the DMAP_() tests. This will prevent the
1246 * TLB hit, and instead if <c> occurs just
1247 * cause us to load cache with a useless
1248 * address (to us).
1249 *
1250 * XXXGL: this comment and the prefetch action
1251 * could be outdated after tp == inp change.
1252 */
1253 kern_prefetch(ntp, &prefetch_tp);
1254 prefetch_tp = 1;
1255 }
1256
1257 /* For debugging */
1258 if (!seen_endpoint) {
1259 seen_endpoint = true;
1260 orig_exit_slot = slot_pos_of_endpoint =
1261 runningslot;
1262 } else if (!completed_measure) {
1263 /* Record the new position */
1264 orig_exit_slot = runningslot;
1265 }
1266
1267 INP_WLOCK(inp);
1268 if ((tp->t_flags2 & TF2_HPTS_CPU_SET) == 0) {
1269 set_cpu = true;
1270 } else {
1271 set_cpu = false;
1272 }
1273
1274 if (__predict_false(tp->t_in_hpts == IHPTS_MOVING)) {
1275 if (tp->t_hpts_slot == -1) {
1276 tp->t_in_hpts = IHPTS_NONE;
1277 if (in_pcbrele_wlocked(inp) == false)
1278 INP_WUNLOCK(inp);
1279 } else {
1280 HPTS_LOCK(hpts);
1281 tcp_hpts_insert_internal(tp, hpts);
1282 HPTS_UNLOCK(hpts);
1283 INP_WUNLOCK(inp);
1284 }
1285 continue;
1286 }
1287
1288 MPASS(tp->t_in_hpts == IHPTS_ONQUEUE);
1289 MPASS(!(inp->inp_flags & INP_DROPPED));
1290 KASSERT(runningslot == tp->t_hpts_slot,
1291 ("Hpts:%p inp:%p slot mis-aligned %u vs %u",
1292 hpts, inp, runningslot, tp->t_hpts_slot));
1293
1294 if (tp->t_hpts_request) {
1295 /*
1296 * This connection is deferred out further in time
1297 * than our wheel had available for it.
1298 * Push it back on the wheel or run it,
1299 * depending.
1300 */
1301 uint32_t maxslots, last_slot, remaining_slots;
1302
1303 remaining_slots = slots_to_run - (i + 1);
1304 if (tp->t_hpts_request > remaining_slots) {
1305 HPTS_LOCK(hpts);
1306 /*
1307 * How far out can we go?
1308 */
1309 maxslots = max_slots_available(hpts,
1310 hpts->p_cur_slot, &last_slot);
1311 if (maxslots >= tp->t_hpts_request) {
1312 /* We can place it finally to
1313 * be processed. */
1314 tp->t_hpts_slot = hpts_slot(
1315 hpts->p_runningslot,
1316 tp->t_hpts_request);
1317 tp->t_hpts_request = 0;
1318 } else {
1319 /* Work off some more time */
1320 tp->t_hpts_slot = last_slot;
1321 tp->t_hpts_request -=
1322 maxslots;
1323 }
1324 tcp_hpts_insert_internal(tp, hpts);
1325 HPTS_UNLOCK(hpts);
1326 INP_WUNLOCK(inp);
1327 continue;
1328 }
1329 tp->t_hpts_request = 0;
1330 /* Fall through, we will do it now */
1331 }
1332
1333 tcp_hpts_release(tp);
1334 if (set_cpu) {
1335 /*
1336 * Setup so the next time we will move to
1337 * the right CPU. This should be a rare
1338 * event. It will sometimes happen when we
1339 * are the client side (usually not the
1340 * server). Somehow tcp_output() gets called
1341 * before tcp_do_segment() sets the
1342 * initial state. This means the r_cpu and
1343 * r_hpts_cpu are 0. We get on the hpts, and
1344 * then tcp_input() gets called setting up
1345 * the r_cpu to the correct value. The hpts
1346 * goes off and sees the mis-match. We
1347 * simply correct it here and the CPU will
1348 * switch to the new hpts next time the tcb
1349 * gets added to the hpts (not this one)
1350 * :-)
1351 */
1352 tcp_set_hpts(tp);
1353 }
1354 CURVNET_SET(inp->inp_vnet);
1355 /* Lets do any logging that we might want to */
1356 if (hpts_does_tp_logging && tcp_bblogging_on(tp)) {
1357 tcp_hpts_log(hpts, tp, &tv, slots_to_run, i,
1358 from_callout);
1359 }
1360
1361 if (tp->t_fb_ptr != NULL) {
1362 kern_prefetch(tp->t_fb_ptr, &did_prefetch);
1363 did_prefetch = 1;
1364 }
1365 /*
1366 * We set TF2_HPTS_CALLS before any possible output.
1367 * The contract with the transport is that if it cares
1368 * about hpts calling it should clear the flag. That
1369 * way next time it is called it will know it is hpts.
1370 *
1371 * We also only call tfb_do_queued_segments() <or>
1372 * tcp_output(). It is expected that if segments are
1373 * queued and come in that the final input mbuf will
1374 * cause a call to output if it is needed so we do
1375 * not need a second call to tcp_output(). So we do
1376 * one or the other but not both.
1377 *
1378 * XXXGL: some KPI abuse here. tfb_do_queued_segments
1379 * returns unlocked with positive error (always 1) and
1380 * tcp_output returns unlocked with negative error.
1381 */
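/*
 * Transport-side sketch of that contract (hypothetical stack
 * code, not part of this file):
 *
 *	if (tp->t_flags2 & TF2_HPTS_CALLS) {
 *		tp->t_flags2 &= ~TF2_HPTS_CALLS;
 *		(we know this call came from the pacer)
 *	}
 */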
1382 tp->t_flags2 |= TF2_HPTS_CALLS;
1383 if ((tp->t_flags2 & TF2_SUPPORTS_MBUFQ) &&
1384 !STAILQ_EMPTY(&tp->t_inqueue))
1385 error = -(*tp->t_fb->tfb_do_queued_segments)(tp,
1386 0);
1387 else
1388 error = tcp_output(tp);
1389 if (__predict_true(error >= 0))
1390 INP_WUNLOCK(inp);
1391 CURVNET_RESTORE();
1392 }
1393 if (seen_endpoint) {
1394 /*
1395 * We now have an accurate distance between
1396 * slot_pos_of_endpoint <-> orig_exit_slot
1397 * to tell us how late we were, orig_exit_slot
1398 * is where we calculated the end of our cycle to
1399 * be when we first entered.
1400 */
1401 completed_measure = true;
1402 }
1403 HPTS_LOCK(hpts);
1404 hpts->p_runningslot++;
1405 if (hpts->p_runningslot >= NUM_OF_HPTSI_SLOTS) {
1406 hpts->p_runningslot = 0;
1407 }
1408 }
1409 no_one:
1410 HPTS_MTX_ASSERT(hpts);
1411 hpts->p_delayed_by = 0;
1412 /*
1413 * Check to see if we took an excess amount of time and need to run
1414 * more ticks (if we did not hit ENOBUFS).
1415 */
1416 hpts->p_prev_slot = hpts->p_cur_slot;
1417 hpts->p_lasttick = hpts->p_curtick;
1418 if (!from_callout || (loop_cnt > max_pacer_loops)) {
1419 /*
1420 * Something is seriously slow: we have
1421 * looped through processing the wheel
1422 * max_pacer_loops times, and each time
1423 * we cleared what needed to run,
1424 * more work had accumulated. That means
1425 * the system is hopelessly behind and
1426 * can never catch up :(
1427 *
1428 * We will just lie to this thread
1429 * and let it think p_curtick is
1430 * correct. When it next awakens
1431 * it will find itself further behind.
1432 */
1433 if (from_callout)
1434 counter_u64_add(hpts_hopelessly_behind, 1);
1435 goto no_run;
1436 }
1437 hpts->p_curtick = tcp_gethptstick(&tv);
1438 hpts->p_cur_slot = tick_to_wheel(hpts->p_curtick);
1439 if (!seen_endpoint) {
1440 /* We saw no endpoint but we may be looping */
1441 orig_exit_slot = hpts->p_cur_slot;
1442 }
1443 if ((wrap_loop_cnt < 2) &&
1444 (hpts->p_lasttick != hpts->p_curtick)) {
1445 counter_u64_add(hpts_loops, 1);
1446 loop_cnt++;
1447 goto again;
1448 }
1449 no_run:
1450 tcp_pace.cts_last_ran[hpts->p_num] = tcp_tv_to_usectick(&tv);
1451 /*
1452 * Set the flag to tell any insert that
1453 * happens during input that we are
1454 * done with this wheel run.
1455 */
1456 hpts->p_wheel_complete = 1;
1457 /*
1458 * Now did we spend too long running input and need to run more ticks?
1459 * Note that if wrap_loop_cnt < 2 then we should have the conditions
1460 * in the KASSERT's true. But if the wheel is behind i.e. wrap_loop_cnt
1461 * is greater than 2, then the conditions most likely are *not* true.
1462 * Also if we are called not from the callout, we don't run the wheel
1463 * multiple times so the slots may not align either.
1464 */
1465 KASSERT(((hpts->p_prev_slot == hpts->p_cur_slot) ||
1466 (wrap_loop_cnt >= 2) || !from_callout),
1467 ("H:%p p_prev_slot:%u not equal to p_cur_slot:%u", hpts,
1468 hpts->p_prev_slot, hpts->p_cur_slot));
1469 KASSERT(((hpts->p_lasttick == hpts->p_curtick)
1470 || (wrap_loop_cnt >= 2) || !from_callout),
1471 ("H:%p p_lasttick:%u not equal to p_curtick:%u", hpts,
1472 hpts->p_lasttick, hpts->p_curtick));
1473 if (from_callout && (hpts->p_lasttick != hpts->p_curtick)) {
1474 hpts->p_curtick = tcp_gethptstick(&tv);
1475 counter_u64_add(hpts_loops, 1);
1476 hpts->p_cur_slot = tick_to_wheel(hpts->p_curtick);
1477 goto again;
1478 }
1479
1480 if (from_callout) {
1481 tcp_hpts_set_max_sleep(hpts, wrap_loop_cnt);
1482 }
1483 if (seen_endpoint)
1484 return(hpts_slots_diff(slot_pos_of_endpoint, orig_exit_slot));
1485 else
1486 return (0);
1487 }
1488
1489 void
1490 __tcp_set_hpts(struct tcpcb *tp, int32_t line)
1491 {
1492 struct tcp_hpts_entry *hpts;
1493 int failed;
1494
1495 INP_WLOCK_ASSERT(tptoinpcb(tp));
1496
1497 hpts = tcp_hpts_lock(tp);
1498 if (tp->t_in_hpts == IHPTS_NONE && !(tp->t_flags2 & TF2_HPTS_CPU_SET)) {
1499 tp->t_hpts_cpu = hpts_cpuid(tp, &failed);
1500 if (failed == 0)
1501 tp->t_flags2 |= TF2_HPTS_CPU_SET;
1502 }
1503 HPTS_UNLOCK(hpts);
1504 }
1505
1506 static struct tcp_hpts_entry *
1507 tcp_choose_hpts_to_run(void)
1508 {
1509 int i, oldest_idx, start, end;
1510 uint32_t cts, time_since_ran, calc;
1511
1512 cts = tcp_get_usecs(NULL);
1513 time_since_ran = 0;
1514 /* Default is all one group */
1515 start = 0;
1516 end = tcp_pace.rp_num_hptss;
1517 /*
1518 * If we have more than one L3 group figure out which one
1519 * this CPU is in.
1520 */
1521 if (tcp_pace.grp_cnt > 1) {
1522 for (i = 0; i < tcp_pace.grp_cnt; i++) {
1523 if (CPU_ISSET(curcpu, &tcp_pace.grps[i]->cg_mask)) {
1524 start = tcp_pace.grps[i]->cg_first;
1525 end = (tcp_pace.grps[i]->cg_last + 1);
1526 break;
1527 }
1528 }
1529 }
1530 oldest_idx = -1;
1531 for (i = start; i < end; i++) {
1532 if (TSTMP_GT(cts, tcp_pace.cts_last_ran[i]))
1533 calc = cts - tcp_pace.cts_last_ran[i];
1534 else
1535 calc = 0;
1536 if (calc > time_since_ran) {
1537 oldest_idx = i;
1538 time_since_ran = calc;
1539 }
1540 }
1541 if (oldest_idx >= 0)
1542 return(tcp_pace.rp_ent[oldest_idx]);
1543 else
1544 return(tcp_pace.rp_ent[(curcpu % tcp_pace.rp_num_hptss)]);
1545 }
1546
1547 static void
1548 __tcp_run_hpts(void)
1549 {
1550 struct epoch_tracker et;
1551 struct tcp_hpts_entry *hpts;
1552 int ticks_ran;
1553
1554 hpts = tcp_choose_hpts_to_run();
1555
1556 if (hpts->p_hpts_active) {
1557 /* Already active */
1558 return;
1559 }
1560 if (!HPTS_TRYLOCK(hpts)) {
1561 /* Someone else got the lock */
1562 return;
1563 }
1564 NET_EPOCH_ENTER(et);
1565 if (hpts->p_hpts_active)
1566 goto out_with_mtx;
1567 hpts->syscall_cnt++;
1568 counter_u64_add(hpts_direct_call, 1);
1569 hpts->p_hpts_active = 1;
1570 ticks_ran = tcp_hptsi(hpts, false);
1571 /* We may want to adjust the sleep values here */
1572 if (hpts->p_on_queue_cnt >= conn_cnt_thresh) {
1573 if (ticks_ran > ticks_indicate_less_sleep) {
1574 struct timeval tv;
1575 sbintime_t sb;
1576
1577 hpts->p_mysleep.tv_usec /= 2;
1578 if (hpts->p_mysleep.tv_usec < dynamic_min_sleep)
1579 hpts->p_mysleep.tv_usec = dynamic_min_sleep;
1580 /* Reschedule with new to value */
1581 tcp_hpts_set_max_sleep(hpts, 0);
1582 tv.tv_sec = 0;
1583 tv.tv_usec = hpts->p_hpts_sleep_time * HPTS_TICKS_PER_SLOT;
1584 /* Validate it's in the right range */
1585 if (tv.tv_usec < hpts->p_mysleep.tv_usec) {
1586 hpts->overidden_sleep = tv.tv_usec;
1587 tv.tv_usec = hpts->p_mysleep.tv_usec;
1588 } else if (tv.tv_usec > dynamic_max_sleep) {
1589 /* Lets not let sleep get above this value */
1590 hpts->overidden_sleep = tv.tv_usec;
1591 tv.tv_usec = dynamic_max_sleep;
1592 }
1593 /*
1594 * In this mode the timer is a backstop to
1595 * all the userret/lro_flushes so we use
1596 * the dynamic value and set the on_min_sleep
1597 * flag so we will not be awoken.
1598 */
1599 sb = tvtosbt(tv);
1600 /* Store off to make visible the actual sleep time */
1601 hpts->sleeping = tv.tv_usec;
1602 callout_reset_sbt_on(&hpts->co, sb, 0,
1603 hpts_timeout_swi, hpts, hpts->p_cpu,
1604 (C_DIRECT_EXEC | C_PREL(tcp_hpts_precision)));
1605 } else if (ticks_ran < ticks_indicate_more_sleep) {
1606 /* For the longer sleep, don't reschedule the hpts */
1607 hpts->p_mysleep.tv_usec *= 2;
1608 if (hpts->p_mysleep.tv_usec > dynamic_max_sleep)
1609 hpts->p_mysleep.tv_usec = dynamic_max_sleep;
1610 }
1611 hpts->p_on_min_sleep = 1;
1612 }
1613 hpts->p_hpts_active = 0;
1614 out_with_mtx:
1615 HPTS_UNLOCK(hpts);
1616 NET_EPOCH_EXIT(et);
1617 }
1618
1619 static void
1620 tcp_hpts_thread(void *ctx)
1621 {
1622 struct tcp_hpts_entry *hpts;
1623 struct epoch_tracker et;
1624 struct timeval tv;
1625 sbintime_t sb;
1626 int ticks_ran;
1627
1628 hpts = (struct tcp_hpts_entry *)ctx;
1629 HPTS_LOCK(hpts);
1630 if (hpts->p_direct_wake) {
1631 /* Signaled by input or output with low occupancy count. */
1632 callout_stop(&hpts->co);
1633 counter_u64_add(hpts_direct_awakening, 1);
1634 } else {
1635 /* Timed out, the normal case. */
1636 counter_u64_add(hpts_wake_timeout, 1);
1637 if (callout_pending(&hpts->co) ||
1638 !callout_active(&hpts->co)) {
1639 HPTS_UNLOCK(hpts);
1640 return;
1641 }
1642 }
1643 callout_deactivate(&hpts->co);
1644 hpts->p_hpts_wake_scheduled = 0;
1645 NET_EPOCH_ENTER(et);
1646 if (hpts->p_hpts_active) {
1647 /*
1648 * We are active already. This means that a syscall
1649 * trap or LRO is running on behalf of hpts. In that case
1650 * we need to double our timeout since there seems to be
1651 * enough activity in the system that we don't need to
1652 * run as often (if we were not directly woken).
1653 */
1654 tv.tv_sec = 0;
1655 if (hpts->p_direct_wake == 0) {
1656 counter_u64_add(hpts_back_tosleep, 1);
1657 if (hpts->p_on_queue_cnt >= conn_cnt_thresh) {
1658 hpts->p_mysleep.tv_usec *= 2;
1659 if (hpts->p_mysleep.tv_usec > dynamic_max_sleep)
1660 hpts->p_mysleep.tv_usec = dynamic_max_sleep;
1661 tv.tv_usec = hpts->p_mysleep.tv_usec;
1662 hpts->p_on_min_sleep = 1;
1663 } else {
1664 /*
1665 * Here we have low count on the wheel, but
1666 * somehow we still collided with one of the
1667 * connections. Lets go back to sleep for a
1668 * min sleep time, but clear the flag so we
1669 * can be awoken by insert.
1670 */
1671 hpts->p_on_min_sleep = 0;
1672 tv.tv_usec = tcp_min_hptsi_time;
1673 }
1674 } else {
1675 /*
1676 * Directly woken most likely to reset the
1677 * callout time.
1678 */
1679 tv.tv_usec = hpts->p_mysleep.tv_usec;
1680 }
1681 goto back_to_sleep;
1682 }
	hpts->sleeping = 0;
	hpts->p_hpts_active = 1;
	ticks_ran = tcp_hptsi(hpts, true);
	tv.tv_sec = 0;
	tv.tv_usec = hpts->p_hpts_sleep_time * HPTS_TICKS_PER_SLOT;
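	/*
	 * Track whether this hpts instance is above the connection-count
	 * threshold, so the global softclock hook knows whether any hpts
	 * still needs servicing; hpts_that_need_softclock counts the
	 * instances that do.
	 */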
	if ((hpts->p_on_queue_cnt > conn_cnt_thresh) && (hpts->hit_callout_thresh == 0)) {
		hpts->hit_callout_thresh = 1;
		atomic_add_int(&hpts_that_need_softclock, 1);
	} else if ((hpts->p_on_queue_cnt <= conn_cnt_thresh) && (hpts->hit_callout_thresh == 1)) {
		hpts->hit_callout_thresh = 0;
		atomic_subtract_int(&hpts_that_need_softclock, 1);
	}
	if (hpts->p_on_queue_cnt >= conn_cnt_thresh) {
		if (hpts->p_direct_wake == 0) {
			/*
			 * Only adjust sleep time if we were
			 * called from the callout, i.e. direct_wake == 0.
			 */
			if (ticks_ran < ticks_indicate_more_sleep) {
				hpts->p_mysleep.tv_usec *= 2;
				if (hpts->p_mysleep.tv_usec > dynamic_max_sleep)
					hpts->p_mysleep.tv_usec = dynamic_max_sleep;
			} else if (ticks_ran > ticks_indicate_less_sleep) {
				hpts->p_mysleep.tv_usec /= 2;
				if (hpts->p_mysleep.tv_usec < dynamic_min_sleep)
					hpts->p_mysleep.tv_usec = dynamic_min_sleep;
			}
		}
		if (tv.tv_usec < hpts->p_mysleep.tv_usec) {
			hpts->overidden_sleep = tv.tv_usec;
			tv.tv_usec = hpts->p_mysleep.tv_usec;
		} else if (tv.tv_usec > dynamic_max_sleep) {
			/* Let's not let sleep get above this value */
			hpts->overidden_sleep = tv.tv_usec;
			tv.tv_usec = dynamic_max_sleep;
		}
		/*
		 * In this mode the timer is a backstop to
		 * all the userret/lro_flushes so we use
		 * the dynamic value and set the on_min_sleep
		 * flag so we will not be awoken.
		 */
		hpts->p_on_min_sleep = 1;
	} else if (hpts->p_on_queue_cnt == 0) {
		/*
		 * No one is on the wheel; please wake us up
		 * if you insert on the wheel.
		 */
		hpts->p_on_min_sleep = 0;
		hpts->overidden_sleep = 0;
	} else {
		/*
		 * We hit here when we have a low number of
		 * clients on the wheel (our else clause).
		 * We may need to go on min sleep; if we set
		 * the flag we will not be awoken if someone
		 * is inserted ahead of us. Clearing the flag
		 * means we can be awoken. This is "old mode"
		 * where the timer is what runs hpts mainly.
		 */
		if (tv.tv_usec < tcp_min_hptsi_time) {
			/*
			 * Yes, we are on min sleep, which means
			 * we cannot be awoken.
			 */
			hpts->overidden_sleep = tv.tv_usec;
			tv.tv_usec = tcp_min_hptsi_time;
			hpts->p_on_min_sleep = 1;
		} else {
			/* Clear the min sleep flag */
			hpts->overidden_sleep = 0;
			hpts->p_on_min_sleep = 0;
		}
	}
	HPTS_MTX_ASSERT(hpts);
	hpts->p_hpts_active = 0;
back_to_sleep:
	hpts->p_direct_wake = 0;
	sb = tvtosbt(tv);
	/* Store off to make visible the actual sleep time */
	hpts->sleeping = tv.tv_usec;
	callout_reset_sbt_on(&hpts->co, sb, 0,
	    hpts_timeout_swi, hpts, hpts->p_cpu,
	    (C_DIRECT_EXEC | C_PREL(tcp_hpts_precision)));
	NET_EPOCH_EXIT(et);
	HPTS_UNLOCK(hpts);
}

#undef timersub

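/*
 * Recursively count the number of CG_SHARE_L3 (level three cache)
 * groups in the CPU topology rooted at cg.
 */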
static int32_t
hpts_count_level(struct cpu_group *cg)
{
	int32_t count_l3, i;

	count_l3 = 0;
	if (cg->cg_level == CG_SHARE_L3)
		count_l3++;
	/* Walk all the children looking for L3 */
	for (i = 0; i < cg->cg_children; i++) {
		count_l3 += hpts_count_level(&cg->cg_child[i]);
	}
	return (count_l3);
}

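/*
 * Recursively collect a pointer to every CG_SHARE_L3 group into grps[],
 * stopping once max entries have been gathered; *at tracks how many
 * have been stored so far.
 */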
static void
hpts_gather_grps(struct cpu_group **grps, int32_t *at, int32_t max, struct cpu_group *cg)
{
	int32_t idx, i;

	idx = *at;
	if (cg->cg_level == CG_SHARE_L3) {
		grps[idx] = cg;
		idx++;
		if (idx == max) {
			*at = idx;
			return;
		}
	}
	*at = idx;
	/* Walk all the children looking for L3 */
	for (i = 0; i < cg->cg_children; i++) {
		hpts_gather_grps(grps, at, max, &cg->cg_child[i]);
	}
}

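/*
 * Module load: allocate the per-CPU hpts entries and their stat
 * counters, discover the L3 cache domains for optional thread binding,
 * create a sysctl node per instance, start one SWI thread per hpts,
 * and arm each instance's initial callout. As a sketch of the resulting
 * sysctl tree (leaf names taken from the SYSCTL_ADD calls below),
 * instance 0 exports:
 *
 *	net.inet.tcp.hpts.0.out_qcnt
 *	net.inet.tcp.hpts.0.active
 *	net.inet.tcp.hpts.0.curslot
 *
 * and so on for each of the tcp_pace.rp_num_hptss instances.
 */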
static void
tcp_hpts_mod_load(void)
{
	struct cpu_group *cpu_top;
	int32_t error __diagused;
	int32_t i, j, bound = 0, created = 0;
	size_t sz, asz;
	struct timeval tv;
	sbintime_t sb;
	struct tcp_hpts_entry *hpts;
	struct pcpu *pc;
	char unit[16];
	uint32_t ncpus = mp_ncpus ? mp_ncpus : MAXCPU;
	int count, domain;

#ifdef SMP
	cpu_top = smp_topo();
#else
	cpu_top = NULL;
#endif
	tcp_pace.rp_num_hptss = ncpus;
	hpts_hopelessly_behind = counter_u64_alloc(M_WAITOK);
	hpts_loops = counter_u64_alloc(M_WAITOK);
	back_tosleep = counter_u64_alloc(M_WAITOK);
	combined_wheel_wrap = counter_u64_alloc(M_WAITOK);
	wheel_wrap = counter_u64_alloc(M_WAITOK);
	hpts_wake_timeout = counter_u64_alloc(M_WAITOK);
	hpts_direct_awakening = counter_u64_alloc(M_WAITOK);
	hpts_back_tosleep = counter_u64_alloc(M_WAITOK);
	hpts_direct_call = counter_u64_alloc(M_WAITOK);
	cpu_uses_flowid = counter_u64_alloc(M_WAITOK);
	cpu_uses_random = counter_u64_alloc(M_WAITOK);

	sz = (tcp_pace.rp_num_hptss * sizeof(struct tcp_hpts_entry *));
	tcp_pace.rp_ent = malloc(sz, M_TCPHPTS, M_WAITOK | M_ZERO);
	sz = (sizeof(uint32_t) * tcp_pace.rp_num_hptss);
	tcp_pace.cts_last_ran = malloc(sz, M_TCPHPTS, M_WAITOK);
	tcp_pace.grp_cnt = 0;
	if (cpu_top == NULL) {
		tcp_pace.grp_cnt = 1;
	} else {
		/* Find out how many cache level 3 domains we have */
		count = 0;
		tcp_pace.grp_cnt = hpts_count_level(cpu_top);
		if (tcp_pace.grp_cnt == 0) {
			tcp_pace.grp_cnt = 1;
		}
		sz = (tcp_pace.grp_cnt * sizeof(struct cpu_group *));
		tcp_pace.grps = malloc(sz, M_TCPHPTS, M_WAITOK);
		/* Now populate the groups */
		if (tcp_pace.grp_cnt == 1) {
			/*
			 * All we need is the top level; all CPUs are in
			 * the same cache domain, so using grp[0]->cg_mask
			 * with the cg_first <-> cg_last range will include
			 * every CPU. The level here is probably zero,
			 * which is OK.
			 */
			tcp_pace.grps[0] = cpu_top;
		} else {
			/*
			 * Here we must find all the level three cache
			 * domains and set up our pointers to them.
			 */
			count = 0;
			hpts_gather_grps(tcp_pace.grps, &count, tcp_pace.grp_cnt, cpu_top);
		}
	}
	asz = sizeof(struct hptsh) * NUM_OF_HPTSI_SLOTS;
	for (i = 0; i < tcp_pace.rp_num_hptss; i++) {
		tcp_pace.rp_ent[i] = malloc(sizeof(struct tcp_hpts_entry),
		    M_TCPHPTS, M_WAITOK | M_ZERO);
		tcp_pace.rp_ent[i]->p_hptss = malloc(asz, M_TCPHPTS, M_WAITOK);
		hpts = tcp_pace.rp_ent[i];
		/*
		 * Init all the hpts structures that are not specifically
		 * zero'd by the allocations. Also attach them to the
		 * appropriate sysctl block.
		 */
		mtx_init(&hpts->p_mtx, "tcp_hpts_lck",
		    "hpts", MTX_DEF | MTX_DUPOK);
		for (j = 0; j < NUM_OF_HPTSI_SLOTS; j++) {
			TAILQ_INIT(&hpts->p_hptss[j].head);
			hpts->p_hptss[j].count = 0;
			hpts->p_hptss[j].gencnt = 0;
		}
		sysctl_ctx_init(&hpts->hpts_ctx);
		sprintf(unit, "%d", i);
		hpts->hpts_root = SYSCTL_ADD_NODE(&hpts->hpts_ctx,
		    SYSCTL_STATIC_CHILDREN(_net_inet_tcp_hpts),
		    OID_AUTO,
		    unit,
		    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
		    "");
		SYSCTL_ADD_INT(&hpts->hpts_ctx,
		    SYSCTL_CHILDREN(hpts->hpts_root),
		    OID_AUTO, "out_qcnt", CTLFLAG_RD,
		    &hpts->p_on_queue_cnt, 0,
		    "Count of TCBs awaiting output processing");
		SYSCTL_ADD_U16(&hpts->hpts_ctx,
		    SYSCTL_CHILDREN(hpts->hpts_root),
		    OID_AUTO, "active", CTLFLAG_RD,
		    &hpts->p_hpts_active, 0,
		    "Is the hpts active");
		SYSCTL_ADD_UINT(&hpts->hpts_ctx,
		    SYSCTL_CHILDREN(hpts->hpts_root),
		    OID_AUTO, "curslot", CTLFLAG_RD,
		    &hpts->p_cur_slot, 0,
		    "What the currently running pacer's goal slot is");
		SYSCTL_ADD_UINT(&hpts->hpts_ctx,
		    SYSCTL_CHILDREN(hpts->hpts_root),
		    OID_AUTO, "runtick", CTLFLAG_RD,
		    &hpts->p_runningslot, 0,
		    "What the running pacer's current slot is");
		SYSCTL_ADD_UINT(&hpts->hpts_ctx,
		    SYSCTL_CHILDREN(hpts->hpts_root),
		    OID_AUTO, "curtick", CTLFLAG_RD,
		    &hpts->p_curtick, 0,
		    "What the running pacer's last tick mapped to the wheel was");
		SYSCTL_ADD_UINT(&hpts->hpts_ctx,
		    SYSCTL_CHILDREN(hpts->hpts_root),
		    OID_AUTO, "lastran", CTLFLAG_RD,
		    &tcp_pace.cts_last_ran[i], 0,
		    "The last usec tick that this hpts ran");
		SYSCTL_ADD_LONG(&hpts->hpts_ctx,
		    SYSCTL_CHILDREN(hpts->hpts_root),
		    OID_AUTO, "cur_min_sleep", CTLFLAG_RD,
		    &hpts->p_mysleep.tv_usec,
		    "What the running pacer is using for p_mysleep.tv_usec");
		SYSCTL_ADD_U64(&hpts->hpts_ctx,
		    SYSCTL_CHILDREN(hpts->hpts_root),
		    OID_AUTO, "now_sleeping", CTLFLAG_RD,
		    &hpts->sleeping, 0,
		    "What the running pacer is actually sleeping for");
		SYSCTL_ADD_U64(&hpts->hpts_ctx,
		    SYSCTL_CHILDREN(hpts->hpts_root),
		    OID_AUTO, "syscall_cnt", CTLFLAG_RD,
		    &hpts->syscall_cnt, 0,
		    "How many times we had syscalls on this hpts");

		hpts->p_hpts_sleep_time = hpts_sleep_max;
		hpts->p_num = i;
		hpts->p_curtick = tcp_gethptstick(&tv);
		tcp_pace.cts_last_ran[i] = tcp_tv_to_usectick(&tv);
		hpts->p_prev_slot = hpts->p_cur_slot = tick_to_wheel(hpts->p_curtick);
		hpts->p_cpu = 0xffff;
		hpts->p_nxt_slot = hpts_slot(hpts->p_cur_slot, 1);
		callout_init(&hpts->co, 1);
	}
	/* Don't try to bind to NUMA domains if we don't have any */
	if (vm_ndomains == 1 && tcp_bind_threads == 2)
		tcp_bind_threads = 0;

	/*
	 * Now let's start ithreads to handle the hptss.
	 */
	for (i = 0; i < tcp_pace.rp_num_hptss; i++) {
		hpts = tcp_pace.rp_ent[i];
		hpts->p_cpu = i;

		error = swi_add(&hpts->ie, "hpts",
		    tcp_hpts_thread, (void *)hpts,
		    SWI_NET, INTR_MPSAFE, &hpts->ie_cookie);
		KASSERT(error == 0,
		    ("Can't add hpts:%p i:%d err:%d",
		    hpts, i, error));
		created++;
		hpts->p_mysleep.tv_sec = 0;
		hpts->p_mysleep.tv_usec = tcp_min_hptsi_time;
		if (tcp_bind_threads == 1) {
			if (intr_event_bind(hpts->ie, i) == 0)
				bound++;
		} else if (tcp_bind_threads == 2) {
			/* Find the group for this CPU (i) and bind into it */
			for (j = 0; j < tcp_pace.grp_cnt; j++) {
				if (CPU_ISSET(i, &tcp_pace.grps[j]->cg_mask)) {
					if (intr_event_bind_ithread_cpuset(hpts->ie,
					    &tcp_pace.grps[j]->cg_mask) == 0) {
						bound++;
						pc = pcpu_find(i);
						domain = pc->pc_domain;
						count = hpts_domains[domain].count;
						hpts_domains[domain].cpu[count] = i;
						hpts_domains[domain].count++;
						break;
					}
				}
			}
		}
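		/*
		 * Arm the initial callout for this instance using the
		 * default (maximum) sleep time; the SWI will retune the
		 * timeout on each pass once traffic arrives.
		 */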
		tv.tv_sec = 0;
		tv.tv_usec = hpts->p_hpts_sleep_time * HPTS_TICKS_PER_SLOT;
		hpts->sleeping = tv.tv_usec;
		sb = tvtosbt(tv);
		callout_reset_sbt_on(&hpts->co, sb, 0,
		    hpts_timeout_swi, hpts, hpts->p_cpu,
		    (C_DIRECT_EXEC | C_PREL(tcp_hpts_precision)));
	}
	/*
	 * If we somehow have an empty domain, fall back to choosing
	 * among all hpts threads.
	 */
	for (i = 0; i < vm_ndomains; i++) {
		if (hpts_domains[i].count == 0) {
			tcp_bind_threads = 0;
			break;
		}
	}
	tcp_hpts_softclock = __tcp_run_hpts;
	tcp_lro_hpts_init();
	printf("TCP Hpts created %d swi interrupt threads and bound %d to %s\n",
	    created, bound,
	    tcp_bind_threads == 2 ? "NUMA domains" : "cpus");
}

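/*
 * Module unload: tear everything down in the reverse order of
 * tcp_hpts_mod_load(). Drain each callout and remove each SWI before
 * freeing the per-instance state, then release the shared arrays and
 * the stat counters.
 */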
static void
tcp_hpts_mod_unload(void)
{
	int rv __diagused;

	tcp_lro_hpts_uninit();
	atomic_store_ptr(&tcp_hpts_softclock, NULL);

	for (int i = 0; i < tcp_pace.rp_num_hptss; i++) {
		struct tcp_hpts_entry *hpts = tcp_pace.rp_ent[i];

		rv = callout_drain(&hpts->co);
		MPASS(rv != 0);

		rv = swi_remove(hpts->ie_cookie);
		MPASS(rv == 0);

		rv = sysctl_ctx_free(&hpts->hpts_ctx);
		MPASS(rv == 0);

		mtx_destroy(&hpts->p_mtx);
		free(hpts->p_hptss, M_TCPHPTS);
		free(hpts, M_TCPHPTS);
	}

	free(tcp_pace.rp_ent, M_TCPHPTS);
	free(tcp_pace.cts_last_ran, M_TCPHPTS);
#ifdef SMP
	free(tcp_pace.grps, M_TCPHPTS);
#endif

	counter_u64_free(hpts_hopelessly_behind);
	counter_u64_free(hpts_loops);
	counter_u64_free(back_tosleep);
	counter_u64_free(combined_wheel_wrap);
	counter_u64_free(wheel_wrap);
	counter_u64_free(hpts_wake_timeout);
	counter_u64_free(hpts_direct_awakening);
	counter_u64_free(hpts_back_tosleep);
	counter_u64_free(hpts_direct_call);
	counter_u64_free(cpu_uses_flowid);
	counter_u64_free(cpu_uses_random);
}

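/*
 * Module event handler. Note that MOD_QUIESCE is refused (EBUSY), so a
 * plain "kldunload tcphpts" fails; only a forced unload reaches
 * MOD_UNLOAD, for the reasons spelled out below.
 */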
static int
tcp_hpts_modevent(module_t mod, int what, void *arg)
{

	switch (what) {
	case MOD_LOAD:
		tcp_hpts_mod_load();
		return (0);
	case MOD_QUIESCE:
		/*
		 * Since we are a dependency of TCP stack modules, they
		 * should already be unloaded, and the HPTS ring is empty.
		 * However, function pointer manipulations aren't 100%
		 * safe: although tcp_hpts_mod_unload() uses atomic(9),
		 * the userret() path doesn't. Thus, allow only forced
		 * unload of HPTS.
		 */
		return (EBUSY);
	case MOD_UNLOAD:
		tcp_hpts_mod_unload();
		return (0);
	default:
		return (EINVAL);
	}
}

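/*
 * Register at SI_SUB_SOFTINTR so that, when compiled in, hpts comes up
 * along with the software interrupt infrastructure its SWI threads
 * need, early enough for the TCP stack modules that depend on it.
 */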
static moduledata_t tcp_hpts_module = {
	.name = "tcphpts",
	.evhand = tcp_hpts_modevent,
};

DECLARE_MODULE(tcphpts, tcp_hpts_module, SI_SUB_SOFTINTR, SI_ORDER_ANY);
MODULE_VERSION(tcphpts, 1);