xref: /freebsd/sys/netinet/tcp_hpts.c (revision f39bffc62c1395bde25d152c7f68fdf7cbaab414)
1 /*-
2  * Copyright (c) 2016-2018 Netflix Inc.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
14  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
17  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23  * SUCH DAMAGE.
24  *
25  */
26 #include <sys/cdefs.h>
27 __FBSDID("$FreeBSD$");
28 
29 #include "opt_inet.h"
30 #include "opt_inet6.h"
31 #include "opt_ipsec.h"
32 #include "opt_tcpdebug.h"
33 /**
34  * Some notes about usage.
35  *
36  * The tcp_hpts system is designed to provide a high precision timer
37  * system for tcp. Its main purpose is to provide a mechanism for
38  * pacing packets out onto the wire. It can be used in two ways
39  * by a given TCP stack (and those two methods can be used simultaneously).
40  *
41  * First, and probably the main thing it's used by Rack and BBR for, it can
42  * be used to call tcp_output() of a transport stack at some time in the future.
43  * The normal way this is done is that tcp_output() of the stack schedules
44  * itself to be called again by calling tcp_hpts_insert(tcpcb, slot). The
45  * slot is the time from now that the stack wants to be called but it
46  * must be converted to tcp_hpts's notion of slot. This is done with
47  * one of the macros HPTS_MS_TO_SLOTS or HPTS_USEC_TO_SLOTS. So a typical
48  * call from the tcp_output() routine might look like:
49  *
50  * tcp_hpts_insert(tp, HPTS_USEC_TO_SLOTS(550));
51  *
52  * The above would schedule tcp_output() to be called in 550 microseconds.
53  * Note that if using this mechanism the stack will want to add near
54  * its top a check to prevent unwanted calls (from user land or the
55  * arrival of incoming acks). So it would add something like:
56  *
57  * if (inp->inp_in_hpts)
58  *    return;
59  *
60  * to prevent output processing until the time allotted has gone by.
61  * Of course this is a bare-bones example and the stack will probably
62  * have more considerations than just the above.
63  *
64  * Now the tcp_hpts system will call tcp_output() in one of two forms.
65  * It will first check to see if the stack has defined a
66  * tfb_tcp_output_wtime() function; if so, that is the routine it
67  * will call. If that function is not defined, then it will call the
68  * tfb_tcp_output() function. The only difference between these
69  * two calls is that the former passes the time in to the function
70  * so the function does not have to access the time (which tcp_hpts
71  * already has). What these functions do is of course totally up
72  * to the individual tcp stack.
73  *
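 * A minimal sketch of wiring up both hooks (hypothetical stack and
 * helper names; the (tp, &tv) calling convention matches the dispatch
 * in tcp_hptsi() below):
 *
 * static int
 * example_output_wtime(struct tcpcb *tp, struct timeval *tv)
 * {
 *	... pace using *tv rather than re-reading the clock ...
 * }
 *
 * struct tcp_function_block example_blk = {
 *	...
 *	.tfb_tcp_output = example_output,
 *	.tfb_tcp_output_wtime = example_output_wtime,
 * };
 *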
74  * The second facility (actually two facilities, I guess :D)
75  * the tcp_hpts system provides is the ability to either abort
76  * a connection (later) or process input on a connection.
77  * Why would you want to do this? To keep processor locality.
78  *
79  * So in order to use the input redirection facility, the
80  * stack changes its tcp_do_segment() routine so that, instead
81  * of processing the data, it calls the function:
82  *
83  * tcp_queue_pkt_to_input()
84  *
85  * You will note that the arguments to this function look
86  * a lot like tcp_do_segment()'s arguments. This function
87  * will assure that the tcp_hpts system
88  * calls the stack's tfb_tcp_hpts_do_segment() from the
89  * correct CPU. Note that multiple packets can get pushed
90  * into the tcp_hpts system; this will be indicated by
91  * the next-to-last argument to tfb_tcp_hpts_do_segment()
92  * (nxt_pkt). If nxt_pkt is 1, then another packet is
93  * coming. If nxt_pkt is 0, then this is the last call
94  * that the tcp_hpts system has available for the tcp stack.
95  *
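 * A handler can use nxt_pkt to batch work, deferring expensive steps
 * to the final segment of a burst. A minimal sketch (hypothetical
 * helper names; the argument list mirrors the dispatch in
 * tcp_input_data() below):
 *
 * static void
 * example_hpts_do_segment(struct mbuf *m, struct tcphdr *th,
 *     struct socket *so, struct tcpcb *tp, int32_t drop_hdrlen,
 *     int32_t tlen, uint8_t iptos, int32_t ti_locked, int32_t nxt_pkt,
 *     struct timeval *tv)
 * {
 *	example_absorb_segment(tp, m, th, tlen, drop_hdrlen, iptos);
 *	if (nxt_pkt == 0)
 *		example_flush_pending_work(tp, tv);
 * }
 *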
96  * The other point of the input system is to be able to safely
97  * drop a tcp connection without worrying about the recursive
98  * locking that may be occurring on the INP_WLOCK. So if
99  * a stack wants to drop a connection it calls:
100  *
101  *     tcp_set_inp_to_drop(inp, ETIMEDOUT)
102  *
103  * to schedule the tcp_hpts system to call
104  *
105  *    tcp_drop(tp, drop_reason)
106  *
107  * at a future point. This is quite handy to prevent locking
108  * issues when dropping connections.
109  *
110  */
111 
112 #include <sys/param.h>
113 #include <sys/bus.h>
114 #include <sys/interrupt.h>
115 #include <sys/module.h>
116 #include <sys/kernel.h>
117 #include <sys/hhook.h>
118 #include <sys/malloc.h>
119 #include <sys/mbuf.h>
120 #include <sys/proc.h>		/* for proc0 declaration */
121 #include <sys/socket.h>
122 #include <sys/socketvar.h>
123 #include <sys/sysctl.h>
124 #include <sys/systm.h>
125 #include <sys/refcount.h>
126 #include <sys/sched.h>
127 #include <sys/queue.h>
128 #include <sys/smp.h>
129 #include <sys/counter.h>
130 #include <sys/time.h>
131 #include <sys/kthread.h>
132 #include <sys/kern_prefetch.h>
133 
134 #include <vm/uma.h>
135 
136 #include <net/route.h>
137 #include <net/vnet.h>
138 
139 #define TCPSTATES		/* for logging */
140 
141 #include <netinet/in.h>
142 #include <netinet/in_kdtrace.h>
143 #include <netinet/in_pcb.h>
144 #include <netinet/ip.h>
145 #include <netinet/ip_icmp.h>	/* required for icmp_var.h */
146 #include <netinet/icmp_var.h>	/* for ICMP_BANDLIM */
147 #include <netinet/ip_var.h>
148 #include <netinet/ip6.h>
149 #include <netinet6/in6_pcb.h>
150 #include <netinet6/ip6_var.h>
151 #include <netinet/tcp.h>
152 #include <netinet/tcp_fsm.h>
153 #include <netinet/tcp_seq.h>
154 #include <netinet/tcp_timer.h>
155 #include <netinet/tcp_var.h>
156 #include <netinet/tcpip.h>
157 #include <netinet/cc/cc.h>
158 #include <netinet/tcp_hpts.h>
159 
160 #ifdef TCPDEBUG
161 #include <netinet/tcp_debug.h>
162 #endif				/* TCPDEBUG */
163 #ifdef TCP_OFFLOAD
164 #include <netinet/tcp_offload.h>
165 #endif
166 
167 #ifdef IPSEC
168 #include <netipsec/ipsec.h>
169 #include <netipsec/ipsec6.h>
170 #endif				/* IPSEC */
171 #include "opt_rss.h"
172 
173 MALLOC_DEFINE(M_TCPHPTS, "tcp_hpts", "TCP hpts");
174 #ifdef RSS
175 static int tcp_bind_threads = 1;
176 #else
177 static int tcp_bind_threads = 0;
178 #endif
179 TUNABLE_INT("net.inet.tcp.bind_hptss", &tcp_bind_threads);
180 
181 static uint32_t tcp_hpts_logging_size = DEFAULT_HPTS_LOG;
182 
183 TUNABLE_INT("net.inet.tcp.hpts_logging_sz", &tcp_hpts_logging_size);
184 
185 static struct tcp_hptsi tcp_pace;
186 
187 static int
188 tcp_hptsi_lock_inpinfo(struct inpcb *inp,
189     struct tcpcb **tp);
190 static void tcp_wakehpts(struct tcp_hpts_entry *p);
191 static void tcp_wakeinput(struct tcp_hpts_entry *p);
192 static void tcp_input_data(struct tcp_hpts_entry *hpts, struct timeval *tv);
193 static void tcp_hptsi(struct tcp_hpts_entry *hpts, struct timeval *ctick);
194 static void tcp_hpts_thread(void *ctx);
195 static void tcp_init_hptsi(void *st);
196 
197 int32_t tcp_min_hptsi_time = DEFAULT_MIN_SLEEP;
198 static int32_t tcp_hpts_callout_skip_swi = 0;
199 
200 SYSCTL_NODE(_net_inet_tcp, OID_AUTO, hpts, CTLFLAG_RW, 0, "TCP Hpts controls");
201 
202 #define	timersub(tvp, uvp, vvp)						\
203 	do {								\
204 		(vvp)->tv_sec = (tvp)->tv_sec - (uvp)->tv_sec;		\
205 		(vvp)->tv_usec = (tvp)->tv_usec - (uvp)->tv_usec;	\
206 		if ((vvp)->tv_usec < 0) {				\
207 			(vvp)->tv_sec--;				\
208 			(vvp)->tv_usec += 1000000;			\
209 		}							\
210 	} while (0)
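/*
 * For example, subtracting uvp = {0, 400000} from tvp = {1, 100}
 * borrows a second and leaves vvp = {0, 600100}.
 */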
211 
212 static int32_t logging_on = 0;
213 static int32_t hpts_sleep_max = (NUM_OF_HPTSI_SLOTS - 2);
214 static int32_t tcp_hpts_precision = 120;
215 
216 SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, precision, CTLFLAG_RW,
217     &tcp_hpts_precision, 120,
218     "Value for PRE() precision of callout");
219 
220 SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, logging, CTLFLAG_RW,
221     &logging_on, 0,
222     "Turn on logging if compiled in");
223 
224 counter_u64_t hpts_loops;
225 
226 SYSCTL_COUNTER_U64(_net_inet_tcp_hpts, OID_AUTO, loops, CTLFLAG_RD,
227     &hpts_loops, "Number of times hpts had to loop to catch up");
228 
229 counter_u64_t back_tosleep;
230 
231 SYSCTL_COUNTER_U64(_net_inet_tcp_hpts, OID_AUTO, no_tcbsfound, CTLFLAG_RD,
232     &back_tosleep, "Number of times hpts found no tcbs");
233 
234 static int32_t in_newts_every_tcb = 0;
235 
236 SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, in_tsperpcb, CTLFLAG_RW,
237     &in_newts_every_tcb, 0,
238     "Do we have a new cts every tcb we process for input");
239 static int32_t in_ts_percision = 0;
240 
241 SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, in_tspercision, CTLFLAG_RW,
242     &in_ts_percision, 0,
243     "Do we use percise timestamp for clients on input");
244 static int32_t out_newts_every_tcb = 0;
245 
246 SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, out_tsperpcb, CTLFLAG_RW,
247     &out_newts_every_tcb, 0,
248     "Do we have a new cts every tcb we process for output");
249 static int32_t out_ts_percision = 0;
250 
251 SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, out_tspercision, CTLFLAG_RW,
252     &out_ts_percision, 0,
253     "Do we use a percise timestamp for every output cts");
254 
255 SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, maxsleep, CTLFLAG_RW,
256     &hpts_sleep_max, 0,
257     "The maximum time the hpts will sleep <1 - 254>");
258 
259 SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, minsleep, CTLFLAG_RW,
260     &tcp_min_hptsi_time, 0,
261     "The minimum time the hpts must sleep before processing more slots");
262 
263 SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, skip_swi, CTLFLAG_RW,
264     &tcp_hpts_callout_skip_swi, 0,
265     "Do we have the callout call directly to the hpts?");
266 
267 static void
268 __tcp_hpts_log_it(struct tcp_hpts_entry *hpts, struct inpcb *inp, int event, uint32_t slot,
269     uint32_t ticknow, int32_t line)
270 {
271 	struct hpts_log *pl;
272 
273 	HPTS_MTX_ASSERT(hpts);
274 	if (hpts->p_log == NULL)
275 		return;
276 	pl = &hpts->p_log[hpts->p_log_at];
277 	hpts->p_log_at++;
278 	if (hpts->p_log_at >= hpts->p_logsize) {
279 		hpts->p_log_at = 0;
280 		hpts->p_log_wrapped = 1;
281 	}
282 	pl->inp = inp;
283 	if (inp) {
284 		pl->t_paceslot = inp->inp_hptsslot;
285 		pl->t_hptsreq = inp->inp_hpts_request;
286 		pl->p_onhpts = inp->inp_in_hpts;
287 		pl->p_oninput = inp->inp_in_input;
288 	} else {
289 		pl->t_paceslot = 0;
290 		pl->t_hptsreq = 0;
291 		pl->p_onhpts = 0;
292 		pl->p_oninput = 0;
293 	}
294 	pl->is_notempty = 1;
295 	pl->event = event;
296 	pl->line = line;
297 	pl->cts = tcp_get_usecs(NULL);
298 	pl->p_curtick = hpts->p_curtick;
299 	pl->p_prevtick = hpts->p_prevtick;
300 	pl->p_on_queue_cnt = hpts->p_on_queue_cnt;
301 	pl->ticknow = ticknow;
302 	pl->slot_req = slot;
303 	pl->p_nxt_slot = hpts->p_nxt_slot;
304 	pl->p_cur_slot = hpts->p_cur_slot;
305 	pl->p_hpts_sleep_time = hpts->p_hpts_sleep_time;
306 	pl->p_flags = (hpts->p_cpu & 0x7f);
307 	pl->p_flags <<= 7;
308 	pl->p_flags |= (hpts->p_num & 0x7f);
309 	pl->p_flags <<= 2;
310 	if (hpts->p_hpts_active) {
311 		pl->p_flags |= HPTS_HPTS_ACTIVE;
312 	}
313 }
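/*
 * The p_flags word built above packs, from the high bits down: p_cpu
 * (7 bits), p_num (7 bits), then two low state bits, of which
 * HPTS_HPTS_ACTIVE marks an hpts that was running when logged.
 */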
314 
315 #define tcp_hpts_log_it(a, b, c, d, e) __tcp_hpts_log_it(a, b, c, d, e, __LINE__)
316 
317 static void
318 hpts_timeout_swi(void *arg)
319 {
320 	struct tcp_hpts_entry *hpts;
321 
322 	hpts = (struct tcp_hpts_entry *)arg;
323 	swi_sched(hpts->ie_cookie, 0);
324 }
325 
326 static void
327 hpts_timeout_dir(void *arg)
328 {
329 	tcp_hpts_thread(arg);
330 }
331 
332 static inline void
333 hpts_sane_pace_remove(struct tcp_hpts_entry *hpts, struct inpcb *inp, struct hptsh *head, int clear)
334 {
335 #ifdef INVARIANTS
336 	if (mtx_owned(&hpts->p_mtx) == 0) {
337 		/* We don't own the mutex? */
338 		panic("%s: hpts:%p inp:%p no hpts mutex", __FUNCTION__, hpts, inp);
339 	}
340 	if (hpts->p_cpu != inp->inp_hpts_cpu) {
341 		/* It is not the right cpu/mutex? */
342 		panic("%s: hpts:%p inp:%p incorrect CPU", __FUNCTION__, hpts, inp);
343 	}
344 	if (inp->inp_in_hpts == 0) {
345 		/* We are not on the hpts? */
346 		panic("%s: hpts:%p inp:%p not on the hpts?", __FUNCTION__, hpts, inp);
347 	}
348 	if (TAILQ_EMPTY(head) &&
349 	    (hpts->p_on_queue_cnt != 0)) {
350 		/* We should not be empty with a queue count */
351 		panic("%s hpts:%p hpts bucket empty but cnt:%d",
352 		    __FUNCTION__, hpts, hpts->p_on_queue_cnt);
353 	}
354 #endif
355 	TAILQ_REMOVE(head, inp, inp_hpts);
356 	hpts->p_on_queue_cnt--;
357 	if (hpts->p_on_queue_cnt < 0) {
358 		/* Count should not go negative .. */
359 #ifdef INVARIANTS
360 		panic("Hpts goes negative inp:%p hpts:%p",
361 		    inp, hpts);
362 #endif
363 		hpts->p_on_queue_cnt = 0;
364 	}
365 	if (clear) {
366 		inp->inp_hpts_request = 0;
367 		inp->inp_in_hpts = 0;
368 	}
369 }
370 
371 static inline void
372 hpts_sane_pace_insert(struct tcp_hpts_entry *hpts, struct inpcb *inp, struct hptsh *head, int line, int noref)
373 {
374 #ifdef INVARIANTS
375 	if (mtx_owned(&hpts->p_mtx) == 0) {
376 		/* We don't own the mutex? */
377 		panic("%s: hpts:%p inp:%p no hpts mutex", __FUNCTION__, hpts, inp);
378 	}
379 	if (hpts->p_cpu != inp->inp_hpts_cpu) {
380 		/* It is not the right cpu/mutex? */
381 		panic("%s: hpts:%p inp:%p incorrect CPU", __FUNCTION__, hpts, inp);
382 	}
383 	if ((noref == 0) && (inp->inp_in_hpts == 1)) {
384 		/* We are already on the hpts? */
385 		panic("%s: hpts:%p inp:%p already on the hpts?", __FUNCTION__, hpts, inp);
386 	}
387 #endif
388 	TAILQ_INSERT_TAIL(head, inp, inp_hpts);
389 	inp->inp_in_hpts = 1;
390 	hpts->p_on_queue_cnt++;
391 	if (noref == 0) {
392 		in_pcbref(inp);
393 	}
394 }
395 
396 static inline void
397 hpts_sane_input_remove(struct tcp_hpts_entry *hpts, struct inpcb *inp, int clear)
398 {
399 #ifdef INVARIANTS
400 	if (mtx_owned(&hpts->p_mtx) == 0) {
401 		/* We don't own the mutex? */
402 		panic("%s: hpts:%p inp:%p no hpts mutex", __FUNCTION__, hpts, inp);
403 	}
404 	if (hpts->p_cpu != inp->inp_input_cpu) {
405 		/* It is not the right cpu/mutex? */
406 		panic("%s: hpts:%p inp:%p incorrect CPU", __FUNCTION__, hpts, inp);
407 	}
408 	if (inp->inp_in_input == 0) {
409 		/* We are not on the input hpts? */
410 		panic("%s: hpts:%p inp:%p not on the input hpts?", __FUNCTION__, hpts, inp);
411 	}
412 #endif
413 	TAILQ_REMOVE(&hpts->p_input, inp, inp_input);
414 	hpts->p_on_inqueue_cnt--;
415 	if (hpts->p_on_inqueue_cnt < 0) {
416 #ifdef INVARIANTS
417 		panic("Hpts in goes negative inp:%p hpts:%p",
418 		    inp, hpts);
419 #endif
420 		hpts->p_on_inqueue_cnt = 0;
421 	}
422 #ifdef INVARIANTS
423 	if (TAILQ_EMPTY(&hpts->p_input) &&
424 	    (hpts->p_on_inqueue_cnt != 0)) {
425 		/* We should not be empty with a queue count */
426 		panic("%s hpts:%p in_hpts input empty but cnt:%d",
427 		    __FUNCTION__, hpts, hpts->p_on_inqueue_cnt);
428 	}
429 #endif
430 	if (clear)
431 		inp->inp_in_input = 0;
432 }
433 
434 static inline void
435 hpts_sane_input_insert(struct tcp_hpts_entry *hpts, struct inpcb *inp, int line)
436 {
437 #ifdef INVARIANTS
438 	if (mtx_owned(&hpts->p_mtx) == 0) {
439 		/* We don't own the mutex? */
440 		panic("%s: hpts:%p inp:%p no hpts mutex", __FUNCTION__, hpts, inp);
441 	}
442 	if (hpts->p_cpu != inp->inp_input_cpu) {
443 		/* It is not the right cpu/mutex? */
444 		panic("%s: hpts:%p inp:%p incorrect CPU", __FUNCTION__, hpts, inp);
445 	}
446 	if (inp->inp_in_input == 1) {
447 		/* We are already on the input hpts? */
448 		panic("%s: hpts:%p inp:%p already on the input hpts?", __FUNCTION__, hpts, inp);
449 	}
450 #endif
451 	TAILQ_INSERT_TAIL(&hpts->p_input, inp, inp_input);
452 	inp->inp_in_input = 1;
453 	hpts->p_on_inqueue_cnt++;
454 	in_pcbref(inp);
455 }
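/*
 * The hpts_sane_*() helpers above keep three things in lock-step: the
 * inp's on-queue flag (inp_in_hpts/inp_in_input), the per-hpts queue
 * count, and the pcb reference taken at insert time (in_pcbref());
 * the matching release is done by the callers on removal (e.g. via
 * tcp_remove_hpts_ref()).
 */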
456 
457 static int
458 sysctl_tcp_hpts_log(SYSCTL_HANDLER_ARGS)
459 {
460 	struct tcp_hpts_entry *hpts;
461 	size_t sz;
462 	int32_t logging_was, i;
463 	int32_t error = 0;
464 
465 	/*
466 	 * HACK: Turn off logging so no locks are required. This really
467 	 * needs a memory barrier :)
468 	 */
469 	logging_was = logging_on;
470 	logging_on = 0;
471 	if (!req->oldptr) {
472 		/* How much? */
473 		sz = 0;
474 		for (i = 0; i < tcp_pace.rp_num_hptss; i++) {
475 			hpts = tcp_pace.rp_ent[i];
476 			if (hpts->p_log == NULL)
477 				continue;
478 			sz += (sizeof(struct hpts_log) * hpts->p_logsize);
479 		}
480 		error = SYSCTL_OUT(req, 0, sz);
481 	} else {
482 		for (i = 0; i < tcp_pace.rp_num_hptss; i++) {
483 			hpts = tcp_pace.rp_ent[i];
484 			if (hpts->p_log == NULL)
485 				continue;
486 			if (hpts->p_log_wrapped)
487 				sz = (sizeof(struct hpts_log) * hpts->p_logsize);
488 			else
489 				sz = (sizeof(struct hpts_log) * hpts->p_log_at);
490 			error = SYSCTL_OUT(req, hpts->p_log, sz);
491 		}
492 	}
493 	logging_on = logging_was;
494 	return error;
495 }
496 
497 SYSCTL_PROC(_net_inet_tcp_hpts, OID_AUTO, log, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE,
498     0, 0, sysctl_tcp_hpts_log, "A", "tcp hptsi log");
499 
500 
501 /*
502  * Try to get the INP_INFO lock.
503  *
504  * This function always succeeds in getting the lock. It will clear
505  * *tpp and return (1) if something critical changed while the inpcb
506  * was unlocked. Otherwise, it will leave *tpp unchanged and return (0).
507  *
508  * This function relies on the fact that the hpts always holds a
509  * reference on the inpcb while it is on the hptsi wheel or
510  * in the input queue.
511  *
512  */
513 static int
514 tcp_hptsi_lock_inpinfo(struct inpcb *inp, struct tcpcb **tpp)
515 {
516 	struct tcp_function_block *tfb;
517 	struct tcpcb *tp;
518 	void *ptr;
519 
520 	/* Try the easy way. */
521 	if (INP_INFO_TRY_RLOCK(&V_tcbinfo))
522 		return (0);
523 
524 	/*
525 	 * OK, let's try the hard way. We'll save the function pointer block
526 	 * to make sure that doesn't change while we aren't holding the
527 	 * lock.
528 	 */
529 	tp = *tpp;
530 	tfb = tp->t_fb;
531 	ptr = tp->t_fb_ptr;
532 	INP_WUNLOCK(inp);
533 	INP_INFO_RLOCK(&V_tcbinfo);
534 	INP_WLOCK(inp);
535 	/* If the session went away, return an error. */
536 	if ((inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) ||
537 	    (inp->inp_flags2 & INP_FREED)) {
538 		*tpp = NULL;
539 		return (1);
540 	}
541 	/*
542 	 * If the function block or stack-specific data block changed,
543 	 * report an error.
544 	 */
545 	tp = intotcpcb(inp);
546 	if ((tp->t_fb != tfb) || (tp->t_fb_ptr != ptr)) {
547 		*tpp = NULL;
548 		return (1);
549 	}
550 	return (0);
551 }
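/*
 * Typical caller pattern (sketch, mirroring the uses in
 * tcp_input_data() below); the INP write lock is held on entry:
 *
 *	if (tcp_hptsi_lock_inpinfo(inp, &tp))
 *		goto out;	(connection changed; *tpp was cleared)
 *
 * On a zero return both the INP_INFO read lock and the INP lock are
 * held and tp is still the tcpcb the caller started with.
 */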
552 
553 
554 static void
555 tcp_wakehpts(struct tcp_hpts_entry *hpts)
556 {
557 	HPTS_MTX_ASSERT(hpts);
558 	swi_sched(hpts->ie_cookie, 0);
559 	if (hpts->p_hpts_active == 2) {
560 		/* Rare sleeping on an ENOBUF */
561 		wakeup_one(hpts);
562 	}
563 }
564 
565 static void
566 tcp_wakeinput(struct tcp_hpts_entry *hpts)
567 {
568 	HPTS_MTX_ASSERT(hpts);
569 	swi_sched(hpts->ie_cookie, 0);
570 	if (hpts->p_hpts_active == 2) {
571 		/* Rare sleeping on an ENOBUF */
572 		wakeup_one(hpts);
573 	}
574 }
575 
576 struct tcp_hpts_entry *
577 tcp_cur_hpts(struct inpcb *inp)
578 {
579 	int32_t hpts_num;
580 	struct tcp_hpts_entry *hpts;
581 
582 	hpts_num = inp->inp_hpts_cpu;
583 	hpts = tcp_pace.rp_ent[hpts_num];
584 	return (hpts);
585 }
586 
587 struct tcp_hpts_entry *
588 tcp_hpts_lock(struct inpcb *inp)
589 {
590 	struct tcp_hpts_entry *hpts;
591 	int32_t hpts_num;
592 
593 again:
594 	hpts_num = inp->inp_hpts_cpu;
595 	hpts = tcp_pace.rp_ent[hpts_num];
596 #ifdef INVARIANTS
597 	if (mtx_owned(&hpts->p_mtx)) {
598 		panic("Hpts:%p owns mtx prior-to lock line:%d",
599 		    hpts, __LINE__);
600 	}
601 #endif
602 	mtx_lock(&hpts->p_mtx);
603 	if (hpts_num != inp->inp_hpts_cpu) {
604 		mtx_unlock(&hpts->p_mtx);
605 		goto again;
606 	}
607 	return (hpts);
608 }
609 
610 struct tcp_hpts_entry *
611 tcp_input_lock(struct inpcb *inp)
612 {
613 	struct tcp_hpts_entry *hpts;
614 	int32_t hpts_num;
615 
616 again:
617 	hpts_num = inp->inp_input_cpu;
618 	hpts = tcp_pace.rp_ent[hpts_num];
619 #ifdef INVARIANTS
620 	if (mtx_owned(&hpts->p_mtx)) {
621 		panic("Hpts:%p owns mtx prior-to lock line:%d",
622 		    hpts, __LINE__);
623 	}
624 #endif
625 	mtx_lock(&hpts->p_mtx);
626 	if (hpts_num != inp->inp_input_cpu) {
627 		mtx_unlock(&hpts->p_mtx);
628 		goto again;
629 	}
630 	return (hpts);
631 }
632 
633 static void
634 tcp_remove_hpts_ref(struct inpcb *inp, struct tcp_hpts_entry *hpts, int line)
635 {
636 	int32_t add_freed;
637 
638 	if (inp->inp_flags2 & INP_FREED) {
639 		/*
640 		 * Need to play a special trick so that in_pcbrele_wlocked
641 		 * does not return 1 when it really should have returned 0.
642 		 */
643 		add_freed = 1;
644 		inp->inp_flags2 &= ~INP_FREED;
645 	} else {
646 		add_freed = 0;
647 	}
648 #ifndef INP_REF_DEBUG
649 	if (in_pcbrele_wlocked(inp)) {
650 		/*
651 		 * This should not happen. We have the inpcb referred to by
652 		 * the main socket (why we are called) and the hpts. It
653 		 * should always return 0.
654 		 */
655 		panic("inpcb:%p release ret 1",
656 		    inp);
657 	}
658 #else
659 	if (__in_pcbrele_wlocked(inp, line)) {
660 		/*
661 		 * This should not happen. We have the inpcb referred to by
662 		 * the main socket (why we are called) and the hpts. It
663 		 * should always return 0.
664 		 */
665 		panic("inpcb:%p release ret 1",
666 		    inp);
667 	}
668 #endif
669 	if (add_freed) {
670 		inp->inp_flags2 |= INP_FREED;
671 	}
672 }
673 
674 static void
675 tcp_hpts_remove_locked_output(struct tcp_hpts_entry *hpts, struct inpcb *inp, int32_t flags, int32_t line)
676 {
677 	if (inp->inp_in_hpts) {
678 		hpts_sane_pace_remove(hpts, inp, &hpts->p_hptss[inp->inp_hptsslot], 1);
679 		tcp_remove_hpts_ref(inp, hpts, line);
680 	}
681 }
682 
683 static void
684 tcp_hpts_remove_locked_input(struct tcp_hpts_entry *hpts, struct inpcb *inp, int32_t flags, int32_t line)
685 {
686 	HPTS_MTX_ASSERT(hpts);
687 	if (inp->inp_in_input) {
688 		hpts_sane_input_remove(hpts, inp, 1);
689 		tcp_remove_hpts_ref(inp, hpts, line);
690 	}
691 }
692 
693 /*
694  * Called normally with the INP locked, but it
695  * does not matter: the hpts lock is the key,
696  * and the lock order allows us to hold the
697  * INP lock and then get the hpts lock.
698  *
699  * Valid values in the flags are
700  * HPTS_REMOVE_OUTPUT - remove from the output of the hpts.
701  * HPTS_REMOVE_INPUT - remove from the input of the hpts.
702  * Note that you can OR both values together and get two
703  * actions.
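 *
 * For example, a stack tearing down its pacing state might issue
 * (sketch; tcp_hpts_remove() is assumed to be the line-recording
 * wrapper macro around __tcp_hpts_remove()):
 *
 *	tcp_hpts_remove(inp, HPTS_REMOVE_OUTPUT | HPTS_REMOVE_INPUT);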
704  */
705 void
706 __tcp_hpts_remove(struct inpcb *inp, int32_t flags, int32_t line)
707 {
708 	struct tcp_hpts_entry *hpts;
709 
710 	INP_WLOCK_ASSERT(inp);
711 	if (flags & HPTS_REMOVE_OUTPUT) {
712 		hpts = tcp_hpts_lock(inp);
713 		tcp_hpts_remove_locked_output(hpts, inp, flags, line);
714 		mtx_unlock(&hpts->p_mtx);
715 	}
716 	if (flags & HPTS_REMOVE_INPUT) {
717 		hpts = tcp_input_lock(inp);
718 		tcp_hpts_remove_locked_input(hpts, inp, flags, line);
719 		mtx_unlock(&hpts->p_mtx);
720 	}
721 }
722 
723 static inline int
724 hpts_tick(struct tcp_hpts_entry *hpts, int32_t plus)
725 {
726 	return ((hpts->p_prevtick + plus) % NUM_OF_HPTSI_SLOTS);
727 }
728 
729 static int
730 tcp_queue_to_hpts_immediate_locked(struct inpcb *inp, struct tcp_hpts_entry *hpts, int32_t line, int32_t noref)
731 {
732 	int32_t need_wake = 0;
733 	uint32_t ticknow = 0;
734 
735 	HPTS_MTX_ASSERT(hpts);
736 	if (inp->inp_in_hpts == 0) {
737 		/* Ok we need to set it on the hpts in the current slot */
738 		if (hpts->p_hpts_active == 0) {
739 			/* A sleeping hpts we want in next slot to run */
740 			if (logging_on) {
741 				tcp_hpts_log_it(hpts, inp, HPTSLOG_INSERT_SLEEPER, 0,
742 				    hpts_tick(hpts, 1));
743 			}
744 			inp->inp_hptsslot = hpts_tick(hpts, 1);
745 			inp->inp_hpts_request = 0;
746 			if (logging_on) {
747 				tcp_hpts_log_it(hpts, inp, HPTSLOG_SLEEP_BEFORE, 1, ticknow);
748 			}
749 			need_wake = 1;
750 		} else if ((void *)inp == hpts->p_inp) {
751 			/*
752 			 * We can't allow you to go into the same slot we
753 			 * are in. We must put you out.
754 			 */
755 			inp->inp_hptsslot = hpts->p_nxt_slot;
756 		} else
757 			inp->inp_hptsslot = hpts->p_cur_slot;
758 		hpts_sane_pace_insert(hpts, inp, &hpts->p_hptss[inp->inp_hptsslot], line, noref);
759 		inp->inp_hpts_request = 0;
760 		if (logging_on) {
761 			tcp_hpts_log_it(hpts, inp, HPTSLOG_IMMEDIATE, 0, 0);
762 		}
763 		if (need_wake) {
764 			/*
765 			 * Activate the hpts if it is sleeping and its
766 			 * timeout is not 1.
767 			 */
768 			if (logging_on) {
769 				tcp_hpts_log_it(hpts, inp, HPTSLOG_WAKEUP_HPTS, 0, ticknow);
770 			}
771 			hpts->p_direct_wake = 1;
772 			tcp_wakehpts(hpts);
773 		}
774 	}
775 	return (need_wake);
776 }
777 
778 int
779 __tcp_queue_to_hpts_immediate(struct inpcb *inp, int32_t line)
780 {
781 	int32_t ret;
782 	struct tcp_hpts_entry *hpts;
783 
784 	INP_WLOCK_ASSERT(inp);
785 	hpts = tcp_hpts_lock(inp);
786 	ret = tcp_queue_to_hpts_immediate_locked(inp, hpts, line, 0);
787 	mtx_unlock(&hpts->p_mtx);
788 	return (ret);
789 }
790 
791 static void
792 tcp_hpts_insert_locked(struct tcp_hpts_entry *hpts, struct inpcb *inp, uint32_t slot, uint32_t cts, int32_t line,
793     struct hpts_diag *diag, int32_t noref)
794 {
795 	int32_t need_new_to = 0;
796 	int32_t need_wakeup = 0;
797 	uint32_t largest_slot;
798 	uint32_t ticknow = 0;
799 	uint32_t slot_calc;
800 
801 	HPTS_MTX_ASSERT(hpts);
802 	if (diag) {
803 		memset(diag, 0, sizeof(struct hpts_diag));
804 		diag->p_hpts_active = hpts->p_hpts_active;
805 		diag->p_nxt_slot = hpts->p_nxt_slot;
806 		diag->p_cur_slot = hpts->p_cur_slot;
807 		diag->slot_req = slot;
808 	}
809 	if ((inp->inp_in_hpts == 0) || noref) {
810 		inp->inp_hpts_request = slot;
811 		if (slot == 0) {
812 			/* Immediate */
813 			tcp_queue_to_hpts_immediate_locked(inp, hpts, line, noref);
814 			return;
815 		}
816 		if (hpts->p_hpts_active) {
817 			/*
818 			 * It's slot - 1, since nxt_slot is the next tick
819 			 * that will go off while the hpts is awake.
820 			 */
821 			if (logging_on) {
822 				tcp_hpts_log_it(hpts, inp, HPTSLOG_INSERT_NORMAL, slot, 0);
823 			}
824 			/*
825 			 * We want to make sure that we don't place an inp in
826 			 * the range of p_cur_slot <-> p_nxt_slot. If we
827 			 * take from p_nxt_slot to the end, plus p_cur_slot,
828 			 * and then take away 2, we will know the maximum
829 			 * number of slots we can use.
830 			 */
831 			if (hpts->p_nxt_slot > hpts->p_cur_slot) {
832 				/*
833 				 * Non-wrap case: nxt_slot <-> cur_slot is
834 				 * where we don't want to land. So the diff
835 				 * gives us what is taken away from the number
836 				 * of slots.
837 				 */
838 				largest_slot = NUM_OF_HPTSI_SLOTS - (hpts->p_nxt_slot - hpts->p_cur_slot);
839 			} else if (hpts->p_nxt_slot == hpts->p_cur_slot) {
840 				largest_slot = NUM_OF_HPTSI_SLOTS - 2;
841 			} else {
842 				/*
843 				 * Wrap case so the diff gives us the number
844 				 * of slots that we can land in.
845 				 */
846 				largest_slot = hpts->p_cur_slot - hpts->p_nxt_slot;
847 			}
848 			/*
849 			 * We take away two so we never have a problem (20
850 			 * usecs) out of 1024000 usecs.
851 			 */
852 			largest_slot -= 2;
853 			if (inp->inp_hpts_request > largest_slot) {
854 				/*
855 				 * Restrict max jump of slots and remember
856 				 * leftover
857 				 */
858 				slot = largest_slot;
859 				inp->inp_hpts_request -= largest_slot;
860 			} else {
861 				/* This one will run when we hit it */
862 				inp->inp_hpts_request = 0;
863 			}
864 			if (hpts->p_nxt_slot == hpts->p_cur_slot)
865 				slot_calc = (hpts->p_nxt_slot + slot) % NUM_OF_HPTSI_SLOTS;
866 			else
867 				slot_calc = (hpts->p_nxt_slot + slot - 1) % NUM_OF_HPTSI_SLOTS;
868 			if (slot_calc == hpts->p_cur_slot) {
869 #ifdef INVARIANTS
870 				/* TSNH */
871 				panic("Hpts:%p impossible slot calculation slot_calc:%u slot:%u largest:%u\n",
872 				    hpts, slot_calc, slot, largest_slot);
873 #endif
874 				if (slot_calc)
875 					slot_calc--;
876 				else
877 					slot_calc = NUM_OF_HPTSI_SLOTS - 1;
878 			}
879 			inp->inp_hptsslot = slot_calc;
880 			if (diag) {
881 				diag->inp_hptsslot = inp->inp_hptsslot;
882 			}
883 		} else {
884 			/*
885 			 * The hpts is sleeping; we need to figure out where
886 			 * it will wake up and whether we need to reschedule
887 			 * its timeout.
888 			 */
889 			uint32_t have_slept, yet_to_sleep;
890 			uint32_t slot_now;
891 			struct timeval tv;
892 
893 			ticknow = tcp_gethptstick(&tv);
894 			slot_now = ticknow % NUM_OF_HPTSI_SLOTS;
895 			/*
896 			 * The user wants to be inserted at (slot_now +
897 			 * slot) % NUM_OF_HPTSI_SLOTS, so let's set that up.
898 			 */
899 			largest_slot = NUM_OF_HPTSI_SLOTS - 2;
900 			if (inp->inp_hpts_request > largest_slot) {
901 				/* Adjust the residual in inp_hpts_request */
902 				slot = largest_slot;
903 				inp->inp_hpts_request -= largest_slot;
904 			} else {
905 				/* No residual; it all fits */
906 				inp->inp_hpts_request = 0;
907 			}
908 			inp->inp_hptsslot = (slot_now + slot) % NUM_OF_HPTSI_SLOTS;
909 			if (diag) {
910 				diag->slot_now = slot_now;
911 				diag->inp_hptsslot = inp->inp_hptsslot;
912 				diag->p_on_min_sleep = hpts->p_on_min_sleep;
913 			}
914 			if (logging_on) {
915 				tcp_hpts_log_it(hpts, inp, HPTSLOG_INSERT_SLEEPER, slot, ticknow);
916 			}
917 			/* Now do we need to restart the hpts's timer? */
918 			if (TSTMP_GT(ticknow, hpts->p_curtick))
919 				have_slept = ticknow - hpts->p_curtick;
920 			else
921 				have_slept = 0;
922 			if (have_slept < hpts->p_hpts_sleep_time) {
923 				/* This should be what happens */
924 				yet_to_sleep = hpts->p_hpts_sleep_time - have_slept;
925 			} else {
926 				/* We are over-due */
927 				yet_to_sleep = 0;
928 				need_wakeup = 1;
929 			}
930 			if (diag) {
931 				diag->have_slept = have_slept;
932 				diag->yet_to_sleep = yet_to_sleep;
933 				diag->hpts_sleep_time = hpts->p_hpts_sleep_time;
934 			}
935 			if ((hpts->p_on_min_sleep == 0) && (yet_to_sleep > slot)) {
936 				/*
937 				 * We need to reschedule the hpts's timeout.
938 				 */
939 				hpts->p_hpts_sleep_time = slot;
940 				need_new_to = slot * HPTS_TICKS_PER_USEC;
941 			}
942 		}
943 		hpts_sane_pace_insert(hpts, inp, &hpts->p_hptss[inp->inp_hptsslot], line, noref);
944 		if (logging_on) {
945 			tcp_hpts_log_it(hpts, inp, HPTSLOG_INSERTED, slot, ticknow);
946 		}
947 		/*
948 		 * Now, how far out is the hpts sleeping? If active is 1, it's
949 		 * up and ticking and we do nothing; otherwise we may need to
950 		 * reschedule its callout if need_new_to was set above.
951 		 */
952 		if (need_wakeup) {
953 			if (logging_on) {
954 				tcp_hpts_log_it(hpts, inp, HPTSLOG_RESCHEDULE, 1, 0);
955 			}
956 			hpts->p_direct_wake = 1;
957 			tcp_wakehpts(hpts);
958 			if (diag) {
959 				diag->need_new_to = 0;
960 				diag->co_ret = 0xffff0000;
961 			}
962 		} else if (need_new_to) {
963 			int32_t co_ret;
964 			struct timeval tv;
965 			sbintime_t sb;
966 
967 			tv.tv_sec = 0;
968 			tv.tv_usec = 0;
969 			while (need_new_to > HPTS_USEC_IN_SEC) {
970 				tv.tv_sec++;
971 				need_new_to -= HPTS_USEC_IN_SEC;
972 			}
973 			tv.tv_usec = need_new_to;
974 			sb = tvtosbt(tv);
975 			if (tcp_hpts_callout_skip_swi == 0) {
976 				co_ret = callout_reset_sbt_on(&hpts->co, sb, 0,
977 				    hpts_timeout_swi, hpts, hpts->p_cpu,
978 				    (C_DIRECT_EXEC | C_PREL(tcp_hpts_precision)));
979 			} else {
980 				co_ret = callout_reset_sbt_on(&hpts->co, sb, 0,
981 				    hpts_timeout_dir, hpts,
982 				    hpts->p_cpu,
983 				    C_PREL(tcp_hpts_precision));
984 			}
985 			if (diag) {
986 				diag->need_new_to = need_new_to;
987 				diag->co_ret = co_ret;
988 			}
989 		}
990 	} else {
991 #ifdef INVARIANTS
992 		panic("Hpts:%p tp:%p already on hpts and add?", hpts, inp);
993 #endif
994 	}
995 }
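/*
 * Worked example of the awake-hpts placement above (sketch): with
 * p_cur_slot = 10 and p_nxt_slot = 14 (non-wrap case), the span we
 * must avoid is 4 slots wide, so largest_slot becomes
 * NUM_OF_HPTSI_SLOTS - 4 - 2. A request that fits lands at
 * (p_nxt_slot + slot - 1) % NUM_OF_HPTSI_SLOTS; any excess stays in
 * inp_hpts_request and is rescheduled when the wheel reaches it.
 */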
996 
997 uint32_t
998 tcp_hpts_insert_diag(struct inpcb *inp, uint32_t slot, int32_t line, struct hpts_diag *diag){
999 	struct tcp_hpts_entry *hpts;
1000 	uint32_t slot_on, cts;
1001 	struct timeval tv;
1002 
1003 	/*
1004 	 * We now return the next-slot the hpts will be on, beyond its
1005 	 * current run (if up) or where it was when it stopped if it is
1006 	 * sleeping.
1007 	 */
1008 	INP_WLOCK_ASSERT(inp);
1009 	hpts = tcp_hpts_lock(inp);
1010 	if (in_ts_percision)
1011 		microuptime(&tv);
1012 	else
1013 		getmicrouptime(&tv);
1014 	cts = tcp_tv_to_usectick(&tv);
1015 	tcp_hpts_insert_locked(hpts, inp, slot, cts, line, diag, 0);
1016 	slot_on = hpts->p_nxt_slot;
1017 	mtx_unlock(&hpts->p_mtx);
1018 	return (slot_on);
1019 }
1020 
1021 uint32_t
1022 __tcp_hpts_insert(struct inpcb *inp, uint32_t slot, int32_t line){
1023 	return (tcp_hpts_insert_diag(inp, slot, line, NULL));
1024 }
1025 
1026 int
1027 __tcp_queue_to_input_locked(struct inpcb *inp, struct tcp_hpts_entry *hpts, int32_t line)
1028 {
1029 	int32_t retval = 0;
1030 
1031 	HPTS_MTX_ASSERT(hpts);
1032 	if (inp->inp_in_input == 0) {
1033 		/* Ok we need to set it on the hpts in the current slot */
1034 		hpts_sane_input_insert(hpts, inp, line);
1035 		retval = 1;
1036 		if (hpts->p_hpts_active == 0) {
1037 			/*
1038 			 * Activate the hpts if it is sleeping.
1039 			 */
1040 			if (logging_on) {
1041 				tcp_hpts_log_it(hpts, inp, HPTSLOG_WAKEUP_INPUT, 0, 0);
1042 			}
1043 			retval = 2;
1044 			hpts->p_direct_wake = 1;
1045 			tcp_wakeinput(hpts);
1046 		}
1047 	} else if (hpts->p_hpts_active == 0) {
1048 		retval = 4;
1049 		hpts->p_direct_wake = 1;
1050 		tcp_wakeinput(hpts);
1051 	}
1052 	return (retval);
1053 }
1054 
1055 void
1056 tcp_queue_pkt_to_input(struct tcpcb *tp, struct mbuf *m, struct tcphdr *th,
1057     int32_t tlen, int32_t drop_hdrlen, uint8_t iptos, uint8_t ti_locked)
1058 {
1059 	/* Setup packet for input first */
1060 	INP_WLOCK_ASSERT(tp->t_inpcb);
1061 	m->m_pkthdr.pace_thoff = (uint16_t) ((caddr_t)th - mtod(m, caddr_t));
1062 	m->m_pkthdr.pace_tlen = (uint16_t) tlen;
1063 	m->m_pkthdr.pace_drphdrlen = drop_hdrlen;
1064 	m->m_pkthdr.pace_tos = iptos;
1065 	m->m_pkthdr.pace_lock = (uint8_t) ti_locked;
1066 	if (tp->t_in_pkt == NULL) {
1067 		tp->t_in_pkt = m;
1068 		tp->t_tail_pkt = m;
1069 	} else {
1070 		tp->t_tail_pkt->m_nextpkt = m;
1071 		tp->t_tail_pkt = m;
1072 	}
1073 }
1074 
1075 
1076 int32_t
1077 __tcp_queue_to_input(struct tcpcb *tp, struct mbuf *m, struct tcphdr *th,
1078     int32_t tlen, int32_t drop_hdrlen, uint8_t iptos, uint8_t ti_locked, int32_t line){
1079 	struct tcp_hpts_entry *hpts;
1080 	int32_t ret;
1081 
1082 	tcp_queue_pkt_to_input(tp, m, th, tlen, drop_hdrlen, iptos, ti_locked);
1083 	hpts = tcp_input_lock(tp->t_inpcb);
1084 	ret = __tcp_queue_to_input_locked(tp->t_inpcb, hpts, line);
1085 	mtx_unlock(&hpts->p_mtx);
1086 	return (ret);
1087 }
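/*
 * A stack's tcp_do_segment() replacement can thus defer a segment to
 * the hpts input thread with a single call (sketch;
 * tcp_queue_to_input() is assumed to be the line-recording wrapper
 * macro around __tcp_queue_to_input()):
 *
 *	tcp_queue_to_input(tp, m, th, tlen, drop_hdrlen, iptos, ti_locked);
 *
 * This stamps the pacing fields into the mbuf header, chains the mbuf
 * onto tp->t_in_pkt, and wakes the input hpts if it is sleeping.
 */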
1088 
1089 void
1090 __tcp_set_inp_to_drop(struct inpcb *inp, uint16_t reason, int32_t line)
1091 {
1092 	struct tcp_hpts_entry *hpts;
1093 	struct tcpcb *tp;
1094 
1095 	tp = intotcpcb(inp);
1096 	hpts = tcp_input_lock(tp->t_inpcb);
1097 	if (inp->inp_in_input == 0) {
1098 		/* Ok we need to set it on the hpts in the current slot */
1099 		hpts_sane_input_insert(hpts, inp, line);
1100 		if (hpts->p_hpts_active == 0) {
1101 			/*
1102 			 * Activate the hpts if it is sleeping.
1103 			 */
1104 			hpts->p_direct_wake = 1;
1105 			tcp_wakeinput(hpts);
1106 		}
1107 	} else if (hpts->p_hpts_active == 0) {
1108 		hpts->p_direct_wake = 1;
1109 		tcp_wakeinput(hpts);
1110 	}
1111 	inp->inp_hpts_drop_reas = reason;
1112 	mtx_unlock(&hpts->p_mtx);
1113 }
1114 
1115 static uint16_t
1116 hpts_random_cpu(struct inpcb *inp){
1117 	/*
1118 	 * No flow type set; distribute the load randomly.
1119 	 */
1120 	uint16_t cpuid;
1121 	uint32_t ran;
1122 
1123 	/*
1124 	 * If one has been set, use it, i.e., we want both in and out on the
1125 	 * same hpts.
1126 	 */
1127 	if (inp->inp_input_cpu_set) {
1128 		return (inp->inp_input_cpu);
1129 	} else if (inp->inp_hpts_cpu_set) {
1130 		return (inp->inp_hpts_cpu);
1131 	}
1132 	/* Nothing set; use a random number */
1133 	ran = arc4random();
1134 	cpuid = (ran & 0xffff) % mp_ncpus;
1135 	return (cpuid);
1136 }
1137 
1138 static uint16_t
1139 hpts_cpuid(struct inpcb *inp){
1140 	uint16_t cpuid;
1141 
1142 
1143 	/*
1144 	 * If one has been set, use it, i.e., we want both in and out on the
1145 	 * same hpts.
1146 	 */
1147 	if (inp->inp_input_cpu_set) {
1148 		return (inp->inp_input_cpu);
1149 	} else if (inp->inp_hpts_cpu_set) {
1150 		return (inp->inp_hpts_cpu);
1151 	}
1152 	/* If one is set the other must be the same */
1153 #ifdef	RSS
1154 	cpuid = rss_hash2cpuid(inp->inp_flowid, inp->inp_flowtype);
1155 	if (cpuid == NETISR_CPUID_NONE)
1156 		return (hpts_random_cpu(inp));
1157 	else
1158 		return (cpuid);
1159 #else
1160 	/*
1161 	 * We don't have a flowid -> cpuid mapping, so cheat and just hash
1162 	 * the flowid across the available CPUs.  Not the best, but
1163 	 * apparently better than defaulting to swi 0.
1164 	 */
1165 	if (inp->inp_flowtype != M_HASHTYPE_NONE) {
1166 		cpuid = inp->inp_flowid % mp_ncpus;
1167 		return (cpuid);
1168 	}
1169 	cpuid = hpts_random_cpu(inp);
1170 	return (cpuid);
1171 #endif
1172 }
1173 
1174 /*
1175  * Do NOT try to optimize the processing of inp's
1176  * by first pulling off all the inp's into a temporary
1177  * list (e.g. TAILQ_CONCAT). If you do that the subtle
1178  * interactions of switching CPUs will kill you because of
1179  * problems in the linked-list manipulation. Basically
1180  * you would switch CPUs with the hpts mutex locked,
1181  * but then while you were processing one of the inp's,
1182  * some other one that you switched will get a new
1183  * packet on its different CPU. It will insert it
1184  * on the new hpts's input list. Creating a temporary
1185  * link in the inp will not fix it either, since
1186  * the other hpts will be doing the same thing and
1187  * you will both end up using the temporary link.
1188  *
1189  * You will die in an ASSERT for tailq corruption if you
1190  * run INVARIANTS or you will die horribly without
1191  * INVARIANTS in some unknown way with a corrupt linked
1192  * list.
1193  */
1194 static void
1195 tcp_input_data(struct tcp_hpts_entry *hpts, struct timeval *tv)
1196 {
1197 	struct mbuf *m, *n;
1198 	struct tcpcb *tp;
1199 	struct inpcb *inp;
1200 	uint16_t drop_reason;
1201 	int16_t set_cpu;
1202 	uint32_t did_prefetch = 0;
1203 	int32_t ti_locked = TI_UNLOCKED;
1204 
1205 	HPTS_MTX_ASSERT(hpts);
1206 	while ((inp = TAILQ_FIRST(&hpts->p_input)) != NULL) {
1207 		HPTS_MTX_ASSERT(hpts);
1208 		hpts_sane_input_remove(hpts, inp, 0);
1209 		if (inp->inp_input_cpu_set == 0) {
1210 			set_cpu = 1;
1211 		} else {
1212 			set_cpu = 0;
1213 		}
1214 		hpts->p_inp = inp;
1215 		drop_reason = inp->inp_hpts_drop_reas;
1216 		inp->inp_in_input = 0;
1217 		tp = intotcpcb(inp);
1218 		mtx_unlock(&hpts->p_mtx);
1219 		CURVNET_SET(tp->t_vnet);
1220 		if (drop_reason) {
1221 			INP_INFO_RLOCK(&V_tcbinfo);
1222 			ti_locked = TI_RLOCKED;
1223 		} else {
1224 			ti_locked = TI_UNLOCKED;
1225 		}
1226 		INP_WLOCK(inp);
1227 		if ((inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) ||
1228 		    (inp->inp_flags2 & INP_FREED)) {
1229 out:
1230 			hpts->p_inp = NULL;
1231 			if (ti_locked == TI_RLOCKED) {
1232 				INP_INFO_RUNLOCK(&V_tcbinfo);
1233 			}
1234 			if (in_pcbrele_wlocked(inp) == 0) {
1235 				INP_WUNLOCK(inp);
1236 			}
1237 			ti_locked = TI_UNLOCKED;
1238 			CURVNET_RESTORE();
1239 			mtx_lock(&hpts->p_mtx);
1240 			continue;
1241 		}
1242 		if ((tp == NULL) || (tp->t_inpcb == NULL)) {
1243 			goto out;
1244 		}
1245 		if (drop_reason) {
1246 			/* This tcb is being destroyed for drop_reason */
1247 			m = tp->t_in_pkt;
1248 			if (m)
1249 				n = m->m_nextpkt;
1250 			else
1251 				n = NULL;
1252 			tp->t_in_pkt = NULL;
1253 			while (m) {
1254 				m_freem(m);
1255 				m = n;
1256 				if (m)
1257 					n = m->m_nextpkt;
1258 			}
1259 			tp = tcp_drop(tp, drop_reason);
1260 			INP_INFO_RUNLOCK(&V_tcbinfo);
1261 			if (tp == NULL) {
1262 				INP_WLOCK(inp);
1263 			}
1264 			if (in_pcbrele_wlocked(inp) == 0)
1265 				INP_WUNLOCK(inp);
1266 			CURVNET_RESTORE();
1267 			mtx_lock(&hpts->p_mtx);
1268 			continue;
1269 		}
1270 		if (set_cpu) {
1271 			/*
1272 			 * Setup so the next time we will move to the right
1273 			 * CPU. This should be a rare event. It will
1274 			 * sometimes happen when we are the client side
1275 			 * (usually not the server). Somehow tcp_output()
1276 			 * gets called before the tcp_do_segment() sets the
1277 			 * initial state. This means the r_cpu and r_hpts_cpu
1278 			 * are 0. We get on the hpts, and then tcp_input()
1279 			 * gets called setting up the r_cpu to the correct
1280 			 * value. The hpts goes off and sees the mismatch.
1281 			 * We simply correct it here and the CPU will switch
1282 			 * to the new hpts next time the tcb gets added to
1283 			 * the hpts (not this time) :-)
1284 			 */
1285 			tcp_set_hpts(inp);
1286 		}
1287 		m = tp->t_in_pkt;
1288 		n = NULL;
1289 		if (m != NULL &&
1290 		    (m->m_pkthdr.pace_lock == TI_RLOCKED ||
1291 		    tp->t_state != TCPS_ESTABLISHED)) {
1292 			ti_locked = TI_RLOCKED;
1293 			if (tcp_hptsi_lock_inpinfo(inp, &tp)) {
1294 				CURVNET_RESTORE();
1295 				goto out;
1296 			}
1297 			m = tp->t_in_pkt;
1298 		}
1299 		if (in_newts_every_tcb) {
1300 			if (in_ts_percision)
1301 				microuptime(tv);
1302 			else
1303 				getmicrouptime(tv);
1304 		}
1305 		if (tp->t_fb_ptr != NULL) {
1306 			kern_prefetch(tp->t_fb_ptr, &did_prefetch);
1307 			did_prefetch = 1;
1308 		}
1309 		/* Any input work to do, if so do it first */
1310 		if ((m != NULL) && (m == tp->t_in_pkt)) {
1311 			struct tcphdr *th;
1312 			int32_t tlen, drop_hdrlen, nxt_pkt;
1313 			uint8_t iptos;
1314 
1315 			n = m->m_nextpkt;
1316 			tp->t_in_pkt = tp->t_tail_pkt = NULL;
1317 			while (m) {
1318 				th = (struct tcphdr *)(mtod(m, caddr_t)+m->m_pkthdr.pace_thoff);
1319 				tlen = m->m_pkthdr.pace_tlen;
1320 				drop_hdrlen = m->m_pkthdr.pace_drphdrlen;
1321 				iptos = m->m_pkthdr.pace_tos;
1322 				m->m_nextpkt = NULL;
1323 				if (n)
1324 					nxt_pkt = 1;
1325 				else
1326 					nxt_pkt = 0;
1327 				inp->inp_input_calls = 1;
1328 				if (tp->t_fb->tfb_tcp_hpts_do_segment) {
1329 					/* Use the hpts specific do_segment */
1330 					(*tp->t_fb->tfb_tcp_hpts_do_segment) (m, th, inp->inp_socket,
1331 					    tp, drop_hdrlen,
1332 					    tlen, iptos, ti_locked, nxt_pkt, tv);
1333 				} else {
1334 					/* Use the default do_segment */
1335 					(*tp->t_fb->tfb_tcp_do_segment) (m, th, inp->inp_socket,
1336 					    tp, drop_hdrlen,
1337 					    tlen, iptos, ti_locked);
1338 				}
1339 				/*
1340 				 * Do segment returns unlocked; we need the
1341 				 * lock again, but we also need some KASSERTs
1342 				 * here.
1343 				 */
1344 				INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
1345 				INP_UNLOCK_ASSERT(inp);
1346 				m = n;
1347 				if (m)
1348 					n = m->m_nextpkt;
1349 				if (m != NULL &&
1350 				    m->m_pkthdr.pace_lock == TI_RLOCKED) {
1351 					INP_INFO_RLOCK(&V_tcbinfo);
1352 					ti_locked = TI_RLOCKED;
1353 				} else
1354 					ti_locked = TI_UNLOCKED;
1355 				INP_WLOCK(inp);
1356 				/*
1357 				 * Since we have an opening here we must
1358 				 * re-check if the tcb went away while we
1359 				 * were getting the lock(s).
1360 				 */
1361 				if ((inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) ||
1362 				    (inp->inp_flags2 & INP_FREED)) {
1363 			out_free:
1364 					while (m) {
1365 						m_freem(m);
1366 						m = n;
1367 						if (m)
1368 							n = m->m_nextpkt;
1369 					}
1370 					goto out;
1371 				}
1372 				/*
1373 				 * Now that we hold the INP lock, check if
1374 				 * we need to upgrade our lock.
1375 				 */
1376 				if (ti_locked == TI_UNLOCKED &&
1377 				    (tp->t_state != TCPS_ESTABLISHED)) {
1378 					ti_locked = TI_RLOCKED;
1379 					if (tcp_hptsi_lock_inpinfo(inp, &tp))
1380 						goto out_free;
1381 				}
1382 			}	/** end while(m) */
1383 		}		/** end if ((m != NULL)  && (m == tp->t_in_pkt)) */
1384 		if (in_pcbrele_wlocked(inp) == 0)
1385 			INP_WUNLOCK(inp);
1386 		if (ti_locked == TI_RLOCKED)
1387 			INP_INFO_RUNLOCK(&V_tcbinfo);
1388 		INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
1389 		INP_UNLOCK_ASSERT(inp);
1390 		ti_locked = TI_UNLOCKED;
1391 		mtx_lock(&hpts->p_mtx);
1392 		hpts->p_inp = NULL;
1393 		CURVNET_RESTORE();
1394 	}
1395 }
1396 
1397 static int
1398 tcp_hpts_est_run(struct tcp_hpts_entry *hpts)
1399 {
1400 	int32_t ticks_to_run;
1401 
1402 	if (hpts->p_prevtick && (SEQ_GT(hpts->p_curtick, hpts->p_prevtick))) {
1403 		ticks_to_run = hpts->p_curtick - hpts->p_prevtick;
1404 		if (ticks_to_run >= (NUM_OF_HPTSI_SLOTS - 1)) {
1405 			ticks_to_run = NUM_OF_HPTSI_SLOTS - 2;
1406 		}
1407 	} else {
1408 		if (hpts->p_prevtick == hpts->p_curtick) {
1409 			/* This happens when we get woken up right away */
1410 			return (-1);
1411 		}
1412 		ticks_to_run = 1;
1413 	}
1414 	/* Set in where we will be when we catch up */
1415 	hpts->p_nxt_slot = (hpts->p_cur_slot + ticks_to_run) % NUM_OF_HPTSI_SLOTS;
1416 	if (hpts->p_nxt_slot == hpts->p_cur_slot) {
1417 		panic("Impossible math -- hpts:%p p_nxt_slot:%d p_cur_slot:%d ticks_to_run:%d",
1418 		    hpts, hpts->p_nxt_slot, hpts->p_cur_slot, ticks_to_run);
1419 	}
1420 	return (ticks_to_run);
1421 }
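/*
 * E.g. if three ticks elapsed since the last run (p_curtick -
 * p_prevtick == 3), three slots are processed and p_nxt_slot is
 * parked three slots past p_cur_slot; a badly delinquent hpts is
 * capped at NUM_OF_HPTSI_SLOTS - 2 slots per catch-up pass.
 */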
1422 
1423 static void
1424 tcp_hptsi(struct tcp_hpts_entry *hpts, struct timeval *ctick)
1425 {
1426 	struct tcpcb *tp;
1427 	struct inpcb *inp = NULL, *ninp;
1428 	struct timeval tv;
1429 	int32_t ticks_to_run, i, error, tick_now, interum_tick;
1430 	int32_t paced_cnt = 0;
1431 	int32_t did_prefetch = 0;
1432 	int32_t prefetch_ninp = 0;
1433 	int32_t prefetch_tp = 0;
1434 	uint32_t cts;
1435 	int16_t set_cpu;
1436 
1437 	HPTS_MTX_ASSERT(hpts);
1438 	hpts->p_curtick = tcp_tv_to_hptstick(ctick);
1439 	cts = tcp_tv_to_usectick(ctick);
1440 	memcpy(&tv, ctick, sizeof(struct timeval));
1441 	hpts->p_cur_slot = hpts_tick(hpts, 1);
1442 
1443 	/* Figure out if we had missed ticks */
1444 again:
1445 	HPTS_MTX_ASSERT(hpts);
1446 	ticks_to_run = tcp_hpts_est_run(hpts);
1447 	if (!TAILQ_EMPTY(&hpts->p_input)) {
1448 		tcp_input_data(hpts, &tv);
1449 	}
1450 #ifdef INVARIANTS
1451 	if (TAILQ_EMPTY(&hpts->p_input) &&
1452 	    (hpts->p_on_inqueue_cnt != 0)) {
1453 		panic("tp:%p in_hpts input empty but cnt:%d",
1454 		    hpts, hpts->p_on_inqueue_cnt);
1455 	}
1456 #endif
1457 	HPTS_MTX_ASSERT(hpts);
1458 	/* Reset the ticks to run and time if we need to */
1459 	interum_tick = tcp_gethptstick(&tv);
1460 	if (interum_tick != hpts->p_curtick) {
1461 		/* Save off the new time we execute to */
1462 		*ctick = tv;
1463 		hpts->p_curtick = interum_tick;
1464 		cts = tcp_tv_to_usectick(&tv);
1465 		hpts->p_cur_slot = hpts_tick(hpts, 1);
1466 		ticks_to_run = tcp_hpts_est_run(hpts);
1467 	}
1468 	if (ticks_to_run == -1) {
1469 		goto no_run;
1470 	}
1471 	if (logging_on) {
1472 		tcp_hpts_log_it(hpts, inp, HPTSLOG_SETTORUN, ticks_to_run, 0);
1473 	}
1474 	if (hpts->p_on_queue_cnt == 0) {
1475 		goto no_one;
1476 	}
1477 	HPTS_MTX_ASSERT(hpts);
1478 	for (i = 0; i < ticks_to_run; i++) {
1479 		/*
1480 		 * Calculate our delay; if there are no extra ticks, there
1481 		 * was no delay.
1482 		 */
1483 		hpts->p_delayed_by = (ticks_to_run - (i + 1)) * HPTS_TICKS_PER_USEC;
1484 		HPTS_MTX_ASSERT(hpts);
1485 		while ((inp = TAILQ_FIRST(&hpts->p_hptss[hpts->p_cur_slot])) != NULL) {
1486 			/* For debugging */
1487 			if (logging_on) {
1488 				tcp_hpts_log_it(hpts, inp, HPTSLOG_HPTSI, ticks_to_run, i);
1489 			}
1490 			hpts->p_inp = inp;
1491 			paced_cnt++;
1492 			if (hpts->p_cur_slot != inp->inp_hptsslot) {
1493 				panic("Hpts:%p inp:%p slot mis-aligned %u vs %u",
1494 				    hpts, inp, hpts->p_cur_slot, inp->inp_hptsslot);
1495 			}
1496 			/* Now pull it */
1497 			if (inp->inp_hpts_cpu_set == 0) {
1498 				set_cpu = 1;
1499 			} else {
1500 				set_cpu = 0;
1501 			}
1502 			hpts_sane_pace_remove(hpts, inp, &hpts->p_hptss[hpts->p_cur_slot], 0);
1503 			if ((ninp = TAILQ_FIRST(&hpts->p_hptss[hpts->p_cur_slot])) != NULL) {
1504 				/* We prefetch the next inp if possible */
1505 				kern_prefetch(ninp, &prefetch_ninp);
1506 				prefetch_ninp = 1;
1507 			}
1508 			if (inp->inp_hpts_request) {
1509 				/*
1510 				 * This guy is deferred out further in time
1511 				 * than our wheel can hold. Push him back
1512 				 * on the wheel.
1513 				 */
1514 				int32_t remaining_slots;
1515 
1516 				remaining_slots = ticks_to_run - (i + 1);
1517 				if (inp->inp_hpts_request > remaining_slots) {
1518 					/*
1519 					 * Keep INVARIANTS happy by clearing
1520 					 * the flag
1521 					 */
1522 					tcp_hpts_insert_locked(hpts, inp, inp->inp_hpts_request, cts, __LINE__, NULL, 1);
1523 					hpts->p_inp = NULL;
1524 					continue;
1525 				}
1526 				inp->inp_hpts_request = 0;
1527 			}
1528 			/*
1529 			 * We clear the hpts flag here after dealing with
1530 			 * remaining slots. This way anyone looking with the
1531 			 * TCB lock will see it's on the hpts until just
1532 			 * before we unlock.
1533 			 */
1534 			inp->inp_in_hpts = 0;
1535 			mtx_unlock(&hpts->p_mtx);
1536 			INP_WLOCK(inp);
1537 			if (in_pcbrele_wlocked(inp)) {
1538 				mtx_lock(&hpts->p_mtx);
1539 				if (logging_on)
1540 					tcp_hpts_log_it(hpts, hpts->p_inp, HPTSLOG_INP_DONE, 0, 1);
1541 				hpts->p_inp = NULL;
1542 				continue;
1543 			}
1544 			if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
1545 out_now:
1546 #ifdef INVARIANTS
1547 				if (mtx_owned(&hpts->p_mtx)) {
1548 					panic("Hpts:%p owns mtx prior-to lock line:%d",
1549 					    hpts, __LINE__);
1550 				}
1551 #endif
1552 				INP_WUNLOCK(inp);
1553 				mtx_lock(&hpts->p_mtx);
1554 				if (logging_on)
1555 					tcp_hpts_log_it(hpts, hpts->p_inp, HPTSLOG_INP_DONE, 0, 3);
1556 				hpts->p_inp = NULL;
1557 				continue;
1558 			}
1559 			tp = intotcpcb(inp);
1560 			if ((tp == NULL) || (tp->t_inpcb == NULL)) {
1561 				goto out_now;
1562 			}
1563 			if (set_cpu) {
1564 				/*
1565 				 * Setup so the next time we will move to
1566 				 * the right CPU. This should be a rare
1567 				 * event. It will sometimes happen when we
1568 				 * are the client side (usually not the
1569 				 * server). Somehow tcp_output() gets called
1570 				 * before the tcp_do_segment() sets the
1571 				 * initial state. This means the r_cpu and
1572 				 * r_hpts_cpu are 0. We get on the hpts, and
1573 				 * then tcp_input() gets called setting up
1574 				 * the r_cpu to the correct value. The hpts
1575 				 * goes off and sees the mismatch. We
1576 				 * simply correct it here and the CPU will
1577 				 * switch to the new hpts next time the tcb
1578 				 * gets added to the hpts (not this one)
1579 				 * :-)
1580 				 */
1581 				tcp_set_hpts(inp);
1582 			}
1583 			if (out_newts_every_tcb) {
1584 				struct timeval sv;
1585 
1586 				if (out_ts_percision)
1587 					microuptime(&sv);
1588 				else
1589 					getmicrouptime(&sv);
1590 				cts = tcp_tv_to_usectick(&sv);
1591 			}
1592 			CURVNET_SET(tp->t_vnet);
1593 			/*
1594 			 * There is a hole here: we get the refcnt on the
1595 			 * inp so it will still be preserved, but to make
1596 			 * sure we can get the INP we need to hold the p_mtx
1597 			 * above while we pull out the tp/inp. As long as
1598 			 * fini gets the lock first, we are assured of having
1599 			 * a sane INP we can lock and test.
1600 			 */
1601 #ifdef INVARIANTS
1602 			if (mtx_owned(&hpts->p_mtx)) {
1603 				panic("Hpts:%p owns mtx before tcp-output:%d",
1604 				    hpts, __LINE__);
1605 			}
1606 #endif
1607 			if (tp->t_fb_ptr != NULL) {
1608 				kern_prefetch(tp->t_fb_ptr, &did_prefetch);
1609 				did_prefetch = 1;
1610 			}
1611 			inp->inp_hpts_calls = 1;
1612 			if (tp->t_fb->tfb_tcp_output_wtime != NULL) {
1613 				error = (*tp->t_fb->tfb_tcp_output_wtime) (tp, &tv);
1614 			} else {
1615 				error = tp->t_fb->tfb_tcp_output(tp);
1616 			}
1617 			if (ninp && ninp->inp_ppcb) {
1618 				/*
1619 				 * If we have a nxt inp, see if we can
1620 				 * prefetch its ppcb. Note this may seem
1621 				 * "risky" since we have no locks (other
1622 				 * than the previous inp) and there is no
1623 				 * assurance that ninp was not pulled while
1624 				 * we were processing inp and freed. If this
1625 				 * occurred it could mean that either:
1626 				 *
1627 				 * a) Its NULL (which is fine we won't go
1628 				 * here) <or> b) Its valid (which is cool we
1629 				 * will prefetch it) <or> c) The inp got
1630 				 * freed back to the slab which was
1631 				 * reallocated. Then the piece of memory was
1632 				 * re-used and something else (not an
1633 				 * address) is in inp_ppcb. If that occurs
1634 				 * we don't crash, but take a TLB shootdown
1635 				 * performance hit (same as if it was NULL
1636 				 * and we tried to pre-fetch it).
1637 				 *
1638 				 * Considering that the likelihood of <c> is
1639 				 * quite rare we will take a risk on doing
1640 				 * this. If performance drops after testing
1641 				 * we can always take this out. NB: the
1642 				 * kern_prefetch on amd64 actually has
1643 				 * protection against a bad address now via
1644 				 * the DMAP_() tests. This will prevent the
1645 				 * TLB hit, and instead if <c> occurs just
1646 				 * cause us to load cache with a useless
1647 				 * address (to us).
1648 				 */
1649 				kern_prefetch(ninp->inp_ppcb, &prefetch_tp);
1650 				prefetch_tp = 1;
1651 			}
1652 			INP_WUNLOCK(inp);
1653 			INP_UNLOCK_ASSERT(inp);
1654 			CURVNET_RESTORE();
1655 #ifdef INVARIANTS
1656 			if (mtx_owned(&hpts->p_mtx)) {
1657 				panic("Hpts:%p owns mtx prior-to lock line:%d",
1658 				    hpts, __LINE__);
1659 			}
1660 #endif
1661 			mtx_lock(&hpts->p_mtx);
1662 			if (logging_on)
1663 				tcp_hpts_log_it(hpts, hpts->p_inp, HPTSLOG_INP_DONE, 0, 4);
1664 			hpts->p_inp = NULL;
1665 		}
1666 		HPTS_MTX_ASSERT(hpts);
1667 		hpts->p_inp = NULL;
1668 		hpts->p_cur_slot++;
1669 		if (hpts->p_cur_slot >= NUM_OF_HPTSI_SLOTS) {
1670 			hpts->p_cur_slot = 0;
1671 		}
1672 	}
1673 no_one:
1674 	HPTS_MTX_ASSERT(hpts);
1675 	hpts->p_prevtick = hpts->p_curtick;
1676 	hpts->p_delayed_by = 0;
1677 	/*
1678 	 * Check to see if we took an excess amount of time and need to run
1679 	 * more ticks (if we did not hit ENOBUFS).
1680 	 */
1681 	/* Re-run any input that may be there */
1682 	(void)tcp_gethptstick(&tv);
1683 	if (!TAILQ_EMPTY(&hpts->p_input)) {
1684 		tcp_input_data(hpts, &tv);
1685 	}
1686 #ifdef INVARIANTS
1687 	if (TAILQ_EMPTY(&hpts->p_input) &&
1688 	    (hpts->p_on_inqueue_cnt != 0)) {
1689 		panic("tp:%p in_hpts input empty but cnt:%d",
1690 		    hpts, hpts->p_on_inqueue_cnt);
1691 	}
1692 #endif
1693 	tick_now = tcp_gethptstick(&tv);
1694 	if (SEQ_GT(tick_now, hpts->p_prevtick)) {
1695 		struct timeval res;
1696 
1697 		/* Did we really spend a full tick or more in here? */
1698 		timersub(&tv, ctick, &res);
1699 		if (res.tv_sec || (res.tv_usec >= HPTS_TICKS_PER_USEC)) {
1700 			counter_u64_add(hpts_loops, 1);
1701 			if (logging_on) {
1702 				tcp_hpts_log_it(hpts, inp, HPTSLOG_TOLONG, (uint32_t) res.tv_usec, tick_now);
1703 			}
1704 			*ctick = res;
1705 			hpts->p_curtick = tick_now;
1706 			goto again;
1707 		}
1708 	}
1709 no_run:
1710 	{
1711 		uint32_t t = 0, i, fnd = 0;
1712 
1713 		if (hpts->p_on_queue_cnt) {
1714 
1715 
1716 			/*
1717 			 * Find next slot that is occupied and use that to
1718 			 * be the sleep time.
1719 			 */
1720 			for (i = 1, t = hpts->p_nxt_slot; i < NUM_OF_HPTSI_SLOTS; i++) {
1721 				if (TAILQ_EMPTY(&hpts->p_hptss[t]) == 0) {
1722 					fnd = 1;
1723 					break;
1724 				}
1725 				t = (t + 1) % NUM_OF_HPTSI_SLOTS;
1726 			}
1727 			if (fnd) {
1728 				hpts->p_hpts_sleep_time = i;
1729 			} else {
1730 				counter_u64_add(back_tosleep, 1);
1731 #ifdef INVARIANTS
1732 				panic("Hpts:%p cnt:%d but non found", hpts, hpts->p_on_queue_cnt);
1733 #endif
1734 				hpts->p_on_queue_cnt = 0;
1735 				goto non_found;
1736 			}
1737 			t++;
1738 		} else {
1739 			/* No one on the wheel; sleep for all but 2 slots */
1740 non_found:
1741 			if (hpts_sleep_max == 0)
1742 				hpts_sleep_max = 1;
1743 			hpts->p_hpts_sleep_time = min((NUM_OF_HPTSI_SLOTS - 2), hpts_sleep_max);
1744 			t = 0;
1745 		}
1746 		if (logging_on) {
1747 			tcp_hpts_log_it(hpts, inp, HPTSLOG_SLEEPSET, t, (hpts->p_hpts_sleep_time * HPTS_TICKS_PER_USEC));
1748 		}
1749 	}
1750 }
1751 
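/*
 * Pick (once) the hpts and input CPUs for this inpcb. The choice is
 * sticky: if the inp is already on a wheel, or a CPU has already been
 * set, we leave the existing binding alone.
 */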
1752 void
1753 __tcp_set_hpts(struct inpcb *inp, int32_t line)
1754 {
1755 	struct tcp_hpts_entry *hpts;
1756 
1757 	INP_WLOCK_ASSERT(inp);
1758 	hpts = tcp_hpts_lock(inp);
1759 	if ((inp->inp_in_hpts == 0) &&
1760 	    (inp->inp_hpts_cpu_set == 0)) {
1761 		inp->inp_hpts_cpu = hpts_cpuid(inp);
1762 		inp->inp_hpts_cpu_set = 1;
1763 	}
1764 	mtx_unlock(&hpts->p_mtx);
1765 	hpts = tcp_input_lock(inp);
1766 	if ((inp->inp_input_cpu_set == 0) &&
1767 	    (inp->inp_in_input == 0)) {
1768 		inp->inp_input_cpu = hpts_cpuid(inp);
1769 		inp->inp_input_cpu_set = 1;
1770 	}
1771 	mtx_unlock(&hpts->p_mtx);
1772 }
1773 
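/*
 * Report how much the hpts assigned to this inpcb is currently
 * running behind (p_delayed_by is cleared each time the wheel
 * catches back up).
 */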
1774 uint16_t
1775 tcp_hpts_delayedby(struct inpcb *inp)
{
1776 	return (tcp_pace.rp_ent[inp->inp_hpts_cpu]->p_delayed_by);
1777 }
1778 
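/*
 * Body of each hpts SWI. We are either woken directly (p_direct_wake,
 * e.g. signaled by input) or by our callout timing out. Either way we
 * make one tcp_hptsi() pass over the wheel and then re-arm the callout
 * with the sleep time that pass computed, clamped to no less than
 * tcp_min_hptsi_time.
 */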
1779 static void
1780 tcp_hpts_thread(void *ctx)
1781 {
1782 	struct tcp_hpts_entry *hpts;
1783 	struct timeval tv;
1784 	sbintime_t sb;
1785 
1786 	hpts = (struct tcp_hpts_entry *)ctx;
1787 	mtx_lock(&hpts->p_mtx);
1788 	if (hpts->p_direct_wake) {
1789 		/* Signaled by input */
1790 		if (logging_on)
1791 			tcp_hpts_log_it(hpts, NULL, HPTSLOG_AWAKE, 1, 1);
1792 		callout_stop(&hpts->co);
1793 	} else {
1794 		/* Timed out */
1795 		if (callout_pending(&hpts->co) ||
1796 		    !callout_active(&hpts->co)) {
1797 			if (logging_on)
1798 				tcp_hpts_log_it(hpts, NULL, HPTSLOG_AWAKE, 2, 2);
1799 			mtx_unlock(&hpts->p_mtx);
1800 			return;
1801 		}
1802 		callout_deactivate(&hpts->co);
1803 		if (logging_on)
1804 			tcp_hpts_log_it(hpts, NULL, HPTSLOG_AWAKE, 3, 3);
1805 	}
1806 	hpts->p_hpts_active = 1;
1807 	(void)tcp_gethptstick(&tv);
1808 	tcp_hptsi(hpts, &tv);
1809 	HPTS_MTX_ASSERT(hpts);
1810 	tv.tv_sec = 0;
1811 	tv.tv_usec = hpts->p_hpts_sleep_time * HPTS_TICKS_PER_USEC;
1812 	if (tcp_min_hptsi_time && (tv.tv_usec < tcp_min_hptsi_time)) {
1813 		tv.tv_usec = tcp_min_hptsi_time;
1814 		hpts->p_on_min_sleep = 1;
1815 	} else {
1816 		/* Clear the min sleep flag */
1817 		hpts->p_on_min_sleep = 0;
1818 	}
1819 	hpts->p_hpts_active = 0;
1820 	sb = tvtosbt(tv);
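	/*
	 * Re-arm the callout for the computed sleep time. The default
	 * path fires hpts_timeout_swi directly from the callout
	 * (C_DIRECT_EXEC); setting tcp_hpts_callout_skip_swi switches
	 * to hpts_timeout_dir instead.
	 */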
1821 	if (tcp_hpts_callout_skip_swi == 0) {
1822 		callout_reset_sbt_on(&hpts->co, sb, 0,
1823 		    hpts_timeout_swi, hpts, hpts->p_cpu,
1824 		    (C_DIRECT_EXEC | C_PREL(tcp_hpts_precision)));
1825 	} else {
1826 		callout_reset_sbt_on(&hpts->co, sb, 0,
1827 		    hpts_timeout_dir, hpts,
1828 		    hpts->p_cpu,
1829 		    C_PREL(tcp_hpts_precision));
1830 	}
1831 	hpts->p_direct_wake = 0;
1832 	mtx_unlock(&hpts->p_mtx);
1833 }
1834 
1835 #undef	timersub
1836 
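/*
 * Boot-time initialization: allocate a tcp_hpts_entry (wheel, mutex,
 * sysctl nodes, optional log) for every CPU, then create one SWI
 * thread per CPU (bound to that CPU when tcp_bind_threads is set)
 * and arm each entry's first callout.
 */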
1837 static void
1838 tcp_init_hptsi(void *st)
1839 {
1840 	int32_t i, j, error, bound = 0, created = 0;
1841 	size_t sz, asz;
1842 	struct timeval tv;
1843 	sbintime_t sb;
1844 	struct tcp_hpts_entry *hpts;
1845 	char unit[16];
1846 	uint32_t ncpus = mp_ncpus ? mp_ncpus : MAXCPU;
1847 
1848 	tcp_pace.rp_proc = NULL;
1849 	tcp_pace.rp_num_hptss = ncpus;
1850 	hpts_loops = counter_u64_alloc(M_WAITOK);
1851 	back_tosleep = counter_u64_alloc(M_WAITOK);
1852 
1853 	sz = (tcp_pace.rp_num_hptss * sizeof(struct tcp_hpts_entry *));
1854 	tcp_pace.rp_ent = malloc(sz, M_TCPHPTS, M_WAITOK | M_ZERO);
1855 	asz = sizeof(struct hptsh) * NUM_OF_HPTSI_SLOTS;
1856 	for (i = 0; i < tcp_pace.rp_num_hptss; i++) {
1857 		tcp_pace.rp_ent[i] = malloc(sizeof(struct tcp_hpts_entry),
1858 		    M_TCPHPTS, M_WAITOK | M_ZERO);
1859 		tcp_pace.rp_ent[i]->p_hptss = malloc(asz,
1860 		    M_TCPHPTS, M_WAITOK);
1861 		hpts = tcp_pace.rp_ent[i];
1862 		/*
1863 		 * Init all the hpts structures that are not specifically
1864 		 * zeroed by the allocations, and attach them to the
1865 		 * appropriate sysctl block as well.
1866 		 */
1867 		mtx_init(&hpts->p_mtx, "tcp_hpts_lck",
1868 		    "hpts", MTX_DEF | MTX_DUPOK);
1869 		TAILQ_INIT(&hpts->p_input);
1870 		for (j = 0; j < NUM_OF_HPTSI_SLOTS; j++) {
1871 			TAILQ_INIT(&hpts->p_hptss[j]);
1872 		}
1873 		sysctl_ctx_init(&hpts->hpts_ctx);
1874 		sprintf(unit, "%d", i);
1875 		hpts->hpts_root = SYSCTL_ADD_NODE(&hpts->hpts_ctx,
1876 		    SYSCTL_STATIC_CHILDREN(_net_inet_tcp_hpts),
1877 		    OID_AUTO,
1878 		    unit,
1879 		    CTLFLAG_RW, 0,
1880 		    "");
1881 		SYSCTL_ADD_INT(&hpts->hpts_ctx,
1882 		    SYSCTL_CHILDREN(hpts->hpts_root),
1883 		    OID_AUTO, "in_qcnt", CTLFLAG_RD,
1884 		    &hpts->p_on_inqueue_cnt, 0,
1885 		    "Count of TCBs awaiting input processing");
1886 		SYSCTL_ADD_INT(&hpts->hpts_ctx,
1887 		    SYSCTL_CHILDREN(hpts->hpts_root),
1888 		    OID_AUTO, "out_qcnt", CTLFLAG_RD,
1889 		    &hpts->p_on_queue_cnt, 0,
1890 		    "Count of TCBs awaiting output processing");
1891 		SYSCTL_ADD_UINT(&hpts->hpts_ctx,
1892 		    SYSCTL_CHILDREN(hpts->hpts_root),
1893 		    OID_AUTO, "active", CTLFLAG_RD,
1894 		    &hpts->p_hpts_active, 0,
1895 		    "Is the hpts active");
1896 		SYSCTL_ADD_UINT(&hpts->hpts_ctx,
1897 		    SYSCTL_CHILDREN(hpts->hpts_root),
1898 		    OID_AUTO, "curslot", CTLFLAG_RD,
1899 		    &hpts->p_cur_slot, 0,
1900 		    "What the current slot is if active");
1901 		SYSCTL_ADD_UINT(&hpts->hpts_ctx,
1902 		    SYSCTL_CHILDREN(hpts->hpts_root),
1903 		    OID_AUTO, "curtick", CTLFLAG_RD,
1904 		    &hpts->p_curtick, 0,
1905 		    "What the current tick is if active");
1906 		SYSCTL_ADD_UINT(&hpts->hpts_ctx,
1907 		    SYSCTL_CHILDREN(hpts->hpts_root),
1908 		    OID_AUTO, "logsize", CTLFLAG_RD,
1909 		    &hpts->p_logsize, 0,
1910 		    "Hpts logging buffer size");
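		/*
		 * Start with the longest allowed sleep; tcp_hptsi()
		 * recomputes the sleep time at the end of every run.
		 */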
1911 		hpts->p_hpts_sleep_time = NUM_OF_HPTSI_SLOTS - 2;
1912 		hpts->p_num = i;
1913 		hpts->p_prevtick = hpts->p_curtick = tcp_gethptstick(&tv);
1914 		hpts->p_prevtick -= 1;
1915 		hpts->p_prevtick %= NUM_OF_HPTSI_SLOTS;
1916 		hpts->p_cpu = 0xffff;
1917 		hpts->p_nxt_slot = 1;
1918 		hpts->p_logsize = tcp_hpts_logging_size;
1919 		if (hpts->p_logsize) {
1920 			sz = (sizeof(struct hpts_log) * hpts->p_logsize);
1921 			hpts->p_log = malloc(sz, M_TCPHPTS, M_WAITOK | M_ZERO);
1922 		}
1923 		callout_init(&hpts->co, 1);
1924 	}
1925 	/*
1926 	 * Now let's start ithreads to handle the hpts entries.
1927 	 */
1928 	CPU_FOREACH(i) {
1929 		hpts = tcp_pace.rp_ent[i];
1930 		hpts->p_cpu = i;
1931 		error = swi_add(&hpts->ie, "hpts",
1932 		    tcp_hpts_thread, (void *)hpts,
1933 		    SWI_NET, INTR_MPSAFE, &hpts->ie_cookie);
1934 		if (error) {
1935 			panic("Can't add hpts:%p i:%d err:%d",
1936 			    hpts, i, error);
1937 		}
1938 		created++;
1939 		if (tcp_bind_threads) {
1940 			if (intr_event_bind(hpts->ie, i) == 0)
1941 				bound++;
1942 		}
1943 		tv.tv_sec = 0;
1944 		tv.tv_usec = hpts->p_hpts_sleep_time * HPTS_TICKS_PER_USEC;
1945 		sb = tvtosbt(tv);
1946 		if (tcp_hpts_callout_skip_swi == 0) {
1947 			callout_reset_sbt_on(&hpts->co, sb, 0,
1948 			    hpts_timeout_swi, hpts, hpts->p_cpu,
1949 			    (C_DIRECT_EXEC | C_PREL(tcp_hpts_precision)));
1950 		} else {
1951 			callout_reset_sbt_on(&hpts->co, sb, 0,
1952 			    hpts_timeout_dir, hpts,
1953 			    hpts->p_cpu,
1954 			    C_PREL(tcp_hpts_precision));
1955 		}
1956 	}
1957 	printf("TCP Hpts created %d swi interrupt threads and bound %d\n",
1958 	    created, bound);
1959 	return;
1960 }
1961 
1962 SYSINIT(tcphptsi, SI_SUB_KTHREAD_IDLE, SI_ORDER_ANY, tcp_init_hptsi, NULL);
1963 MODULE_VERSION(tcphpts, 1);
1964