xref: /freebsd/sys/netinet/tcp_hpts.c (revision ecbde90073aef9166e77cecf61bd80c15a707a53)
/*-
 * Copyright (c) 2016-2018 Netflix Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_tcpdebug.h"
/**
 * Some notes about usage.
 *
 * The tcp_hpts system is designed to provide a high precision timer
 * system for tcp. Its main purpose is to provide a mechanism for
 * pacing packets out onto the wire. It can be used in two ways
 * by a given TCP stack (and those two methods can be used simultaneously).
 *
 * First, and probably the main thing it is used for by Rack and BBR, it can
 * be used to call tcp_output() of a transport stack at some time in the future.
 * The normal way this is done is that tcp_output() of the stack schedules
 * itself to be called again by calling tcp_hpts_insert(inp, slot). The
 * slot is the time from now that the stack wants to be called, but it
 * must be converted to tcp_hpts's notion of a slot. This is done with
 * one of the macros HPTS_MS_TO_SLOTS or HPTS_USEC_TO_SLOTS. So a typical
 * call from the tcp_output() routine might look like:
 *
 * tcp_hpts_insert(inp, HPTS_USEC_TO_SLOTS(550));
 *
 * The above would schedule tcp_output() to be called in 550 microseconds.
 * Note that if using this mechanism the stack will want to add, near
 * its top, a check to prevent unwanted calls (from user land or the
 * arrival of incoming ACKs). So it would add something like:
 *
 * if (inp->inp_in_hpts)
 *    return;
 *
 * to prevent output processing until the time allotted has gone by.
 * Of course this is a bare bones example and the stack will probably
 * have more considerations than just the above.
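 *
 * As a slightly fuller sketch of the pacing pattern (hypothetical stack
 * code; the helper name example_pacing_delay() is made up purely for
 * illustration):
 *
 *	int
 *	example_tcp_output(struct tcpcb *tp)
 *	{
 *		struct inpcb *inp = tp->t_inpcb;
 *		uint32_t usecs;
 *
 *		if (inp->inp_in_hpts)
 *			return (0);
 *		... transmit whatever is currently allowed ...
 *		usecs = example_pacing_delay(tp);
 *		tcp_hpts_insert(inp, HPTS_USEC_TO_SLOTS(usecs));
 *		return (0);
 *	}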
 *
 * Now the tcp_hpts system will call tcp_output() in one of two forms:
 * it will first check to see if the stack has defined a
 * tfb_tcp_output_wtime() function; if so, that is the routine it
 * will call. If that function is not defined, then it will call the
 * tfb_tcp_output() function. The only difference between these
 * two calls is that the former passes the time in to the function
 * so the function does not have to access the time (which tcp_hpts
 * already has). What these functions do is of course totally up
 * to the individual tcp stack.
 *
 * Now the second function (actually two functions, I guess :D)
 * the tcp_hpts system provides is the ability to either abort
 * a connection (later) or process input on a connection.
 * Why would you want to do this? To keep processor locality.
 *
 * So in order to use the input redirection function the
 * stack changes its tcp_do_segment() routine to, instead
 * of processing the data, call the function:
 *
 * tcp_queue_pkt_to_input()
 *
 * You will note that the arguments to this function look
 * a lot like tcp_do_segment()'s arguments. This function
 * will assure that the tcp_hpts system will
 * call the function tfb_tcp_hpts_do_segment() from the
 * correct CPU. Note that multiple calls can get pushed
 * into the tcp_hpts system; this will be indicated by
 * the next to last argument to tfb_tcp_hpts_do_segment()
 * (nxt_pkt). If nxt_pkt is a 1 then another packet is
 * coming. If nxt_pkt is a 0 then this is the last call
 * that the tcp_hpts system has available for the tcp stack.
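 *
 * A minimal sketch of a receive handler honoring nxt_pkt (the name
 * example_hpts_do_segment() and the deferred-ACK detail are
 * illustrative only, not part of this file):
 *
 *	void
 *	example_hpts_do_segment(struct mbuf *m, struct tcphdr *th,
 *	    struct socket *so, struct tcpcb *tp, int32_t drop_hdrlen,
 *	    int32_t tlen, uint8_t iptos, int32_t ti_locked,
 *	    int32_t nxt_pkt, struct timeval *tv)
 *	{
 *		... normal segment processing ...
 *		if (nxt_pkt == 0) {
 *			... no more packets are queued for us, so
 *			    flush any ACK we have been accumulating ...
 *		}
 *	}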
 *
 * The other point of the input system is to be able to safely
 * drop a tcp connection without worrying about the recursive
 * locking that may be occurring on the INP_WLOCK. So if
 * a stack wants to drop a connection it calls:
 *
 *     tcp_set_inp_to_drop(inp, ETIMEDOUT)
 *
 * to schedule the tcp_hpts system to call
 *
 *    tcp_drop(tp, drop_reason)
 *
 * at a future point. This is quite handy to prevent locking
 * issues when dropping connections.
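 *
 * For example, a stack's timer logic that has decided the session is
 * dead might do (retransmits_exhausted() being a made-up predicate):
 *
 *	if (retransmits_exhausted(tp))
 *		tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT);
 *
 * Note that tcp_set_inp_to_drop() takes the inpcb, while the deferred
 * tcp_drop() is later invoked on the tcpcb by the hpts itself.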
 *
 */

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/module.h>
#include <sys/kernel.h>
#include <sys/hhook.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>		/* for proc0 declaration */
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/refcount.h>
#include <sys/sched.h>
#include <sys/queue.h>
#include <sys/smp.h>
#include <sys/counter.h>
#include <sys/time.h>
#include <sys/kthread.h>
#include <sys/kern_prefetch.h>

#include <vm/uma.h>

#include <net/route.h>
#include <net/vnet.h>

#define TCPSTATES		/* for logging */

#include <netinet/in.h>
#include <netinet/in_kdtrace.h>
#include <netinet/in_pcb.h>
#include <netinet/ip.h>
#include <netinet/ip_icmp.h>	/* required for icmp_var.h */
#include <netinet/icmp_var.h>	/* for ICMP_BANDLIM */
#include <netinet/ip_var.h>
#include <netinet/ip6.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/ip6_var.h>
#define	TCPOUTFLAGS
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcpip.h>
#include <netinet/cc/cc.h>
#include <netinet/tcp_hpts.h>

#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>
#endif				/* TCPDEBUG */
#ifdef TCP_OFFLOAD
#include <netinet/tcp_offload.h>
#endif

#ifdef IPSEC
#include <netipsec/ipsec.h>
#include <netipsec/ipsec6.h>
#endif				/* IPSEC */
#include "opt_rss.h"

MALLOC_DEFINE(M_TCPHPTS, "tcp_hpts", "TCP hpts");
#ifdef RSS
static int tcp_bind_threads = 1;
#else
static int tcp_bind_threads = 0;
#endif
TUNABLE_INT("net.inet.tcp.bind_hptss", &tcp_bind_threads);

static uint32_t tcp_hpts_logging_size = DEFAULT_HPTS_LOG;

TUNABLE_INT("net.inet.tcp.hpts_logging_sz", &tcp_hpts_logging_size);

static struct tcp_hptsi tcp_pace;

static int
tcp_hptsi_lock_inpinfo(struct inpcb *inp,
    struct tcpcb **tp);
static void tcp_wakehpts(struct tcp_hpts_entry *p);
static void tcp_wakeinput(struct tcp_hpts_entry *p);
static void tcp_input_data(struct tcp_hpts_entry *hpts, struct timeval *tv);
static void tcp_hptsi(struct tcp_hpts_entry *hpts, struct timeval *ctick);
static void tcp_hpts_thread(void *ctx);
static void tcp_init_hptsi(void *st);

int32_t tcp_min_hptsi_time = DEFAULT_MIN_SLEEP;
static int32_t tcp_hpts_callout_skip_swi = 0;

SYSCTL_NODE(_net_inet_tcp, OID_AUTO, hpts, CTLFLAG_RW, 0, "TCP Hpts controls");

#define	timersub(tvp, uvp, vvp)						\
	do {								\
		(vvp)->tv_sec = (tvp)->tv_sec - (uvp)->tv_sec;		\
		(vvp)->tv_usec = (tvp)->tv_usec - (uvp)->tv_usec;	\
		if ((vvp)->tv_usec < 0) {				\
			(vvp)->tv_sec--;				\
			(vvp)->tv_usec += 1000000;			\
		}							\
	} while (0)

static int32_t logging_on = 0;
static int32_t hpts_sleep_max = (NUM_OF_HPTSI_SLOTS - 2);
static int32_t tcp_hpts_precision = 120;

SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, precision, CTLFLAG_RW,
    &tcp_hpts_precision, 120,
    "Value for PRE() precision of callout");

SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, logging, CTLFLAG_RW,
    &logging_on, 0,
    "Turn on logging if compiled in");

counter_u64_t hpts_loops;

SYSCTL_COUNTER_U64(_net_inet_tcp_hpts, OID_AUTO, loops, CTLFLAG_RD,
    &hpts_loops, "Number of times hpts had to loop to catch up");

counter_u64_t back_tosleep;

SYSCTL_COUNTER_U64(_net_inet_tcp_hpts, OID_AUTO, no_tcbsfound, CTLFLAG_RD,
    &back_tosleep, "Number of times hpts found no tcbs");

static int32_t in_newts_every_tcb = 0;

SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, in_tsperpcb, CTLFLAG_RW,
    &in_newts_every_tcb, 0,
    "Do we take a new cts for every tcb we process for input");
static int32_t in_ts_percision = 0;

SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, in_tspercision, CTLFLAG_RW,
    &in_ts_percision, 0,
    "Do we use a precise timestamp for clients on input");
static int32_t out_newts_every_tcb = 0;

SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, out_tsperpcb, CTLFLAG_RW,
    &out_newts_every_tcb, 0,
    "Do we take a new cts for every tcb we process for output");
static int32_t out_ts_percision = 0;

SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, out_tspercision, CTLFLAG_RW,
    &out_ts_percision, 0,
    "Do we use a precise timestamp for every output cts");

SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, maxsleep, CTLFLAG_RW,
    &hpts_sleep_max, 0,
    "The maximum time the hpts will sleep <1 - 254>");

SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, minsleep, CTLFLAG_RW,
    &tcp_min_hptsi_time, 0,
    "The minimum time the hpts must sleep before processing more slots");

SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, skip_swi, CTLFLAG_RW,
    &tcp_hpts_callout_skip_swi, 0,
    "Do we have the callout call directly to the hpts?");

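/*
 * Record one event in this hpts's ring-buffer log, if one was
 * allocated. The write position wraps at p_logsize (setting
 * p_log_wrapped), and sysctl_tcp_hpts_log() below copies the buffer
 * out. The hpts mutex must be held.
 */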
static void
__tcp_hpts_log_it(struct tcp_hpts_entry *hpts, struct inpcb *inp, int event, uint32_t slot,
    uint32_t ticknow, int32_t line)
{
	struct hpts_log *pl;

	HPTS_MTX_ASSERT(hpts);
	if (hpts->p_log == NULL)
		return;
	pl = &hpts->p_log[hpts->p_log_at];
	hpts->p_log_at++;
	if (hpts->p_log_at >= hpts->p_logsize) {
		hpts->p_log_at = 0;
		hpts->p_log_wrapped = 1;
	}
	pl->inp = inp;
	if (inp) {
		pl->t_paceslot = inp->inp_hptsslot;
		pl->t_hptsreq = inp->inp_hpts_request;
		pl->p_onhpts = inp->inp_in_hpts;
		pl->p_oninput = inp->inp_in_input;
	} else {
		pl->t_paceslot = 0;
		pl->t_hptsreq = 0;
		pl->p_onhpts = 0;
		pl->p_oninput = 0;
	}
	pl->is_notempty = 1;
	pl->event = event;
	pl->line = line;
	pl->cts = tcp_get_usecs(NULL);
	pl->p_curtick = hpts->p_curtick;
	pl->p_prevtick = hpts->p_prevtick;
	pl->p_on_queue_cnt = hpts->p_on_queue_cnt;
	pl->ticknow = ticknow;
	pl->slot_req = slot;
	pl->p_nxt_slot = hpts->p_nxt_slot;
	pl->p_cur_slot = hpts->p_cur_slot;
	pl->p_hpts_sleep_time = hpts->p_hpts_sleep_time;
	pl->p_flags = (hpts->p_cpu & 0x7f);
	pl->p_flags <<= 7;
	pl->p_flags |= (hpts->p_num & 0x7f);
	pl->p_flags <<= 2;
	if (hpts->p_hpts_active) {
		pl->p_flags |= HPTS_HPTS_ACTIVE;
	}
}

#define tcp_hpts_log_it(a, b, c, d, e) __tcp_hpts_log_it(a, b, c, d, e, __LINE__)

static void
hpts_timeout_swi(void *arg)
{
	struct tcp_hpts_entry *hpts;

	hpts = (struct tcp_hpts_entry *)arg;
	swi_sched(hpts->ie_cookie, 0);
}

static void
hpts_timeout_dir(void *arg)
{
	tcp_hpts_thread(arg);
}

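/*
 * The hpts_sane_*() helpers below insert and remove an inpcb on either
 * the pacing wheel or the input queue. Under INVARIANTS they verify
 * that the caller holds this hpts's mutex, that the inp belongs to
 * this hpts's CPU, and that the on/off flags and queue counts stay
 * consistent. Inserts take a reference on the inpcb (unless noref is
 * set); the reference is dropped separately via tcp_remove_hpts_ref().
 */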
static inline void
hpts_sane_pace_remove(struct tcp_hpts_entry *hpts, struct inpcb *inp, struct hptsh *head, int clear)
{
#ifdef INVARIANTS
	if (mtx_owned(&hpts->p_mtx) == 0) {
		/* We don't own the mutex? */
		panic("%s: hpts:%p inp:%p no hpts mutex", __FUNCTION__, hpts, inp);
	}
	if (hpts->p_cpu != inp->inp_hpts_cpu) {
		/* It is not the right cpu/mutex? */
		panic("%s: hpts:%p inp:%p incorrect CPU", __FUNCTION__, hpts, inp);
	}
	if (inp->inp_in_hpts == 0) {
		/* We are not on the hpts? */
		panic("%s: hpts:%p inp:%p not on the hpts?", __FUNCTION__, hpts, inp);
	}
	if (TAILQ_EMPTY(head) &&
	    (hpts->p_on_queue_cnt != 0)) {
		/* We should not be empty with a queue count */
		panic("%s hpts:%p hpts bucket empty but cnt:%d",
		    __FUNCTION__, hpts, hpts->p_on_queue_cnt);
	}
#endif
	TAILQ_REMOVE(head, inp, inp_hpts);
	hpts->p_on_queue_cnt--;
	if (hpts->p_on_queue_cnt < 0) {
		/* Count should not go negative .. */
#ifdef INVARIANTS
		panic("Hpts goes negative inp:%p hpts:%p",
		    inp, hpts);
#endif
		hpts->p_on_queue_cnt = 0;
	}
	if (clear) {
		inp->inp_hpts_request = 0;
		inp->inp_in_hpts = 0;
	}
}

static inline void
hpts_sane_pace_insert(struct tcp_hpts_entry *hpts, struct inpcb *inp, struct hptsh *head, int line, int noref)
{
#ifdef INVARIANTS
	if (mtx_owned(&hpts->p_mtx) == 0) {
		/* We don't own the mutex? */
		panic("%s: hpts:%p inp:%p no hpts mutex", __FUNCTION__, hpts, inp);
	}
	if (hpts->p_cpu != inp->inp_hpts_cpu) {
		/* It is not the right cpu/mutex? */
		panic("%s: hpts:%p inp:%p incorrect CPU", __FUNCTION__, hpts, inp);
	}
	if ((noref == 0) && (inp->inp_in_hpts == 1)) {
		/* We are already on the hpts? */
		panic("%s: hpts:%p inp:%p already on the hpts?", __FUNCTION__, hpts, inp);
	}
#endif
	TAILQ_INSERT_TAIL(head, inp, inp_hpts);
	inp->inp_in_hpts = 1;
	hpts->p_on_queue_cnt++;
	if (noref == 0) {
		in_pcbref(inp);
	}
}

static inline void
hpts_sane_input_remove(struct tcp_hpts_entry *hpts, struct inpcb *inp, int clear)
{
#ifdef INVARIANTS
	if (mtx_owned(&hpts->p_mtx) == 0) {
		/* We don't own the mutex? */
		panic("%s: hpts:%p inp:%p no hpts mutex", __FUNCTION__, hpts, inp);
	}
	if (hpts->p_cpu != inp->inp_input_cpu) {
		/* It is not the right cpu/mutex? */
		panic("%s: hpts:%p inp:%p incorrect CPU", __FUNCTION__, hpts, inp);
	}
	if (inp->inp_in_input == 0) {
		/* We are not on the input hpts? */
		panic("%s: hpts:%p inp:%p not on the input hpts?", __FUNCTION__, hpts, inp);
	}
#endif
	TAILQ_REMOVE(&hpts->p_input, inp, inp_input);
	hpts->p_on_inqueue_cnt--;
	if (hpts->p_on_inqueue_cnt < 0) {
#ifdef INVARIANTS
		panic("Hpts in goes negative inp:%p hpts:%p",
		    inp, hpts);
#endif
		hpts->p_on_inqueue_cnt = 0;
	}
#ifdef INVARIANTS
	if (TAILQ_EMPTY(&hpts->p_input) &&
	    (hpts->p_on_inqueue_cnt != 0)) {
		/* We should not be empty with a queue count */
		panic("%s hpts:%p in_hpts input empty but cnt:%d",
		    __FUNCTION__, hpts, hpts->p_on_inqueue_cnt);
	}
#endif
	if (clear)
		inp->inp_in_input = 0;
}

static inline void
hpts_sane_input_insert(struct tcp_hpts_entry *hpts, struct inpcb *inp, int line)
{
#ifdef INVARIANTS
	if (mtx_owned(&hpts->p_mtx) == 0) {
		/* We don't own the mutex? */
		panic("%s: hpts:%p inp:%p no hpts mutex", __FUNCTION__, hpts, inp);
	}
	if (hpts->p_cpu != inp->inp_input_cpu) {
		/* It is not the right cpu/mutex? */
		panic("%s: hpts:%p inp:%p incorrect CPU", __FUNCTION__, hpts, inp);
	}
	if (inp->inp_in_input == 1) {
		/* We are already on the input hpts? */
		panic("%s: hpts:%p inp:%p already on the input hpts?", __FUNCTION__, hpts, inp);
	}
#endif
	TAILQ_INSERT_TAIL(&hpts->p_input, inp, inp_input);
	inp->inp_in_input = 1;
	hpts->p_on_inqueue_cnt++;
	in_pcbref(inp);
}

static int
sysctl_tcp_hpts_log(SYSCTL_HANDLER_ARGS)
{
	struct tcp_hpts_entry *hpts;
	size_t sz;
	int32_t logging_was, i;
	int32_t error = 0;

	/*
	 * HACK: Turn off logging so no locks are required; this really
	 * needs a memory barrier :)
	 */
	logging_was = logging_on;
	logging_on = 0;
	if (!req->oldptr) {
		/* How much? */
		sz = 0;
		for (i = 0; i < tcp_pace.rp_num_hptss; i++) {
			hpts = tcp_pace.rp_ent[i];
			if (hpts->p_log == NULL)
				continue;
			sz += (sizeof(struct hpts_log) * hpts->p_logsize);
		}
		error = SYSCTL_OUT(req, 0, sz);
	} else {
		for (i = 0; i < tcp_pace.rp_num_hptss; i++) {
			hpts = tcp_pace.rp_ent[i];
			if (hpts->p_log == NULL)
				continue;
			if (hpts->p_log_wrapped)
				sz = (sizeof(struct hpts_log) * hpts->p_logsize);
			else
				sz = (sizeof(struct hpts_log) * hpts->p_log_at);
			error = SYSCTL_OUT(req, hpts->p_log, sz);
		}
	}
	logging_on = logging_was;
	return (error);
}

SYSCTL_PROC(_net_inet_tcp_hpts, OID_AUTO, log, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE,
    0, 0, sysctl_tcp_hpts_log, "A", "tcp hptsi log");

/*
 * Try to get the INP_INFO lock.
 *
 * This function always succeeds in getting the lock. It will clear
 * *tpp and return (1) if something critical changed while the inpcb
 * was unlocked. Otherwise, it will leave *tpp unchanged and return (0).
 *
 * This function relies on the fact that the hpts always holds a
 * reference on the inpcb while the segment is on the hptsi wheel and
 * in the input queue.
 *
 */
static int
tcp_hptsi_lock_inpinfo(struct inpcb *inp, struct tcpcb **tpp)
{
	struct tcp_function_block *tfb;
	struct tcpcb *tp;
	void *ptr;

	/* Try the easy way. */
	if (INP_INFO_TRY_RLOCK(&V_tcbinfo))
		return (0);

	/*
	 * OK, let's try the hard way. We'll save the function pointer block
	 * to make sure that doesn't change while we aren't holding the
	 * lock.
	 */
	tp = *tpp;
	tfb = tp->t_fb;
	ptr = tp->t_fb_ptr;
	INP_WUNLOCK(inp);
	INP_INFO_RLOCK(&V_tcbinfo);
	INP_WLOCK(inp);
	/* If the session went away, return an error. */
	if ((inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) ||
	    (inp->inp_flags2 & INP_FREED)) {
		*tpp = NULL;
		return (1);
	}
	/*
	 * If the function block or stack-specific data block changed,
	 * report an error.
	 */
	tp = intotcpcb(inp);
	if ((tp->t_fb != tfb) || (tp->t_fb_ptr != ptr)) {
		*tpp = NULL;
		return (1);
	}
	return (0);
}

static void
tcp_wakehpts(struct tcp_hpts_entry *hpts)
{
	HPTS_MTX_ASSERT(hpts);
	swi_sched(hpts->ie_cookie, 0);
	if (hpts->p_hpts_active == 2) {
		/* Rare sleeping on an ENOBUF */
		wakeup_one(hpts);
	}
}

static void
tcp_wakeinput(struct tcp_hpts_entry *hpts)
{
	HPTS_MTX_ASSERT(hpts);
	swi_sched(hpts->ie_cookie, 0);
	if (hpts->p_hpts_active == 2) {
		/* Rare sleeping on an ENOBUF */
		wakeup_one(hpts);
	}
}

struct tcp_hpts_entry *
tcp_cur_hpts(struct inpcb *inp)
{
	int32_t hpts_num;
	struct tcp_hpts_entry *hpts;

	hpts_num = inp->inp_hpts_cpu;
	hpts = tcp_pace.rp_ent[hpts_num];
	return (hpts);
}

struct tcp_hpts_entry *
tcp_hpts_lock(struct inpcb *inp)
{
	struct tcp_hpts_entry *hpts;
	int32_t hpts_num;

again:
	hpts_num = inp->inp_hpts_cpu;
	hpts = tcp_pace.rp_ent[hpts_num];
#ifdef INVARIANTS
	if (mtx_owned(&hpts->p_mtx)) {
		panic("Hpts:%p owns mtx prior-to lock line:%d",
		    hpts, __LINE__);
	}
#endif
	mtx_lock(&hpts->p_mtx);
	if (hpts_num != inp->inp_hpts_cpu) {
		mtx_unlock(&hpts->p_mtx);
		goto again;
	}
	return (hpts);
}

struct tcp_hpts_entry *
tcp_input_lock(struct inpcb *inp)
{
	struct tcp_hpts_entry *hpts;
	int32_t hpts_num;

again:
	hpts_num = inp->inp_input_cpu;
	hpts = tcp_pace.rp_ent[hpts_num];
#ifdef INVARIANTS
	if (mtx_owned(&hpts->p_mtx)) {
		panic("Hpts:%p owns mtx prior-to lock line:%d",
		    hpts, __LINE__);
	}
#endif
	mtx_lock(&hpts->p_mtx);
	if (hpts_num != inp->inp_input_cpu) {
		mtx_unlock(&hpts->p_mtx);
		goto again;
	}
	return (hpts);
}

static void
tcp_remove_hpts_ref(struct inpcb *inp, struct tcp_hpts_entry *hpts, int line)
{
	int32_t add_freed;

	if (inp->inp_flags2 & INP_FREED) {
		/*
		 * Need to play a special trick so that in_pcbrele_wlocked
		 * does not return 1 when it really should have returned 0.
		 */
		add_freed = 1;
		inp->inp_flags2 &= ~INP_FREED;
	} else {
		add_freed = 0;
	}
#ifndef INP_REF_DEBUG
	if (in_pcbrele_wlocked(inp)) {
		/*
		 * This should not happen. We have the inpcb referred to by
		 * the main socket (why we are called) and the hpts. It
		 * should always return 0.
		 */
		panic("inpcb:%p release ret 1",
		    inp);
	}
#else
	if (__in_pcbrele_wlocked(inp, line)) {
		/*
		 * This should not happen. We have the inpcb referred to by
		 * the main socket (why we are called) and the hpts. It
		 * should always return 0.
		 */
		panic("inpcb:%p release ret 1",
		    inp);
	}
#endif
	if (add_freed) {
		inp->inp_flags2 |= INP_FREED;
	}
}

static void
tcp_hpts_remove_locked_output(struct tcp_hpts_entry *hpts, struct inpcb *inp, int32_t flags, int32_t line)
{
	if (inp->inp_in_hpts) {
		hpts_sane_pace_remove(hpts, inp, &hpts->p_hptss[inp->inp_hptsslot], 1);
		tcp_remove_hpts_ref(inp, hpts, line);
	}
}

static void
tcp_hpts_remove_locked_input(struct tcp_hpts_entry *hpts, struct inpcb *inp, int32_t flags, int32_t line)
{
	HPTS_MTX_ASSERT(hpts);
	if (inp->inp_in_input) {
		hpts_sane_input_remove(hpts, inp, 1);
		tcp_remove_hpts_ref(inp, hpts, line);
	}
}

/*
 * Called normally with the INP write lock held, but it does not
 * matter: the hpts lock is the key, and the lock order allows us to
 * hold the INP lock and then acquire the hpts lock.
 *
 * Valid values in the flags are
 * HPTS_REMOVE_OUTPUT - remove from the output of the hpts.
 * HPTS_REMOVE_INPUT - remove from the input of the hpts.
 * Note that you can OR both values together and get two
 * actions.
 */
void
__tcp_hpts_remove(struct inpcb *inp, int32_t flags, int32_t line)
{
	struct tcp_hpts_entry *hpts;

	INP_WLOCK_ASSERT(inp);
	if (flags & HPTS_REMOVE_OUTPUT) {
		hpts = tcp_hpts_lock(inp);
		tcp_hpts_remove_locked_output(hpts, inp, flags, line);
		mtx_unlock(&hpts->p_mtx);
	}
	if (flags & HPTS_REMOVE_INPUT) {
		hpts = tcp_input_lock(inp);
		tcp_hpts_remove_locked_input(hpts, inp, flags, line);
		mtx_unlock(&hpts->p_mtx);
	}
}

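/*
 * Return the wheel slot that is "plus" ticks after the last tick the
 * hpts processed, wrapping modulo the wheel size; e.g. hpts_tick(hpts, 1)
 * is the slot immediately after p_prevtick.
 */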
static inline int
hpts_tick(struct tcp_hpts_entry *hpts, int32_t plus)
{
	return ((hpts->p_prevtick + plus) % NUM_OF_HPTSI_SLOTS);
}

static int
tcp_queue_to_hpts_immediate_locked(struct inpcb *inp, struct tcp_hpts_entry *hpts, int32_t line, int32_t noref)
{
	int32_t need_wake = 0;
	uint32_t ticknow = 0;

	HPTS_MTX_ASSERT(hpts);
	if (inp->inp_in_hpts == 0) {
		/* Ok we need to set it on the hpts in the current slot */
		if (hpts->p_hpts_active == 0) {
			/* A sleeping hpts we want in next slot to run */
			if (logging_on) {
				tcp_hpts_log_it(hpts, inp, HPTSLOG_INSERT_SLEEPER, 0,
				    hpts_tick(hpts, 1));
			}
			inp->inp_hptsslot = hpts_tick(hpts, 1);
			inp->inp_hpts_request = 0;
			if (logging_on) {
				tcp_hpts_log_it(hpts, inp, HPTSLOG_SLEEP_BEFORE, 1, ticknow);
			}
			need_wake = 1;
		} else if ((void *)inp == hpts->p_inp) {
			/*
			 * We can't allow you to go into the same slot we
			 * are in. We must put you out.
			 */
			inp->inp_hptsslot = hpts->p_nxt_slot;
		} else
			inp->inp_hptsslot = hpts->p_cur_slot;
		hpts_sane_pace_insert(hpts, inp, &hpts->p_hptss[inp->inp_hptsslot], line, noref);
		inp->inp_hpts_request = 0;
		if (logging_on) {
			tcp_hpts_log_it(hpts, inp, HPTSLOG_IMMEDIATE, 0, 0);
		}
		if (need_wake) {
			/*
			 * Activate the hpts if it is sleeping and its
			 * timeout is not 1.
			 */
			if (logging_on) {
				tcp_hpts_log_it(hpts, inp, HPTSLOG_WAKEUP_HPTS, 0, ticknow);
			}
			hpts->p_direct_wake = 1;
			tcp_wakehpts(hpts);
		}
	}
	return (need_wake);
}

int
__tcp_queue_to_hpts_immediate(struct inpcb *inp, int32_t line)
{
	int32_t ret;
	struct tcp_hpts_entry *hpts;

	INP_WLOCK_ASSERT(inp);
	hpts = tcp_hpts_lock(inp);
	ret = tcp_queue_to_hpts_immediate_locked(inp, hpts, line, 0);
	mtx_unlock(&hpts->p_mtx);
	return (ret);
}

static void
tcp_hpts_insert_locked(struct tcp_hpts_entry *hpts, struct inpcb *inp, uint32_t slot, uint32_t cts, int32_t line,
    struct hpts_diag *diag, int32_t noref)
{
	int32_t need_new_to = 0;
	int32_t need_wakeup = 0;
	uint32_t largest_slot;
	uint32_t ticknow = 0;
	uint32_t slot_calc;

	HPTS_MTX_ASSERT(hpts);
	if (diag) {
		memset(diag, 0, sizeof(struct hpts_diag));
		diag->p_hpts_active = hpts->p_hpts_active;
		diag->p_nxt_slot = hpts->p_nxt_slot;
		diag->p_cur_slot = hpts->p_cur_slot;
		diag->slot_req = slot;
	}
	if ((inp->inp_in_hpts == 0) || noref) {
		inp->inp_hpts_request = slot;
		if (slot == 0) {
			/* Immediate */
			tcp_queue_to_hpts_immediate_locked(inp, hpts, line, noref);
			return;
		}
		if (hpts->p_hpts_active) {
			/*
			 * It's slot - 1 since nxt_slot is the next tick that
			 * will go off, since the hpts is awake.
			 */
			if (logging_on) {
				tcp_hpts_log_it(hpts, inp, HPTSLOG_INSERT_NORMAL, slot, 0);
			}
			/*
			 * We want to make sure that we don't place an inp in
			 * the range of p_cur_slot <-> p_nxt_slot. If we
			 * take from p_nxt_slot to the end, plus p_cur_slot
			 * and then take away 2, we will know how many is
			 * the max slots we can use.
			 */
			if (hpts->p_nxt_slot > hpts->p_cur_slot) {
				/*
				 * Non-wrap case nxt_slot <-> cur_slot we
				 * don't want to land in. So the diff gives
				 * us what is taken away from the number of
				 * slots.
				 */
				largest_slot = NUM_OF_HPTSI_SLOTS - (hpts->p_nxt_slot - hpts->p_cur_slot);
			} else if (hpts->p_nxt_slot == hpts->p_cur_slot) {
				largest_slot = NUM_OF_HPTSI_SLOTS - 2;
			} else {
				/*
				 * Wrap case so the diff gives us the number
				 * of slots that we can land in.
				 */
				largest_slot = hpts->p_cur_slot - hpts->p_nxt_slot;
			}
			/*
			 * We take away two so we never have a problem (20
			 * usecs) out of 1024000 usecs.
			 */
			largest_slot -= 2;
			if (inp->inp_hpts_request > largest_slot) {
				/*
				 * Restrict max jump of slots and remember
				 * leftover
				 */
				slot = largest_slot;
				inp->inp_hpts_request -= largest_slot;
			} else {
				/* This one will run when we hit it */
				inp->inp_hpts_request = 0;
			}
			if (hpts->p_nxt_slot == hpts->p_cur_slot)
				slot_calc = (hpts->p_nxt_slot + slot) % NUM_OF_HPTSI_SLOTS;
			else
				slot_calc = (hpts->p_nxt_slot + slot - 1) % NUM_OF_HPTSI_SLOTS;
			if (slot_calc == hpts->p_cur_slot) {
#ifdef INVARIANTS
				/* TSNH */
				panic("Hpts:%p impossible slot calculation slot_calc:%u slot:%u largest:%u\n",
				    hpts, slot_calc, slot, largest_slot);
#endif
				if (slot_calc)
					slot_calc--;
				else
					slot_calc = NUM_OF_HPTSI_SLOTS - 1;
			}
			inp->inp_hptsslot = slot_calc;
			if (diag) {
				diag->inp_hptsslot = inp->inp_hptsslot;
			}
		} else {
			/*
			 * The hpts is sleeping; we need to figure out where
			 * it will wake up and whether we need to reschedule
			 * its time-out.
			 */
			uint32_t have_slept, yet_to_sleep;
			uint32_t slot_now;
			struct timeval tv;

			ticknow = tcp_gethptstick(&tv);
			slot_now = ticknow % NUM_OF_HPTSI_SLOTS;
			/*
			 * The user wants to be inserted at (slot_now +
			 * slot) % NUM_OF_HPTSI_SLOTS, so let's set that up.
			 */
			largest_slot = NUM_OF_HPTSI_SLOTS - 2;
			if (inp->inp_hpts_request > largest_slot) {
				/* Adjust the residual in inp_hpts_request */
				slot = largest_slot;
				inp->inp_hpts_request -= largest_slot;
			} else {
				/* No residual, it all fits */
				inp->inp_hpts_request = 0;
			}
			inp->inp_hptsslot = (slot_now + slot) % NUM_OF_HPTSI_SLOTS;
			if (diag) {
				diag->slot_now = slot_now;
				diag->inp_hptsslot = inp->inp_hptsslot;
				diag->p_on_min_sleep = hpts->p_on_min_sleep;
			}
			if (logging_on) {
				tcp_hpts_log_it(hpts, inp, HPTSLOG_INSERT_SLEEPER, slot, ticknow);
			}
			/* Now do we need to restart the hpts's timer? */
			if (TSTMP_GT(ticknow, hpts->p_curtick))
				have_slept = ticknow - hpts->p_curtick;
			else
				have_slept = 0;
			if (have_slept < hpts->p_hpts_sleep_time) {
				/* This should be what happens */
				yet_to_sleep = hpts->p_hpts_sleep_time - have_slept;
			} else {
				/* We are over-due */
				yet_to_sleep = 0;
				need_wakeup = 1;
			}
			if (diag) {
				diag->have_slept = have_slept;
				diag->yet_to_sleep = yet_to_sleep;
				diag->hpts_sleep_time = hpts->p_hpts_sleep_time;
			}
			if ((hpts->p_on_min_sleep == 0) && (yet_to_sleep > slot)) {
				/*
				 * We need to reschedule the hpts's time-out.
				 */
				hpts->p_hpts_sleep_time = slot;
				need_new_to = slot * HPTS_TICKS_PER_USEC;
			}
		}
		hpts_sane_pace_insert(hpts, inp, &hpts->p_hptss[inp->inp_hptsslot], line, noref);
		if (logging_on) {
			tcp_hpts_log_it(hpts, inp, HPTSLOG_INSERTED, slot, ticknow);
		}
		/*
		 * Now, how far out is the hpts sleeping? If active is 1,
		 * it's up and ticking and we do nothing; otherwise we may
		 * need to reschedule its callout if need_new_to is set
		 * from above.
		 */
		if (need_wakeup) {
			if (logging_on) {
				tcp_hpts_log_it(hpts, inp, HPTSLOG_RESCHEDULE, 1, 0);
			}
			hpts->p_direct_wake = 1;
			tcp_wakehpts(hpts);
			if (diag) {
				diag->need_new_to = 0;
				diag->co_ret = 0xffff0000;
			}
		} else if (need_new_to) {
			int32_t co_ret;
			struct timeval tv;
			sbintime_t sb;

			tv.tv_sec = 0;
			tv.tv_usec = 0;
			while (need_new_to > HPTS_USEC_IN_SEC) {
				tv.tv_sec++;
				need_new_to -= HPTS_USEC_IN_SEC;
			}
			tv.tv_usec = need_new_to;
			sb = tvtosbt(tv);
			if (tcp_hpts_callout_skip_swi == 0) {
				co_ret = callout_reset_sbt_on(&hpts->co, sb, 0,
				    hpts_timeout_swi, hpts, hpts->p_cpu,
				    (C_DIRECT_EXEC | C_PREL(tcp_hpts_precision)));
			} else {
				co_ret = callout_reset_sbt_on(&hpts->co, sb, 0,
				    hpts_timeout_dir, hpts,
				    hpts->p_cpu,
				    C_PREL(tcp_hpts_precision));
			}
			if (diag) {
				diag->need_new_to = need_new_to;
				diag->co_ret = co_ret;
			}
		}
	} else {
#ifdef INVARIANTS
		panic("Hpts:%p tp:%p already on hpts and add?", hpts, inp);
#endif
	}
}

uint32_t
tcp_hpts_insert_diag(struct inpcb *inp, uint32_t slot, int32_t line, struct hpts_diag *diag)
{
	struct tcp_hpts_entry *hpts;
	uint32_t slot_on, cts;
	struct timeval tv;

	/*
	 * We now return the next-slot the hpts will be on, beyond its
	 * current run (if up) or where it was when it stopped if it is
	 * sleeping.
	 */
	INP_WLOCK_ASSERT(inp);
	hpts = tcp_hpts_lock(inp);
	if (in_ts_percision)
		microuptime(&tv);
	else
		getmicrouptime(&tv);
	cts = tcp_tv_to_usectick(&tv);
	tcp_hpts_insert_locked(hpts, inp, slot, cts, line, diag, 0);
	slot_on = hpts->p_nxt_slot;
	mtx_unlock(&hpts->p_mtx);
	return (slot_on);
}

uint32_t
__tcp_hpts_insert(struct inpcb *inp, uint32_t slot, int32_t line)
{
	return (tcp_hpts_insert_diag(inp, slot, line, NULL));
}

int
__tcp_queue_to_input_locked(struct inpcb *inp, struct tcp_hpts_entry *hpts, int32_t line)
{
	int32_t retval = 0;

	HPTS_MTX_ASSERT(hpts);
	if (inp->inp_in_input == 0) {
		/* Ok we need to set it on the hpts in the current slot */
		hpts_sane_input_insert(hpts, inp, line);
		retval = 1;
		if (hpts->p_hpts_active == 0) {
			/*
			 * Activate the hpts if it is sleeping.
			 */
			if (logging_on) {
				tcp_hpts_log_it(hpts, inp, HPTSLOG_WAKEUP_INPUT, 0, 0);
			}
			retval = 2;
			hpts->p_direct_wake = 1;
			tcp_wakeinput(hpts);
		}
	} else if (hpts->p_hpts_active == 0) {
		retval = 4;
		hpts->p_direct_wake = 1;
		tcp_wakeinput(hpts);
	}
	return (retval);
}

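/*
 * Stash the parsed header offset, length, TOS and lock state in the
 * mbuf pkthdr and append the mbuf to the tcb's input chain
 * (t_in_pkt/t_tail_pkt). This only queues the packet;
 * __tcp_queue_to_input() below additionally places the inpcb on the
 * hpts input queue and wakes the hpts if needed.
 */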
void
tcp_queue_pkt_to_input(struct tcpcb *tp, struct mbuf *m, struct tcphdr *th,
    int32_t tlen, int32_t drop_hdrlen, uint8_t iptos, uint8_t ti_locked)
{
	/* Setup packet for input first */
	INP_WLOCK_ASSERT(tp->t_inpcb);
	m->m_pkthdr.pace_thoff = (uint16_t) ((caddr_t)th - mtod(m, caddr_t));
	m->m_pkthdr.pace_tlen = (uint16_t) tlen;
	m->m_pkthdr.pace_drphdrlen = drop_hdrlen;
	m->m_pkthdr.pace_tos = iptos;
	m->m_pkthdr.pace_lock = (uint8_t) ti_locked;
	if (tp->t_in_pkt == NULL) {
		tp->t_in_pkt = m;
		tp->t_tail_pkt = m;
	} else {
		tp->t_tail_pkt->m_nextpkt = m;
		tp->t_tail_pkt = m;
	}
}

int32_t
__tcp_queue_to_input(struct tcpcb *tp, struct mbuf *m, struct tcphdr *th,
    int32_t tlen, int32_t drop_hdrlen, uint8_t iptos, uint8_t ti_locked, int32_t line)
{
	struct tcp_hpts_entry *hpts;
	int32_t ret;

	tcp_queue_pkt_to_input(tp, m, th, tlen, drop_hdrlen, iptos, ti_locked);
	hpts = tcp_input_lock(tp->t_inpcb);
	ret = __tcp_queue_to_input_locked(tp->t_inpcb, hpts, line);
	mtx_unlock(&hpts->p_mtx);
	return (ret);
}

void
__tcp_set_inp_to_drop(struct inpcb *inp, uint16_t reason, int32_t line)
{
	struct tcp_hpts_entry *hpts;
	struct tcpcb *tp;

	tp = intotcpcb(inp);
	hpts = tcp_input_lock(tp->t_inpcb);
	if (inp->inp_in_input == 0) {
		/* Ok we need to set it on the hpts in the current slot */
		hpts_sane_input_insert(hpts, inp, line);
		if (hpts->p_hpts_active == 0) {
			/*
			 * Activate the hpts if it is sleeping.
			 */
			hpts->p_direct_wake = 1;
			tcp_wakeinput(hpts);
		}
	} else if (hpts->p_hpts_active == 0) {
		hpts->p_direct_wake = 1;
		tcp_wakeinput(hpts);
	}
	inp->inp_hpts_drop_reas = reason;
	mtx_unlock(&hpts->p_mtx);
}

static uint16_t
hpts_random_cpu(struct inpcb *inp)
{
	/*
	 * No flow type set; distribute the load randomly.
	 */
	uint16_t cpuid;
	uint32_t ran;

	/*
	 * If one has been set, use it, i.e. we want both in and out on
	 * the same hpts.
	 */
	if (inp->inp_input_cpu_set) {
		return (inp->inp_input_cpu);
	} else if (inp->inp_hpts_cpu_set) {
		return (inp->inp_hpts_cpu);
	}
	/* Nothing set; use a random number */
	ran = arc4random();
	cpuid = (ran & 0xffff) % mp_ncpus;
	return (cpuid);
}

static uint16_t
hpts_cpuid(struct inpcb *inp)
{
	uint16_t cpuid;

	/*
	 * If one has been set, use it, i.e. we want both in and out on
	 * the same hpts.
	 */
	if (inp->inp_input_cpu_set) {
		return (inp->inp_input_cpu);
	} else if (inp->inp_hpts_cpu_set) {
		return (inp->inp_hpts_cpu);
	}
	/* If one is set the other must be the same */
#ifdef	RSS
	cpuid = rss_hash2cpuid(inp->inp_flowid, inp->inp_flowtype);
	if (cpuid == NETISR_CPUID_NONE)
		return (hpts_random_cpu(inp));
	else
		return (cpuid);
#else
	/*
	 * We don't have a flowid -> cpuid mapping, so cheat and just map
	 * unknown cpuids to curcpu.  Not the best, but apparently better
	 * than defaulting to swi 0.
	 */
	if (inp->inp_flowtype != M_HASHTYPE_NONE) {
		cpuid = inp->inp_flowid % mp_ncpus;
		return (cpuid);
	}
	cpuid = hpts_random_cpu(inp);
	return (cpuid);
#endif
}

/*
 * Do NOT try to optimize the processing of inp's
 * by first pulling off all the inp's into a temporary
 * list (e.g. TAILQ_CONCAT). If you do that the subtle
 * interactions of switching CPU's will kill you because of
 * problems in the linked list manipulation. Basically
 * you would switch cpu's with the hpts mutex locked,
 * but then while you were processing one of the inp's
 * some other inp that you switched will get a new
 * packet on the different CPU. It will insert it
 * on the new hpts's input list. Creating a temporary
 * link in the inp will not fix it either, since
 * the other hpts will be doing the same thing and
 * you will both end up using the temporary link.
 *
 * You will die in an ASSERT for tailq corruption if you
 * run INVARIANTS, or you will die horribly without
 * INVARIANTS in some unknown way with a corrupt linked
 * list.
 */
static void
tcp_input_data(struct tcp_hpts_entry *hpts, struct timeval *tv)
{
	struct mbuf *m, *n;
	struct tcpcb *tp;
	struct inpcb *inp;
	uint16_t drop_reason;
	int16_t set_cpu;
	uint32_t did_prefetch = 0;
	int32_t ti_locked = TI_UNLOCKED;

	HPTS_MTX_ASSERT(hpts);
	while ((inp = TAILQ_FIRST(&hpts->p_input)) != NULL) {
		HPTS_MTX_ASSERT(hpts);
		hpts_sane_input_remove(hpts, inp, 0);
		if (inp->inp_input_cpu_set == 0) {
			set_cpu = 1;
		} else {
			set_cpu = 0;
		}
		hpts->p_inp = inp;
		drop_reason = inp->inp_hpts_drop_reas;
		inp->inp_in_input = 0;
		mtx_unlock(&hpts->p_mtx);
		if (drop_reason) {
			INP_INFO_RLOCK(&V_tcbinfo);
			ti_locked = TI_RLOCKED;
		} else {
			ti_locked = TI_UNLOCKED;
		}
		INP_WLOCK(inp);
		if ((inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) ||
		    (inp->inp_flags2 & INP_FREED)) {
out:
			hpts->p_inp = NULL;
			if (ti_locked == TI_RLOCKED) {
				INP_INFO_RUNLOCK(&V_tcbinfo);
			}
			if (in_pcbrele_wlocked(inp) == 0) {
				INP_WUNLOCK(inp);
			}
			ti_locked = TI_UNLOCKED;
			mtx_lock(&hpts->p_mtx);
			continue;
		}
		tp = intotcpcb(inp);
		if ((tp == NULL) || (tp->t_inpcb == NULL)) {
			goto out;
		}
		if (drop_reason) {
			/* This tcb is being destroyed for drop_reason */
			m = tp->t_in_pkt;
			if (m)
				n = m->m_nextpkt;
			else
				n = NULL;
			tp->t_in_pkt = NULL;
			while (m) {
				m_freem(m);
				m = n;
				if (m)
					n = m->m_nextpkt;
			}
			tp = tcp_drop(tp, drop_reason);
			INP_INFO_RUNLOCK(&V_tcbinfo);
			if (tp == NULL) {
				INP_WLOCK(inp);
			}
			if (in_pcbrele_wlocked(inp) == 0)
				INP_WUNLOCK(inp);
			mtx_lock(&hpts->p_mtx);
			continue;
		}
		if (set_cpu) {
			/*
			 * Setup so the next time we will move to the right
			 * CPU. This should be a rare event. It will
			 * sometimes happen when we are the client side
			 * (usually not the server). Somehow tcp_output()
			 * gets called before the tcp_do_segment() sets the
			 * initial state. This means the r_cpu and r_hpts_cpu
			 * are 0. We get on the hpts, and then tcp_input()
			 * gets called setting up the r_cpu to the correct
			 * value. The hpts goes off and sees the mis-match.
			 * We simply correct it here and the CPU will switch
			 * to the new hpts next time the tcb gets added to
			 * the hpts (not this time) :-)
			 */
			tcp_set_hpts(inp);
		}
		CURVNET_SET(tp->t_vnet);
		m = tp->t_in_pkt;
		n = NULL;
		if (m != NULL &&
		    (m->m_pkthdr.pace_lock == TI_RLOCKED ||
		    tp->t_state != TCPS_ESTABLISHED)) {
			ti_locked = TI_RLOCKED;
			if (tcp_hptsi_lock_inpinfo(inp, &tp)) {
				CURVNET_RESTORE();
				goto out;
			}
			m = tp->t_in_pkt;
		}
		if (in_newts_every_tcb) {
			if (in_ts_percision)
				microuptime(tv);
			else
				getmicrouptime(tv);
		}
		if (tp->t_fb_ptr != NULL) {
			kern_prefetch(tp->t_fb_ptr, &did_prefetch);
			did_prefetch = 1;
		}
		/* Any input work to do? If so, do it first. */
		if ((m != NULL) && (m == tp->t_in_pkt)) {
			struct tcphdr *th;
			int32_t tlen, drop_hdrlen, nxt_pkt;
			uint8_t iptos;

			n = m->m_nextpkt;
			tp->t_in_pkt = tp->t_tail_pkt = NULL;
			while (m) {
				th = (struct tcphdr *)(mtod(m, caddr_t)+m->m_pkthdr.pace_thoff);
				tlen = m->m_pkthdr.pace_tlen;
				drop_hdrlen = m->m_pkthdr.pace_drphdrlen;
				iptos = m->m_pkthdr.pace_tos;
				m->m_nextpkt = NULL;
				if (n)
					nxt_pkt = 1;
				else
					nxt_pkt = 0;
				inp->inp_input_calls = 1;
				if (tp->t_fb->tfb_tcp_hpts_do_segment) {
					/* Use the hpts specific do_segment */
					(*tp->t_fb->tfb_tcp_hpts_do_segment) (m, th, inp->inp_socket,
					    tp, drop_hdrlen,
					    tlen, iptos, ti_locked, nxt_pkt, tv);
				} else {
					/* Use the default do_segment */
					(*tp->t_fb->tfb_tcp_do_segment) (m, th, inp->inp_socket,
					    tp, drop_hdrlen,
					    tlen, iptos, ti_locked);
				}
				/*
				 * Do segment returns unlocked; we need the
				 * lock again, and we also need some
				 * KASSERTs here.
				 */
				INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
				INP_UNLOCK_ASSERT(inp);
				m = n;
				if (m)
					n = m->m_nextpkt;
				if (m != NULL &&
				    m->m_pkthdr.pace_lock == TI_RLOCKED) {
					INP_INFO_RLOCK(&V_tcbinfo);
					ti_locked = TI_RLOCKED;
				} else
					ti_locked = TI_UNLOCKED;
				INP_WLOCK(inp);
				/*
				 * Since we have an opening here we must
				 * re-check if the tcb went away while we
				 * were getting the lock(s).
				 */
				if ((inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) ||
				    (inp->inp_flags2 & INP_FREED)) {
			out_free:
					while (m) {
						m_freem(m);
						m = n;
						if (m)
							n = m->m_nextpkt;
					}
					CURVNET_RESTORE();
					goto out;
				}
				/*
				 * Now that we hold the INP lock, check if
				 * we need to upgrade our lock.
				 */
				if (ti_locked == TI_UNLOCKED &&
				    (tp->t_state != TCPS_ESTABLISHED)) {
					ti_locked = TI_RLOCKED;
					if (tcp_hptsi_lock_inpinfo(inp, &tp))
						goto out_free;
				}
			}	/** end while(m) */
		}		/** end if ((m != NULL)  && (m == tp->t_in_pkt)) */
		if (in_pcbrele_wlocked(inp) == 0)
			INP_WUNLOCK(inp);
		if (ti_locked == TI_RLOCKED)
			INP_INFO_RUNLOCK(&V_tcbinfo);
		INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
		INP_UNLOCK_ASSERT(inp);
		ti_locked = TI_UNLOCKED;
		mtx_lock(&hpts->p_mtx);
		hpts->p_inp = NULL;
		CURVNET_RESTORE();
	}
}

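/*
 * Estimate how many wheel ticks have elapsed since the hpts last ran,
 * capped at NUM_OF_HPTSI_SLOTS - 2, and set p_nxt_slot to where the
 * wheel will be once we catch up. Returns -1 if we were woken within
 * the same tick and there is nothing to run.
 */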
static int
tcp_hpts_est_run(struct tcp_hpts_entry *hpts)
{
	int32_t ticks_to_run;

	if (hpts->p_prevtick && (SEQ_GT(hpts->p_curtick, hpts->p_prevtick))) {
		ticks_to_run = hpts->p_curtick - hpts->p_prevtick;
		if (ticks_to_run >= (NUM_OF_HPTSI_SLOTS - 1)) {
			ticks_to_run = NUM_OF_HPTSI_SLOTS - 2;
		}
	} else {
		if (hpts->p_prevtick == hpts->p_curtick) {
			/* This happens when we get woken up right away */
			return (-1);
		}
		ticks_to_run = 1;
	}
	/* Record where we will be when we catch up */
	hpts->p_nxt_slot = (hpts->p_cur_slot + ticks_to_run) % NUM_OF_HPTSI_SLOTS;
	if (hpts->p_nxt_slot == hpts->p_cur_slot) {
		panic("Impossible math -- hpts:%p p_nxt_slot:%d p_cur_slot:%d ticks_to_run:%d",
		    hpts, hpts->p_nxt_slot, hpts->p_cur_slot, ticks_to_run);
	}
	return (ticks_to_run);
}

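/*
 * The main wheel run: drain any queued input, walk each slot between
 * where we stopped and the current tick calling the stack's output
 * method for every inpcb found, re-run input that arrived meanwhile,
 * and finally compute how long the hpts should sleep before its next
 * pass.
 */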
static void
tcp_hptsi(struct tcp_hpts_entry *hpts, struct timeval *ctick)
{
	struct tcpcb *tp;
	struct inpcb *inp = NULL, *ninp;
	struct timeval tv;
	int32_t ticks_to_run, i, error, tick_now, interum_tick;
	int32_t paced_cnt = 0;
	int32_t did_prefetch = 0;
	int32_t prefetch_ninp = 0;
	int32_t prefetch_tp = 0;
	uint32_t cts;
	int16_t set_cpu;

	HPTS_MTX_ASSERT(hpts);
	hpts->p_curtick = tcp_tv_to_hptstick(ctick);
	cts = tcp_tv_to_usectick(ctick);
	memcpy(&tv, ctick, sizeof(struct timeval));
	hpts->p_cur_slot = hpts_tick(hpts, 1);

	/* Figure out if we had missed ticks */
again:
	HPTS_MTX_ASSERT(hpts);
	ticks_to_run = tcp_hpts_est_run(hpts);
	if (!TAILQ_EMPTY(&hpts->p_input)) {
		tcp_input_data(hpts, &tv);
	}
#ifdef INVARIANTS
	if (TAILQ_EMPTY(&hpts->p_input) &&
	    (hpts->p_on_inqueue_cnt != 0)) {
		panic("tp:%p in_hpts input empty but cnt:%d",
		    hpts, hpts->p_on_inqueue_cnt);
	}
#endif
	HPTS_MTX_ASSERT(hpts);
	/* Reset the ticks to run and time if we need to */
	interum_tick = tcp_gethptstick(&tv);
	if (interum_tick != hpts->p_curtick) {
		/* Save off the new time we execute to */
		*ctick = tv;
		hpts->p_curtick = interum_tick;
		cts = tcp_tv_to_usectick(&tv);
		hpts->p_cur_slot = hpts_tick(hpts, 1);
		ticks_to_run = tcp_hpts_est_run(hpts);
	}
	if (ticks_to_run == -1) {
		goto no_run;
	}
	if (logging_on) {
		tcp_hpts_log_it(hpts, inp, HPTSLOG_SETTORUN, ticks_to_run, 0);
	}
	if (hpts->p_on_queue_cnt == 0) {
		goto no_one;
	}
	HPTS_MTX_ASSERT(hpts);
	for (i = 0; i < ticks_to_run; i++) {
		/*
		 * Calculate our delay; if there are no extra ticks, there
		 * was no delay.
		 */
		hpts->p_delayed_by = (ticks_to_run - (i + 1)) * HPTS_TICKS_PER_USEC;
		HPTS_MTX_ASSERT(hpts);
		while ((inp = TAILQ_FIRST(&hpts->p_hptss[hpts->p_cur_slot])) != NULL) {
			/* For debugging */
			if (logging_on) {
				tcp_hpts_log_it(hpts, inp, HPTSLOG_HPTSI, ticks_to_run, i);
			}
			hpts->p_inp = inp;
			paced_cnt++;
			if (hpts->p_cur_slot != inp->inp_hptsslot) {
				panic("Hpts:%p inp:%p slot mis-aligned %u vs %u",
				    hpts, inp, hpts->p_cur_slot, inp->inp_hptsslot);
			}
			/* Now pull it */
			if (inp->inp_hpts_cpu_set == 0) {
				set_cpu = 1;
			} else {
				set_cpu = 0;
			}
			hpts_sane_pace_remove(hpts, inp, &hpts->p_hptss[hpts->p_cur_slot], 0);
			if ((ninp = TAILQ_FIRST(&hpts->p_hptss[hpts->p_cur_slot])) != NULL) {
				/* We prefetch the next inp if possible */
				kern_prefetch(ninp, &prefetch_ninp);
				prefetch_ninp = 1;
			}
			if (inp->inp_hpts_request) {
				/*
				 * This guy is deferred out further in time
				 * than our wheel had on it. Push him back
				 * on the wheel.
				 */
				int32_t remaining_slots;

				remaining_slots = ticks_to_run - (i + 1);
				if (inp->inp_hpts_request > remaining_slots) {
					/*
					 * Keep INVARIANTS happy by clearing
					 * the flag
					 */
					tcp_hpts_insert_locked(hpts, inp, inp->inp_hpts_request, cts, __LINE__, NULL, 1);
					hpts->p_inp = NULL;
					continue;
				}
				inp->inp_hpts_request = 0;
			}
			/*
			 * We clear the hpts flag here after dealing with
			 * remaining slots. This way anyone looking with the
			 * TCB lock will see it is on the hpts until just
			 * before we unlock.
			 */
			inp->inp_in_hpts = 0;
			mtx_unlock(&hpts->p_mtx);
			INP_WLOCK(inp);
			if (in_pcbrele_wlocked(inp)) {
				mtx_lock(&hpts->p_mtx);
				if (logging_on)
					tcp_hpts_log_it(hpts, hpts->p_inp, HPTSLOG_INP_DONE, 0, 1);
				hpts->p_inp = NULL;
				continue;
			}
			if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
out_now:
#ifdef INVARIANTS
				if (mtx_owned(&hpts->p_mtx)) {
					panic("Hpts:%p owns mtx prior-to lock line:%d",
					    hpts, __LINE__);
				}
#endif
				INP_WUNLOCK(inp);
				mtx_lock(&hpts->p_mtx);
				if (logging_on)
					tcp_hpts_log_it(hpts, hpts->p_inp, HPTSLOG_INP_DONE, 0, 3);
				hpts->p_inp = NULL;
				continue;
			}
			tp = intotcpcb(inp);
			if ((tp == NULL) || (tp->t_inpcb == NULL)) {
				goto out_now;
			}
			if (set_cpu) {
				/*
				 * Setup so the next time we will move to
				 * the right CPU. This should be a rare
				 * event. It will sometimes happen when we
				 * are the client side (usually not the
				 * server). Somehow tcp_output() gets called
				 * before the tcp_do_segment() sets the
				 * initial state. This means the r_cpu and
				 * r_hpts_cpu are 0. We get on the hpts, and
				 * then tcp_input() gets called setting up
				 * the r_cpu to the correct value. The hpts
				 * goes off and sees the mis-match. We
				 * simply correct it here and the CPU will
				 * switch to the new hpts next time the tcb
				 * gets added to the hpts (not this one)
				 * :-)
				 */
				tcp_set_hpts(inp);
			}
			if (out_newts_every_tcb) {
				struct timeval sv;

				if (out_ts_percision)
					microuptime(&sv);
				else
					getmicrouptime(&sv);
				cts = tcp_tv_to_usectick(&sv);
			}
			CURVNET_SET(tp->t_vnet);
			/*
			 * There is a hole here: we get the refcnt on the
			 * inp so it will still be preserved, but to make
			 * sure we can get the INP we need to hold the p_mtx
			 * above while we pull out the tp/inp. As long as
			 * fini gets the lock first we are assured of having
			 * a sane INP we can lock and test.
			 */
#ifdef INVARIANTS
			if (mtx_owned(&hpts->p_mtx)) {
				panic("Hpts:%p owns mtx before tcp-output:%d",
				    hpts, __LINE__);
			}
#endif
			if (tp->t_fb_ptr != NULL) {
				kern_prefetch(tp->t_fb_ptr, &did_prefetch);
				did_prefetch = 1;
			}
			inp->inp_hpts_calls = 1;
			if (tp->t_fb->tfb_tcp_output_wtime != NULL) {
				error = (*tp->t_fb->tfb_tcp_output_wtime) (tp, &tv);
			} else {
				error = tp->t_fb->tfb_tcp_output(tp);
			}
			if (ninp && ninp->inp_ppcb) {
				/*
				 * If we have a nxt inp, see if we can
				 * prefetch its ppcb. Note this may seem
				 * "risky" since we have no locks (other
				 * than the previous inp) and there is no
				 * assurance that ninp was not pulled while
				 * we were processing inp and freed. If this
				 * occurred it could mean that either:
				 *
				 * a) It's NULL (which is fine, we won't go
				 * here) <or> b) It's valid (which is cool,
				 * we will prefetch it) <or> c) The inp got
				 * freed back to the slab which was
				 * reallocated. Then the piece of memory was
				 * re-used and something else (not an
				 * address) is in inp_ppcb. If that occurs
				 * we don't crash, but take a TLB shootdown
				 * performance hit (same as if it was NULL
				 * and we tried to pre-fetch it).
				 *
				 * Considering that the likelihood of <c> is
				 * quite rare we will take a risk on doing
				 * this. If performance drops after testing
				 * we can always take this out. NB: the
				 * kern_prefetch on amd64 actually has
				 * protection against a bad address now via
				 * the DMAP_() tests. This will prevent the
				 * TLB hit, and instead if <c> occurs just
				 * cause us to load cache with a useless
				 * address (to us).
				 */
				kern_prefetch(ninp->inp_ppcb, &prefetch_tp);
				prefetch_tp = 1;
			}
			INP_WUNLOCK(inp);
			INP_UNLOCK_ASSERT(inp);
			CURVNET_RESTORE();
#ifdef INVARIANTS
			if (mtx_owned(&hpts->p_mtx)) {
				panic("Hpts:%p owns mtx prior-to lock line:%d",
				    hpts, __LINE__);
			}
#endif
			mtx_lock(&hpts->p_mtx);
			if (logging_on)
				tcp_hpts_log_it(hpts, hpts->p_inp, HPTSLOG_INP_DONE, 0, 4);
			hpts->p_inp = NULL;
		}
		HPTS_MTX_ASSERT(hpts);
		hpts->p_inp = NULL;
		hpts->p_cur_slot++;
		if (hpts->p_cur_slot >= NUM_OF_HPTSI_SLOTS) {
			hpts->p_cur_slot = 0;
		}
	}
no_one:
	HPTS_MTX_ASSERT(hpts);
	hpts->p_prevtick = hpts->p_curtick;
	hpts->p_delayed_by = 0;
	/*
	 * Check to see if we took an excess amount of time and need to run
	 * more ticks (if we did not hit ENOBUFS).
	 */
	/* Re-run any input that may be there */
	(void)tcp_gethptstick(&tv);
	if (!TAILQ_EMPTY(&hpts->p_input)) {
		tcp_input_data(hpts, &tv);
	}
#ifdef INVARIANTS
	if (TAILQ_EMPTY(&hpts->p_input) &&
	    (hpts->p_on_inqueue_cnt != 0)) {
		panic("tp:%p in_hpts input empty but cnt:%d",
		    hpts, hpts->p_on_inqueue_cnt);
	}
#endif
	tick_now = tcp_gethptstick(&tv);
	if (SEQ_GT(tick_now, hpts->p_prevtick)) {
		struct timeval res;

		/* Did we really spend a full tick or more in here? */
		timersub(&tv, ctick, &res);
		if (res.tv_sec || (res.tv_usec >= HPTS_TICKS_PER_USEC)) {
			counter_u64_add(hpts_loops, 1);
			if (logging_on) {
				tcp_hpts_log_it(hpts, inp, HPTSLOG_TOLONG, (uint32_t) res.tv_usec, tick_now);
			}
			*ctick = res;
			hpts->p_curtick = tick_now;
			goto again;
		}
	}
1709 no_run:
1710 	{
1711 		uint32_t t = 0, i, fnd = 0;
1712 
1713 		if (hpts->p_on_queue_cnt) {
1714 
1715 
1716 			/*
1717 			 * Find next slot that is occupied and use that to
1718 			 * be the sleep time.
1719 			 */
1720 			for (i = 1, t = hpts->p_nxt_slot; i < NUM_OF_HPTSI_SLOTS; i++) {
1721 				if (TAILQ_EMPTY(&hpts->p_hptss[t]) == 0) {
1722 					fnd = 1;
1723 					break;
1724 				}
1725 				t = (t + 1) % NUM_OF_HPTSI_SLOTS;
1726 			}
1727 			if (fnd) {
1728 				hpts->p_hpts_sleep_time = i;
1729 			} else {
1730 				counter_u64_add(back_tosleep, 1);
1731 #ifdef INVARIANTS
1732 				panic("Hpts:%p cnt:%d but non found", hpts, hpts->p_on_queue_cnt);
1733 #endif
1734 				hpts->p_on_queue_cnt = 0;
1735 				goto non_found;
1736 			}
1737 			t++;
1738 		} else {
1739 			/* No one on the wheel sleep for all but 2 slots  */
1740 non_found:
1741 			if (hpts_sleep_max == 0)
1742 				hpts_sleep_max = 1;
1743 			hpts->p_hpts_sleep_time = min((NUM_OF_HPTSI_SLOTS - 2), hpts_sleep_max);
1744 			t = 0;
1745 		}
1746 		if (logging_on) {
1747 			tcp_hpts_log_it(hpts, inp, HPTSLOG_SLEEPSET, t, (hpts->p_hpts_sleep_time * HPTS_TICKS_PER_USEC));
1748 		}
1749 	}
1750 }
1751 
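/*
 * Place the inp on an hpts cpu for both its pacing wheel and its input
 * queue, if a cpu has not yet been chosen for either. Callers normally
 * reach this through a wrapper that supplies __LINE__ (assumed here to
 * be the tcp_set_hpts() macro from tcp_hpts.h); a minimal usage sketch,
 * with the required inp write-lock held:
 *
 *	INP_WLOCK(inp);
 *	tcp_set_hpts(inp);
 *	INP_WUNLOCK(inp);
 */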
1752 void
1753 __tcp_set_hpts(struct inpcb *inp, int32_t line)
1754 {
1755 	struct tcp_hpts_entry *hpts;
1756 
1757 	INP_WLOCK_ASSERT(inp);
1758 	hpts = tcp_hpts_lock(inp);
1759 	if ((inp->inp_in_hpts == 0) &&
1760 	    (inp->inp_hpts_cpu_set == 0)) {
1761 		inp->inp_hpts_cpu = hpts_cpuid(inp);
1762 		inp->inp_hpts_cpu_set = 1;
1763 	}
1764 	mtx_unlock(&hpts->p_mtx);
1765 	hpts = tcp_input_lock(inp);
1766 	if ((inp->inp_input_cpu_set == 0) &&
1767 	    (inp->inp_in_input == 0)) {
1768 		inp->inp_input_cpu = hpts_cpuid(inp);
1769 		inp->inp_input_cpu_set = 1;
1770 	}
1771 	mtx_unlock(&hpts->p_mtx);
1772 }
1773 
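/*
 * Report how far the inp's assigned hpts is currently running behind
 * its schedule (p_delayed_by, in hpts time units); zero when on time.
 * An illustrative use only (not taken from this file): a pacing stack
 * could stretch its next slot request when the wheel is lagging, e.g.
 *
 *	slot += tcp_hpts_delayedby(inp);
 */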
1774 uint16_t
tcp_hpts_delayedby(struct inpcb *inp)
{
1776 	return (tcp_pace.rp_ent[inp->inp_hpts_cpu]->p_delayed_by);
1777 }
1778 
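/*
 * Per-hpts worker, run as an swi handler. It is entered either via a
 * direct wake (p_direct_wake, set by the input path) or because the
 * sleep callout fired; it then makes one tcp_hptsi() pass and re-arms
 * the callout for p_hpts_sleep_time ticks, raised to tcp_min_hptsi_time
 * if that minimum is configured.
 */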
1779 static void
1780 tcp_hpts_thread(void *ctx)
1781 {
1782 	struct tcp_hpts_entry *hpts;
1783 	struct timeval tv;
1784 	sbintime_t sb;
1785 
1786 	hpts = (struct tcp_hpts_entry *)ctx;
1787 	mtx_lock(&hpts->p_mtx);
1788 	if (hpts->p_direct_wake) {
1789 		/* Signaled by input */
1790 		if (logging_on)
1791 			tcp_hpts_log_it(hpts, NULL, HPTSLOG_AWAKE, 1, 1);
1792 		callout_stop(&hpts->co);
1793 	} else {
1794 		/* Timed out */
1795 		if (callout_pending(&hpts->co) ||
1796 		    !callout_active(&hpts->co)) {
1797 			if (logging_on)
1798 				tcp_hpts_log_it(hpts, NULL, HPTSLOG_AWAKE, 2, 2);
1799 			mtx_unlock(&hpts->p_mtx);
1800 			return;
1801 		}
1802 		callout_deactivate(&hpts->co);
1803 		if (logging_on)
1804 			tcp_hpts_log_it(hpts, NULL, HPTSLOG_AWAKE, 3, 3);
1805 	}
1806 	hpts->p_hpts_active = 1;
1807 	(void)tcp_gethptstick(&tv);
1808 	tcp_hptsi(hpts, &tv);
1809 	HPTS_MTX_ASSERT(hpts);
1810 	tv.tv_sec = 0;
1811 	tv.tv_usec = hpts->p_hpts_sleep_time * HPTS_TICKS_PER_USEC;
1812 	if (tcp_min_hptsi_time && (tv.tv_usec < tcp_min_hptsi_time)) {
1813 		tv.tv_usec = tcp_min_hptsi_time;
1814 		hpts->p_on_min_sleep = 1;
1815 	} else {
1816 		/* Clear the min sleep flag */
1817 		hpts->p_on_min_sleep = 0;
1818 	}
1819 	hpts->p_hpts_active = 0;
1820 	sb = tvtosbt(tv);
1821 	if (tcp_hpts_callout_skip_swi == 0) {
1822 		callout_reset_sbt_on(&hpts->co, sb, 0,
1823 		    hpts_timeout_swi, hpts, hpts->p_cpu,
1824 		    (C_DIRECT_EXEC | C_PREL(tcp_hpts_precision)));
1825 	} else {
1826 		callout_reset_sbt_on(&hpts->co, sb, 0,
1827 		    hpts_timeout_dir, hpts,
1828 		    hpts->p_cpu,
1829 		    C_PREL(tcp_hpts_precision));
1830 	}
1831 	hpts->p_direct_wake = 0;
1832 	mtx_unlock(&hpts->p_mtx);
1833 }
1834 
1835 #undef	timersub
1836 
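/*
 * Boot-time setup: allocate a tcp_hpts_entry per cpu (wheel slots,
 * input queue, sysctl node and optional log buffer), then attach an
 * swi thread to each entry and arm its first sleep callout.
 */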
1837 static void
1838 tcp_init_hptsi(void *st)
1839 {
1840 	int32_t i, j, error, bound = 0, created = 0;
1841 	size_t sz, asz;
1842 	struct timeval tv;
1843 	sbintime_t sb;
1844 	struct tcp_hpts_entry *hpts;
1845 	char unit[16];
1846 	uint32_t ncpus = mp_ncpus ? mp_ncpus : MAXCPU;
1847 
1848 	tcp_pace.rp_proc = NULL;
1849 	tcp_pace.rp_num_hptss = ncpus;
1850 	hpts_loops = counter_u64_alloc(M_WAITOK);
1851 	back_tosleep = counter_u64_alloc(M_WAITOK);
1852 
1853 	sz = (tcp_pace.rp_num_hptss * sizeof(struct tcp_hpts_entry *));
1854 	tcp_pace.rp_ent = malloc(sz, M_TCPHPTS, M_WAITOK | M_ZERO);
1855 	asz = sizeof(struct hptsh) * NUM_OF_HPTSI_SLOTS;
1856 	for (i = 0; i < tcp_pace.rp_num_hptss; i++) {
1857 		tcp_pace.rp_ent[i] = malloc(sizeof(struct tcp_hpts_entry),
1858 		    M_TCPHPTS, M_WAITOK | M_ZERO);
1859 		tcp_pace.rp_ent[i]->p_hptss = malloc(asz,
1860 		    M_TCPHPTS, M_WAITOK);
1861 		hpts = tcp_pace.rp_ent[i];
1862 		/*
1863 		 * Init all the hpts structures that are not specifically
		 * zeroed by the allocations. Also attach them to the
		 * appropriate sysctl block.
1866 		 */
1867 		mtx_init(&hpts->p_mtx, "tcp_hpts_lck",
1868 		    "hpts", MTX_DEF | MTX_DUPOK);
1869 		TAILQ_INIT(&hpts->p_input);
1870 		for (j = 0; j < NUM_OF_HPTSI_SLOTS; j++) {
1871 			TAILQ_INIT(&hpts->p_hptss[j]);
1872 		}
1873 		sysctl_ctx_init(&hpts->hpts_ctx);
1874 		sprintf(unit, "%d", i);
1875 		hpts->hpts_root = SYSCTL_ADD_NODE(&hpts->hpts_ctx,
1876 		    SYSCTL_STATIC_CHILDREN(_net_inet_tcp_hpts),
1877 		    OID_AUTO,
1878 		    unit,
1879 		    CTLFLAG_RW, 0,
1880 		    "");
1881 		SYSCTL_ADD_INT(&hpts->hpts_ctx,
1882 		    SYSCTL_CHILDREN(hpts->hpts_root),
1883 		    OID_AUTO, "in_qcnt", CTLFLAG_RD,
1884 		    &hpts->p_on_inqueue_cnt, 0,
		    "Count of TCBs awaiting input processing");
1886 		SYSCTL_ADD_INT(&hpts->hpts_ctx,
1887 		    SYSCTL_CHILDREN(hpts->hpts_root),
1888 		    OID_AUTO, "out_qcnt", CTLFLAG_RD,
1889 		    &hpts->p_on_queue_cnt, 0,
		    "Count of TCBs awaiting output processing");
1891 		SYSCTL_ADD_UINT(&hpts->hpts_ctx,
1892 		    SYSCTL_CHILDREN(hpts->hpts_root),
1893 		    OID_AUTO, "active", CTLFLAG_RD,
1894 		    &hpts->p_hpts_active, 0,
1895 		    "Is the hpts active");
1896 		SYSCTL_ADD_UINT(&hpts->hpts_ctx,
1897 		    SYSCTL_CHILDREN(hpts->hpts_root),
1898 		    OID_AUTO, "curslot", CTLFLAG_RD,
1899 		    &hpts->p_cur_slot, 0,
1900 		    "What the current slot is if active");
1901 		SYSCTL_ADD_UINT(&hpts->hpts_ctx,
1902 		    SYSCTL_CHILDREN(hpts->hpts_root),
1903 		    OID_AUTO, "curtick", CTLFLAG_RD,
1904 		    &hpts->p_curtick, 0,
		    "What the current tick is if active");
1906 		SYSCTL_ADD_UINT(&hpts->hpts_ctx,
1907 		    SYSCTL_CHILDREN(hpts->hpts_root),
1908 		    OID_AUTO, "logsize", CTLFLAG_RD,
1909 		    &hpts->p_logsize, 0,
1910 		    "Hpts logging buffer size");
1911 		hpts->p_hpts_sleep_time = NUM_OF_HPTSI_SLOTS - 2;
1912 		hpts->p_num = i;
1913 		hpts->p_prevtick = hpts->p_curtick = tcp_gethptstick(&tv);
1914 		hpts->p_prevtick -= 1;
1915 		hpts->p_prevtick %= NUM_OF_HPTSI_SLOTS;
1916 		hpts->p_cpu = 0xffff;
1917 		hpts->p_nxt_slot = 1;
1918 		hpts->p_logsize = tcp_hpts_logging_size;
1919 		if (hpts->p_logsize) {
1920 			sz = (sizeof(struct hpts_log) * hpts->p_logsize);
1921 			hpts->p_log = malloc(sz, M_TCPHPTS, M_WAITOK | M_ZERO);
1922 		}
1923 		callout_init(&hpts->co, 1);
1924 	}
1925 	/*
	 * Now let's start ithreads to handle the hptss.
1927 	 */
1928 	CPU_FOREACH(i) {
1929 		hpts = tcp_pace.rp_ent[i];
1930 		hpts->p_cpu = i;
1931 		error = swi_add(&hpts->ie, "hpts",
1932 		    tcp_hpts_thread, (void *)hpts,
1933 		    SWI_NET, INTR_MPSAFE, &hpts->ie_cookie);
1934 		if (error) {
1935 			panic("Can't add hpts:%p i:%d err:%d",
1936 			    hpts, i, error);
1937 		}
1938 		created++;
1939 		if (tcp_bind_threads) {
1940 			if (intr_event_bind(hpts->ie, i) == 0)
1941 				bound++;
1942 		}
1943 		tv.tv_sec = 0;
1944 		tv.tv_usec = hpts->p_hpts_sleep_time * HPTS_TICKS_PER_USEC;
1945 		sb = tvtosbt(tv);
1946 		if (tcp_hpts_callout_skip_swi == 0) {
1947 			callout_reset_sbt_on(&hpts->co, sb, 0,
1948 			    hpts_timeout_swi, hpts, hpts->p_cpu,
1949 			    (C_DIRECT_EXEC | C_PREL(tcp_hpts_precision)));
1950 		} else {
1951 			callout_reset_sbt_on(&hpts->co, sb, 0,
1952 			    hpts_timeout_dir, hpts,
1953 			    hpts->p_cpu,
1954 			    C_PREL(tcp_hpts_precision));
1955 		}
1956 	}
	printf("TCP Hpts created %d swi interrupt threads and bound %d\n",
1958 	    created, bound);
1959 	return;
1960 }
1961 
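/*
 * Start the hpts system once kthreads can be created. The module
 * version allows consumers of hpts (RACK/BBR style stacks) to record
 * a MODULE_DEPEND on tcphpts.
 */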
1962 SYSINIT(tcphptsi, SI_SUB_KTHREAD_IDLE, SI_ORDER_ANY, tcp_init_hptsi, NULL);
1963 MODULE_VERSION(tcphpts, 1);
1964