/* xref: /freebsd/sys/kern/kern_poll.c (revision 0b37c1590418417c894529d371800dfac71ef887) */
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2001-2002 Luigi Rizzo
 *
 * Supported by: the Xorp Project (www.xorp.org)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_device_polling.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/proc.h>
#include <sys/epoch.h>
#include <sys/eventhandler.h>
#include <sys/resourcevar.h>
#include <sys/socket.h>			/* needed by net/if.h		*/
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/netisr.h>			/* for NETISR_POLL		*/
#include <net/vnet.h>

void hardclock_device_poll(void);	/* hook from hardclock		*/

static struct mtx	poll_mtx;

/*
 * Polling support for [network] device drivers.
 *
 * Drivers which support this feature can register with the
 * polling code.
 *
 * If registration is successful, the driver must disable interrupts,
 * and further I/O is performed through the handler, which is invoked
 * (at least once per clock tick) with 3 arguments: the "arg" passed at
 * register time (a struct ifnet pointer), a command, and a "count" limit.
 *
 * The command can be one of the following:
 *  POLL_ONLY: quick move of "count" packets from input/output queues.
 *  POLL_AND_CHECK_STATUS: as above, plus check status registers or do
 *	other more expensive operations. This command is issued periodically
 *	but less frequently than POLL_ONLY.
 *
 * The count limit specifies how much work the handler can do during the
 * call -- typically this is the number of packets to be received, or
 * transmitted, etc. (drivers are free to interpret this number, as long
 * as the max time spent in the function grows roughly linearly with the
 * count).
 *
 * Polling is enabled and disabled by setting the IFCAP_POLLING flag on
 * the interface. The driver's ioctl handler should register the interface
 * with the polling code and disable interrupts if the registration was
 * successful.
 *
 * A second variable controls the sharing of CPU between polling/kernel
 * network processing, and other activities (typically userlevel tasks):
 * kern.polling.user_frac (between 0 and 99, default 50) sets the share
 * of CPU allocated to user tasks. CPU is allocated proportionally to the
 * shares, by dynamically adjusting the "count" (poll_burst).
 *
 * Other parameters should be left at their default values.
 * The following constraints hold:
 *
 *	1 <= poll_each_burst <= poll_burst <= poll_burst_max
 *	MIN_POLL_BURST_MAX <= poll_burst_max <= MAX_POLL_BURST_MAX
 */

#define MIN_POLL_BURST_MAX	10
#define MAX_POLL_BURST_MAX	20000

static uint32_t poll_burst = 5;
static uint32_t poll_burst_max = 150;	/* good for 100Mbit net and HZ=1000 */
static uint32_t poll_each_burst = 5;

static SYSCTL_NODE(_kern, OID_AUTO, polling, CTLFLAG_RW, 0,
	"Device polling parameters");

SYSCTL_UINT(_kern_polling, OID_AUTO, burst, CTLFLAG_RD,
	&poll_burst, 0, "Current polling burst size");

static int	netisr_poll_scheduled;
static int	netisr_pollmore_scheduled;
static int	poll_shutting_down;

static int poll_burst_max_sysctl(SYSCTL_HANDLER_ARGS)
{
	uint32_t val = poll_burst_max;
	int error;

	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return (error);
	if (val < MIN_POLL_BURST_MAX || val > MAX_POLL_BURST_MAX)
		return (EINVAL);

	mtx_lock(&poll_mtx);
	poll_burst_max = val;
	if (poll_burst > poll_burst_max)
		poll_burst = poll_burst_max;
	if (poll_each_burst > poll_burst_max)
		poll_each_burst = poll_burst_max;
	mtx_unlock(&poll_mtx);

	return (0);
}
SYSCTL_PROC(_kern_polling, OID_AUTO, burst_max, CTLTYPE_UINT | CTLFLAG_RW,
	0, sizeof(uint32_t), poll_burst_max_sysctl, "I", "Max Polling burst size");
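
/*
 * The knob above is tunable at runtime via sysctl(8), e.g.:
 *
 *	sysctl kern.polling.burst_max=1000
 *
 * Values outside the [MIN_POLL_BURST_MAX, MAX_POLL_BURST_MAX] range
 * are rejected with EINVAL by the handler above.
 */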
137 
138 static int poll_each_burst_sysctl(SYSCTL_HANDLER_ARGS)
139 {
140 	uint32_t val = poll_each_burst;
141 	int error;
142 
143 	error = sysctl_handle_int(oidp, &val, 0, req);
144 	if (error || !req->newptr )
145 		return (error);
146 	if (val < 1)
147 		return (EINVAL);
148 
149 	mtx_lock(&poll_mtx);
150 	if (val > poll_burst_max) {
151 		mtx_unlock(&poll_mtx);
152 		return (EINVAL);
153 	}
154 	poll_each_burst = val;
155 	mtx_unlock(&poll_mtx);
156 
157 	return (0);
158 }
159 SYSCTL_PROC(_kern_polling, OID_AUTO, each_burst, CTLTYPE_UINT | CTLFLAG_RW,
160 	0, sizeof(uint32_t), poll_each_burst_sysctl, "I",
161 	"Max size of each burst");
162 
static uint32_t poll_in_idle_loop = 0;	/* do we poll in idle loop? */
SYSCTL_UINT(_kern_polling, OID_AUTO, idle_poll, CTLFLAG_RW,
	&poll_in_idle_loop, 0, "Enable device polling in idle loop");

static uint32_t user_frac = 50;
static int user_frac_sysctl(SYSCTL_HANDLER_ARGS)
{
	uint32_t val = user_frac;
	int error;

	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return (error);
	if (val > 99)
		return (EINVAL);

	mtx_lock(&poll_mtx);
	user_frac = val;
	mtx_unlock(&poll_mtx);

	return (0);
}
SYSCTL_PROC(_kern_polling, OID_AUTO, user_frac, CTLTYPE_UINT | CTLFLAG_RW,
	0, sizeof(uint32_t), user_frac_sysctl, "I",
	"Desired user fraction of cpu time");

static uint32_t reg_frac_count = 0;
static uint32_t reg_frac = 20;
static int reg_frac_sysctl(SYSCTL_HANDLER_ARGS)
{
	uint32_t val = reg_frac;
	int error;

	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return (error);
	if (val < 1 || val > hz)
		return (EINVAL);

	mtx_lock(&poll_mtx);
	reg_frac = val;
	if (reg_frac_count >= reg_frac)
		reg_frac_count = 0;
	mtx_unlock(&poll_mtx);

	return (0);
}
SYSCTL_PROC(_kern_polling, OID_AUTO, reg_frac, CTLTYPE_UINT | CTLFLAG_RW,
	0, sizeof(uint32_t), reg_frac_sysctl, "I",
	"Check status registers every this many polling cycles");

static uint32_t short_ticks;
SYSCTL_UINT(_kern_polling, OID_AUTO, short_ticks, CTLFLAG_RD,
	&short_ticks, 0, "Hardclock ticks shorter than they should be");

static uint32_t lost_polls;
SYSCTL_UINT(_kern_polling, OID_AUTO, lost_polls, CTLFLAG_RD,
	&lost_polls, 0, "How many times we would have lost a poll tick");

static uint32_t pending_polls;
SYSCTL_UINT(_kern_polling, OID_AUTO, pending_polls, CTLFLAG_RD,
	&pending_polls, 0, "Do we need to poll again");

static int residual_burst = 0;
SYSCTL_INT(_kern_polling, OID_AUTO, residual_burst, CTLFLAG_RD,
	&residual_burst, 0, "# of residual cycles in burst");

static uint32_t poll_handlers; /* next free entry in pr[]. */
SYSCTL_UINT(_kern_polling, OID_AUTO, handlers, CTLFLAG_RD,
	&poll_handlers, 0, "Number of registered poll handlers");

static uint32_t phase;
SYSCTL_UINT(_kern_polling, OID_AUTO, phase, CTLFLAG_RD,
	&phase, 0, "Polling phase");
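
/*
 * "phase" records where we are in the per-tick polling cycle, for the
 * debugging counters below:
 *
 *	0	idle, no poll in progress
 *	1	hardclock_device_poll() is scheduling NETISR_POLL
 *	2	NETISR_POLL scheduled, netisr_poll() not yet run
 *	3	netisr_poll() is running the handlers
 *	4	netisr_poll() done, waiting for netisr_pollmore()
 *	5	netisr_pollmore() running (it stays 5 while residual
 *		burst chunks are still being consumed)
 *	6	netisr_pollmore() rescheduled NETISR_POLL to catch up
 *		on hardclock ticks missed during a long cycle
 *
 * If hardclock_device_poll() finds phase 1 or 2, the previous poll
 * never started; this is counted in "suspect".
 */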

static uint32_t suspect;
SYSCTL_UINT(_kern_polling, OID_AUTO, suspect, CTLFLAG_RD,
	&suspect, 0, "suspect event");

static uint32_t stalled;
SYSCTL_UINT(_kern_polling, OID_AUTO, stalled, CTLFLAG_RD,
	&stalled, 0, "potential stalls");

static uint32_t idlepoll_sleeping; /* idlepoll is sleeping */
SYSCTL_UINT(_kern_polling, OID_AUTO, idlepoll_sleeping, CTLFLAG_RD,
	&idlepoll_sleeping, 0, "idlepoll is sleeping");

#define POLL_LIST_LEN  128
struct pollrec {
	poll_handler_t	*handler;
	struct ifnet	*ifp;
};

static struct pollrec pr[POLL_LIST_LEN];

static void
poll_shutdown(void *arg, int howto)
{

	poll_shutting_down = 1;
}

static void
init_device_poll(void)
{

	mtx_init(&poll_mtx, "polling", NULL, MTX_DEF);
	EVENTHANDLER_REGISTER(shutdown_post_sync, poll_shutdown, NULL,
	    SHUTDOWN_PRI_LAST);
}
SYSINIT(device_poll, SI_SUB_SOFTINTR, SI_ORDER_MIDDLE, init_device_poll, NULL);

/*
 * Hook from hardclock. Tries to schedule a netisr, but keeps track
 * of lost ticks due to the previous handler taking too long.
 * Normally, this should not happen, because the polling handler should
 * run for a short time. However, in some cases (e.g. when there are
 * changes in link status etc.) the drivers take a very long time
 * (even on the order of milliseconds) to reset and reconfigure the
 * device, causing apparent lost polls.
 *
 * The first part of the code is just for debugging purposes, and tries
 * to count how often hardclock ticks are shorter than they should be,
 * meaning either stray interrupts or delayed events.
 */
void
hardclock_device_poll(void)
{
	static struct timeval prev_t, t;
	int delta;

	if (poll_handlers == 0 || poll_shutting_down)
		return;

	microuptime(&t);
	delta = (t.tv_usec - prev_t.tv_usec) +
		(t.tv_sec - prev_t.tv_sec)*1000000;
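	/*
	 * delta is in microseconds and a nominal tick is 1000000 / hz
	 * microseconds, so "delta * hz < 500000" holds exactly when
	 * this tick arrived less than half a tick after the previous
	 * one.
	 */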
	if (delta * hz < 500000)
		short_ticks++;
	else
		prev_t = t;

	if (pending_polls > 100) {
		/*
		 * Too much, assume it has stalled (not always true,
		 * see comment above).
		 */
		stalled++;
		pending_polls = 0;
		phase = 0;
	}

	if (phase <= 2) {
		if (phase != 0)
			suspect++;
		phase = 1;
		netisr_poll_scheduled = 1;
		netisr_pollmore_scheduled = 1;
		netisr_sched_poll();
		phase = 2;
	}
	if (pending_polls++ > 0)
		lost_polls++;
}

/*
 * ether_poll is called from the idle loop.
 */
static void
ether_poll(int count)
{
	struct epoch_tracker et;
	int i;

	mtx_lock(&poll_mtx);

	if (count > poll_each_burst)
		count = poll_each_burst;

	NET_EPOCH_ENTER(et);
	for (i = 0; i < poll_handlers; i++)
		pr[i].handler(pr[i].ifp, POLL_ONLY, count);
	NET_EPOCH_EXIT(et);

	mtx_unlock(&poll_mtx);
}

/*
 * netisr_pollmore is called after other netisr's, possibly scheduling
 * another NETISR_POLL call, or adapting the burst size for the next cycle.
 *
 * It is very bad to fetch large bursts of packets from a single card at once,
 * because the burst could take a long time to be completely processed, or
 * could saturate the intermediate queue (ipintrq or similar) leading to
 * losses or unfairness. To reduce the problem, and also to account better for
 * time spent in network-related processing, we split the burst in smaller
 * chunks of fixed size, giving control to the other netisr's between chunks.
 * This helps to improve fairness, to reduce livelock (because we emulate
 * more closely the "process to completion" that we have with
 * fastforwarding) and to account for the work performed in low level
 * handling and forwarding.
 */

static struct timeval poll_start_t;

void
netisr_pollmore(void)
{
	struct timeval t;
	int kern_load;

	if (poll_handlers == 0)
		return;

	mtx_lock(&poll_mtx);
	if (!netisr_pollmore_scheduled) {
		mtx_unlock(&poll_mtx);
		return;
	}
	netisr_pollmore_scheduled = 0;
	phase = 5;
	if (residual_burst > 0) {
		netisr_poll_scheduled = 1;
		netisr_pollmore_scheduled = 1;
		netisr_sched_poll();
		mtx_unlock(&poll_mtx);
		/* will run immediately on return, followed by netisrs */
		return;
	}
	/* here we can account time spent in netisr's in this tick */
	microuptime(&t);
	kern_load = (t.tv_usec - poll_start_t.tv_usec) +
		(t.tv_sec - poll_start_t.tv_sec)*1000000;	/* us */
	kern_load = (kern_load * hz) / 10000;			/* 0..100 */
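	/*
	 * E.g. with hz = 1000 a tick is 1000 us; if the netisrs used
	 * 600 us of it, kern_load = 600 * 1000 / 10000 = 60.  With the
	 * default user_frac of 50 this exceeds 100 - 50, so the burst
	 * size is decreased below.
	 */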
	if (kern_load > (100 - user_frac)) {	/* try to decrease burst */
		if (poll_burst > 1)
			poll_burst--;
	} else {
		if (poll_burst < poll_burst_max)
			poll_burst++;
	}

	pending_polls--;
	if (pending_polls == 0) /* we are done */
		phase = 0;
	else {
		/*
		 * Last cycle was long and caused us to miss one or more
		 * hardclock ticks. Restart processing again, but slightly
		 * reduce the burst size to prevent this from happening
		 * again.
		 */
		poll_burst -= (poll_burst / 8);
		if (poll_burst < 1)
			poll_burst = 1;
		netisr_poll_scheduled = 1;
		netisr_pollmore_scheduled = 1;
		netisr_sched_poll();
		phase = 6;
	}
	mtx_unlock(&poll_mtx);
}

/*
 * netisr_poll is typically scheduled once per tick.
 */
void
netisr_poll(void)
{
	int i, cycles;
	enum poll_cmd arg = POLL_ONLY;

	NET_EPOCH_ASSERT();

	if (poll_handlers == 0)
		return;

	mtx_lock(&poll_mtx);
	if (!netisr_poll_scheduled) {
		mtx_unlock(&poll_mtx);
		return;
	}
	netisr_poll_scheduled = 0;
	phase = 3;
	if (residual_burst == 0) { /* first call in this tick */
		microuptime(&poll_start_t);
		if (++reg_frac_count == reg_frac) {
			arg = POLL_AND_CHECK_STATUS;
			reg_frac_count = 0;
		}

		residual_burst = poll_burst;
	}
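	/*
	 * Consume the burst in chunks of at most poll_each_burst
	 * packets, yielding to the other netisrs between chunks (see
	 * the comment above netisr_pollmore()).  E.g. poll_burst == 150
	 * with poll_each_burst == 5 gives up to 30 passes per tick.
	 */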
	cycles = (residual_burst < poll_each_burst) ?
		residual_burst : poll_each_burst;
	residual_burst -= cycles;

	for (i = 0; i < poll_handlers; i++)
		pr[i].handler(pr[i].ifp, arg, cycles);

	phase = 4;
	mtx_unlock(&poll_mtx);
}

/*
 * Try to register routine for polling. Returns 0 if successful
 * (and polling should be enabled), error code otherwise.
 * A device is not supposed to register itself multiple times.
 *
 * This is called from within the *_ioctl() functions.
 */
int
ether_poll_register(poll_handler_t *h, if_t ifp)
{
	int i;

	KASSERT(h != NULL, ("%s: handler is NULL", __func__));
	KASSERT(ifp != NULL, ("%s: ifp is NULL", __func__));

	mtx_lock(&poll_mtx);
	if (poll_handlers >= POLL_LIST_LEN) {
		/*
		 * List full, cannot register more entries.
		 * This should never happen; if it does, it is probably a
		 * broken driver trying to register multiple times. Checking
		 * this at runtime is expensive, and won't solve the problem
		 * anyway, so just report a few times and then give up.
		 */
		static int verbose = 10;
		if (verbose > 0) {
			log(LOG_ERR, "poll handlers list full, "
			    "maybe a broken driver?\n");
			verbose--;
		}
		mtx_unlock(&poll_mtx);
		return (ENOMEM); /* no polling for you */
	}

	for (i = 0; i < poll_handlers; i++)
		if (pr[i].ifp == ifp && pr[i].handler != NULL) {
			mtx_unlock(&poll_mtx);
			log(LOG_DEBUG, "ether_poll_register: %s: handler"
			    " already registered\n", ifp->if_xname);
			return (EEXIST);
		}

	pr[poll_handlers].handler = h;
	pr[poll_handlers].ifp = ifp;
	poll_handlers++;
	mtx_unlock(&poll_mtx);
	if (idlepoll_sleeping)
		wakeup(&idlepoll_sleeping);
	return (0);
}
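
/*
 * A sketch of the driver side, for illustration only: a hypothetical
 * foo(4) driver (the names below are not from this file) would toggle
 * polling from its SIOCSIFCAP ioctl handler roughly as follows:
 *
 *	case SIOCSIFCAP:
 *		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
 *		if (mask & IFCAP_POLLING) {
 *			if (ifr->ifr_reqcap & IFCAP_POLLING) {
 *				error = ether_poll_register(foo_poll, ifp);
 *				if (error == 0) {
 *					FOO_LOCK(sc);
 *					foo_intr_disable(sc);
 *					ifp->if_capenable |= IFCAP_POLLING;
 *					FOO_UNLOCK(sc);
 *				}
 *			} else {
 *				error = ether_poll_deregister(ifp);
 *				FOO_LOCK(sc);
 *				foo_intr_enable(sc);
 *				ifp->if_capenable &= ~IFCAP_POLLING;
 *				FOO_UNLOCK(sc);
 *			}
 *		}
 *		break;
 */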

/*
 * Remove interface from the polling list. Called from *_ioctl(), too.
 */
int
ether_poll_deregister(if_t ifp)
{
	int i;

	KASSERT(ifp != NULL, ("%s: ifp is NULL", __func__));

	mtx_lock(&poll_mtx);

	for (i = 0; i < poll_handlers; i++)
		if (pr[i].ifp == ifp) /* found it */
			break;
	if (i == poll_handlers) {
		log(LOG_DEBUG, "ether_poll_deregister: %s: not found!\n",
		    ifp->if_xname);
		mtx_unlock(&poll_mtx);
		return (ENOENT);
	}
	poll_handlers--;
	if (i < poll_handlers) { /* Last entry replaces this one. */
		pr[i].handler = pr[poll_handlers].handler;
		pr[i].ifp = pr[poll_handlers].ifp;
	}
	mtx_unlock(&poll_mtx);
	return (0);
}

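/*
 * The idlepoll kthread.  It runs at the lowest (idle) priority and,
 * when kern.polling.idle_poll is set and handlers are registered,
 * keeps polling the devices while the CPU would otherwise sit idle.
 * Otherwise it sleeps, to be woken up by ether_poll_register().
 */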
static void
poll_idle(void)
{
	struct thread *td = curthread;
	struct rtprio rtp;

	rtp.prio = RTP_PRIO_MAX;	/* lowest priority */
	rtp.type = RTP_PRIO_IDLE;
	PROC_SLOCK(td->td_proc);
	rtp_to_pri(&rtp, td);
	PROC_SUNLOCK(td->td_proc);

	for (;;) {
		if (poll_in_idle_loop && poll_handlers > 0) {
			idlepoll_sleeping = 0;
			ether_poll(poll_each_burst);
			thread_lock(td);
			mi_switch(SW_VOL);
		} else {
			idlepoll_sleeping = 1;
			tsleep(&idlepoll_sleeping, 0, "pollid", hz * 3);
		}
	}
}

static struct proc *idlepoll;
static struct kproc_desc idlepoll_kp = {
	"idlepoll",
	poll_idle,
	&idlepoll
};
SYSINIT(idlepoll, SI_SUB_KTHREAD_VM, SI_ORDER_ANY, kproc_start,
    &idlepoll_kp);