xref: /freebsd/sys/kern/kern_poll.c (revision 6829dae12bb055451fa467da4589c43bd03b1e64)
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2001-2002 Luigi Rizzo
 *
 * Supported by: the Xorp Project (www.xorp.org)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_device_polling.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/proc.h>
#include <sys/eventhandler.h>
#include <sys/resourcevar.h>
#include <sys/socket.h>			/* needed by net/if.h		*/
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/netisr.h>			/* for NETISR_POLL		*/
#include <net/vnet.h>

void hardclock_device_poll(void);	/* hook from hardclock		*/

static struct mtx	poll_mtx;

/*
 * Polling support for [network] device drivers.
 *
 * Drivers which support this feature can register with the
 * polling code.
 *
 * If registration is successful, the driver must disable interrupts,
 * and further I/O is performed through the handler, which is invoked
 * (at least once per clock tick) with 3 arguments: the "arg" passed at
 * register time (a struct ifnet pointer), a command, and a "count" limit.
 *
 * The command can be one of the following:
 *  POLL_ONLY: quick move of "count" packets from input/output queues.
 *  POLL_AND_CHECK_STATUS: as above, plus check status registers or do
 *	other more expensive operations. This command is issued periodically
 *	but less frequently than POLL_ONLY.
 *
 * The count limit specifies how much work the handler can do during the
 * call -- typically this is the number of packets to be received, or
 * transmitted, etc. (drivers are free to interpret this number, as long
 * as the max time spent in the function grows roughly linearly with the
 * count).
 *
 * Polling is enabled and disabled by setting the IFCAP_POLLING flag on
 * the interface. The driver's ioctl handler should register the interface
 * with the polling code and disable interrupts if the registration was
 * successful.
 *
 * A second variable controls the sharing of CPU between polling/kernel
 * network processing, and other activities (typically userlevel tasks):
 * kern.polling.user_frac (between 0 and 100, default 50) sets the share
 * of CPU allocated to user tasks. CPU is allocated proportionally to the
 * shares, by dynamically adjusting the "count" (poll_burst).
 *
 * Other parameters should be left at their default values.
 * The following constraints hold:
 *
 *	1 <= poll_each_burst <= poll_burst <= poll_burst_max
 *	MIN_POLL_BURST_MAX <= poll_burst_max <= MAX_POLL_BURST_MAX
 */

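/*
 * A minimal sketch of such a handler for a hypothetical "foo" driver
 * (foo_softc, foo_rxeof(), foo_txeof() and foo_check_status() are
 * illustrative names, not part of this API).  It moves at most "count"
 * packets off the receive ring, reaps completed transmits, and does the
 * expensive status check only when asked to:
 *
 *	static int
 *	foo_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
 *	{
 *		struct foo_softc *sc = ifp->if_softc;
 *		int rx_npkts;
 *
 *		rx_npkts = foo_rxeof(sc, count);
 *		foo_txeof(sc);
 *		if (cmd == POLL_AND_CHECK_STATUS)
 *			foo_check_status(sc);
 *		return (rx_npkts);
 *	}
 */
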
#define MIN_POLL_BURST_MAX	10
#define MAX_POLL_BURST_MAX	20000

static uint32_t poll_burst = 5;
static uint32_t poll_burst_max = 150;	/* good for 100Mbit net and HZ=1000 */
static uint32_t poll_each_burst = 5;

static SYSCTL_NODE(_kern, OID_AUTO, polling, CTLFLAG_RW, 0,
	"Device polling parameters");
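
/*
 * All of the knobs below live under this node, so they can be read and
 * (where CTLFLAG_RW) tuned at runtime with sysctl(8), e.g.:
 *
 *	sysctl kern.polling.user_frac=50
 */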

SYSCTL_UINT(_kern_polling, OID_AUTO, burst, CTLFLAG_RD,
	&poll_burst, 0, "Current polling burst size");

static int	netisr_poll_scheduled;
static int	netisr_pollmore_scheduled;
static int	poll_shutting_down;

static int poll_burst_max_sysctl(SYSCTL_HANDLER_ARGS)
{
	uint32_t val = poll_burst_max;
	int error;

	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return (error);
	if (val < MIN_POLL_BURST_MAX || val > MAX_POLL_BURST_MAX)
		return (EINVAL);

	mtx_lock(&poll_mtx);
	poll_burst_max = val;
	if (poll_burst > poll_burst_max)
		poll_burst = poll_burst_max;
	if (poll_each_burst > poll_burst_max)
		poll_each_burst = MIN_POLL_BURST_MAX;
	mtx_unlock(&poll_mtx);

	return (0);
}
SYSCTL_PROC(_kern_polling, OID_AUTO, burst_max, CTLTYPE_UINT | CTLFLAG_RW,
	0, sizeof(uint32_t), poll_burst_max_sysctl, "I", "Max Polling burst size");

static int poll_each_burst_sysctl(SYSCTL_HANDLER_ARGS)
{
	uint32_t val = poll_each_burst;
	int error;

	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return (error);
	if (val < 1)
		return (EINVAL);

	mtx_lock(&poll_mtx);
	if (val > poll_burst_max) {
		mtx_unlock(&poll_mtx);
		return (EINVAL);
	}
	poll_each_burst = val;
	mtx_unlock(&poll_mtx);

	return (0);
}
SYSCTL_PROC(_kern_polling, OID_AUTO, each_burst, CTLTYPE_UINT | CTLFLAG_RW,
	0, sizeof(uint32_t), poll_each_burst_sysctl, "I",
	"Max size of each burst");

static uint32_t poll_in_idle_loop = 0;	/* do we poll in idle loop? */
SYSCTL_UINT(_kern_polling, OID_AUTO, idle_poll, CTLFLAG_RW,
	&poll_in_idle_loop, 0, "Enable device polling in idle loop");

static uint32_t user_frac = 50;
static int user_frac_sysctl(SYSCTL_HANDLER_ARGS)
{
	uint32_t val = user_frac;
	int error;

	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return (error);
	if (val > 99)
		return (EINVAL);

	mtx_lock(&poll_mtx);
	user_frac = val;
	mtx_unlock(&poll_mtx);

	return (0);
}
SYSCTL_PROC(_kern_polling, OID_AUTO, user_frac, CTLTYPE_UINT | CTLFLAG_RW,
	0, sizeof(uint32_t), user_frac_sysctl, "I",
	"Desired user fraction of cpu time");

static uint32_t reg_frac_count = 0;
static uint32_t reg_frac = 20;
static int reg_frac_sysctl(SYSCTL_HANDLER_ARGS)
{
	uint32_t val = reg_frac;
	int error;

	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return (error);
	if (val < 1 || val > hz)
		return (EINVAL);

	mtx_lock(&poll_mtx);
	reg_frac = val;
	if (reg_frac_count >= reg_frac)
		reg_frac_count = 0;
	mtx_unlock(&poll_mtx);

	return (0);
}
SYSCTL_PROC(_kern_polling, OID_AUTO, reg_frac, CTLTYPE_UINT | CTLFLAG_RW,
	0, sizeof(uint32_t), reg_frac_sysctl, "I",
	"Every this many cycles check registers");

static uint32_t short_ticks;
SYSCTL_UINT(_kern_polling, OID_AUTO, short_ticks, CTLFLAG_RD,
	&short_ticks, 0, "Hardclock ticks shorter than they should be");

static uint32_t lost_polls;
SYSCTL_UINT(_kern_polling, OID_AUTO, lost_polls, CTLFLAG_RD,
	&lost_polls, 0, "How many times we would have lost a poll tick");

static uint32_t pending_polls;
SYSCTL_UINT(_kern_polling, OID_AUTO, pending_polls, CTLFLAG_RD,
	&pending_polls, 0, "Do we need to poll again");

static int residual_burst = 0;
SYSCTL_INT(_kern_polling, OID_AUTO, residual_burst, CTLFLAG_RD,
	&residual_burst, 0, "# of residual cycles in burst");

static uint32_t poll_handlers; /* next free entry in pr[]. */
SYSCTL_UINT(_kern_polling, OID_AUTO, handlers, CTLFLAG_RD,
	&poll_handlers, 0, "Number of registered poll handlers");

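/*
 * Rough meaning of the phase values, as set by the code below:
 * 0 idle; 1/2 being scheduled from hardclock; 3/4 netisr_poll()
 * running/done; 5 netisr_pollmore() running; 6 poll rescheduled to
 * catch up with missed ticks.
 */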
static uint32_t phase;
SYSCTL_UINT(_kern_polling, OID_AUTO, phase, CTLFLAG_RD,
	&phase, 0, "Polling phase");

static uint32_t suspect;
SYSCTL_UINT(_kern_polling, OID_AUTO, suspect, CTLFLAG_RD,
	&suspect, 0, "suspect event");

static uint32_t stalled;
SYSCTL_UINT(_kern_polling, OID_AUTO, stalled, CTLFLAG_RD,
	&stalled, 0, "potential stalls");

static uint32_t idlepoll_sleeping; /* idlepoll is sleeping */
SYSCTL_UINT(_kern_polling, OID_AUTO, idlepoll_sleeping, CTLFLAG_RD,
	&idlepoll_sleeping, 0, "idlepoll is sleeping");

#define POLL_LIST_LEN  128
struct pollrec {
	poll_handler_t	*handler;
	struct ifnet	*ifp;
};

static struct pollrec pr[POLL_LIST_LEN];

static void
poll_shutdown(void *arg, int howto)
{

	poll_shutting_down = 1;
}

static void
init_device_poll(void)
{

	mtx_init(&poll_mtx, "polling", NULL, MTX_DEF);
	EVENTHANDLER_REGISTER(shutdown_post_sync, poll_shutdown, NULL,
	    SHUTDOWN_PRI_LAST);
}
SYSINIT(device_poll, SI_SUB_SOFTINTR, SI_ORDER_MIDDLE, init_device_poll, NULL);

/*
 * Hook from hardclock. Tries to schedule a netisr, but keeps track
 * of lost ticks due to the previous handler taking too long.
 * Normally, this should not happen, because the polling handler should
 * run for a short time. However, in some cases (e.g. when there are
 * changes in link status etc.) the drivers take a very long time
 * (even on the order of milliseconds) to reset and reconfigure the
 * device, causing apparent lost polls.
 *
 * The first part of the code is just for debugging purposes, and tries
 * to count how often hardclock ticks are shorter than they should be,
 * meaning either stray interrupts or delayed events.
 */
void
hardclock_device_poll(void)
{
	static struct timeval prev_t, t;
	int delta;

	if (poll_handlers == 0 || poll_shutting_down)
		return;

	microuptime(&t);
	delta = (t.tv_usec - prev_t.tv_usec) +
		(t.tv_sec - prev_t.tv_sec)*1000000;
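	/*
	 * A full tick is 1000000 / hz us, so "delta * hz < 500000"
	 * means less than half a tick has elapsed since the last call.
	 */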
	if (delta * hz < 500000)
		short_ticks++;
	else
		prev_t = t;

	if (pending_polls > 100) {
		/*
		 * Too much, assume it has stalled (not always true,
		 * see comment above).
		 */
		stalled++;
		pending_polls = 0;
		phase = 0;
	}

	if (phase <= 2) {
		if (phase != 0)
			suspect++;
		phase = 1;
		netisr_poll_scheduled = 1;
		netisr_pollmore_scheduled = 1;
		netisr_sched_poll();
		phase = 2;
	}
	if (pending_polls++ > 0)
		lost_polls++;
}

/*
 * ether_poll is called from the idle loop.
 */
static void
ether_poll(int count)
{
	int i;

	mtx_lock(&poll_mtx);

	if (count > poll_each_burst)
		count = poll_each_burst;

	for (i = 0; i < poll_handlers; i++)
		pr[i].handler(pr[i].ifp, POLL_ONLY, count);

	mtx_unlock(&poll_mtx);
}

/*
 * netisr_pollmore is called after other netisr's, possibly scheduling
 * another NETISR_POLL call, or adapting the burst size for the next cycle.
 *
 * It is very bad to fetch large bursts of packets from a single card at once,
 * because the burst could take a long time to be completely processed, or
 * could saturate the intermediate queue (ipintrq or similar) leading to
 * losses or unfairness. To reduce the problem, and also to account better for
 * time spent in network-related processing, we split the burst in smaller
 * chunks of fixed size, giving control to the other netisr's between chunks.
 * This helps improve fairness, reduce livelock (because we emulate more
 * closely the "process to completion" that we have with fastforwarding)
 * and account for the work performed in low-level handling and forwarding.
 */
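
/*
 * For example, with the default poll_each_burst = 5 and poll_burst
 * grown to its default cap of 150, a tick's burst is drained in up to
 * 30 chunks of 5 packets per interface, with the other netisrs getting
 * a chance to run between chunks.
 */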

static struct timeval poll_start_t;

void
netisr_pollmore(void)
{
	struct timeval t;
	int kern_load;

	if (poll_handlers == 0)
		return;

	mtx_lock(&poll_mtx);
	if (!netisr_pollmore_scheduled) {
		mtx_unlock(&poll_mtx);
		return;
	}
	netisr_pollmore_scheduled = 0;
	phase = 5;
	if (residual_burst > 0) {
		netisr_poll_scheduled = 1;
		netisr_pollmore_scheduled = 1;
		netisr_sched_poll();
		mtx_unlock(&poll_mtx);
		/* will run immediately on return, followed by netisrs */
		return;
	}
	/* here we can account time spent in netisr's in this tick */
	microuptime(&t);
	kern_load = (t.tv_usec - poll_start_t.tv_usec) +
		(t.tv_sec - poll_start_t.tv_sec)*1000000;	/* us */
	kern_load = (kern_load * hz) / 10000;			/* 0..100 */
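	/*
	 * kern_load is in us and a full tick is 1000000 / hz us, so
	 * "us * hz / 10000" is the percentage of the tick consumed:
	 * e.g. 500 us with hz = 1000 gives 500 * 1000 / 10000 = 50.
	 */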
	if (kern_load > (100 - user_frac)) {	/* try to decrease burst */
		if (poll_burst > 1)
			poll_burst--;
	} else {
		if (poll_burst < poll_burst_max)
			poll_burst++;
	}

	pending_polls--;
	if (pending_polls == 0) /* we are done */
		phase = 0;
	else {
		/*
		 * Last cycle was long and caused us to miss one or more
		 * hardclock ticks. Restart processing again, but slightly
		 * reduce the burst size to prevent this from happening again.
		 */
		poll_burst -= (poll_burst / 8);
		if (poll_burst < 1)
			poll_burst = 1;
		netisr_poll_scheduled = 1;
		netisr_pollmore_scheduled = 1;
		netisr_sched_poll();
		phase = 6;
	}
	mtx_unlock(&poll_mtx);
}

/*
 * netisr_poll is typically scheduled once per tick.
 */
void
netisr_poll(void)
{
	int i, cycles;
	enum poll_cmd arg = POLL_ONLY;

	if (poll_handlers == 0)
		return;

	mtx_lock(&poll_mtx);
	if (!netisr_poll_scheduled) {
		mtx_unlock(&poll_mtx);
		return;
	}
	netisr_poll_scheduled = 0;
	phase = 3;
	if (residual_burst == 0) { /* first call in this tick */
		microuptime(&poll_start_t);
		if (++reg_frac_count == reg_frac) {
			arg = POLL_AND_CHECK_STATUS;
			reg_frac_count = 0;
		}

		residual_burst = poll_burst;
	}
	cycles = (residual_burst < poll_each_burst) ?
		residual_burst : poll_each_burst;
	residual_burst -= cycles;

	for (i = 0; i < poll_handlers; i++)
		pr[i].handler(pr[i].ifp, arg, cycles);

	phase = 4;
	mtx_unlock(&poll_mtx);
}

/*
 * Try to register routine for polling. Returns 0 if successful
 * (and polling should be enabled), error code otherwise.
 * A device is not supposed to register itself multiple times.
 *
 * This is called from within the *_ioctl() functions.
 */
int
ether_poll_register(poll_handler_t *h, if_t ifp)
{
	int i;

	KASSERT(h != NULL, ("%s: handler is NULL", __func__));
	KASSERT(ifp != NULL, ("%s: ifp is NULL", __func__));

	mtx_lock(&poll_mtx);
	if (poll_handlers >= POLL_LIST_LEN) {
		/*
		 * List full, cannot register more entries.
		 * This should never happen; if it does, it is probably a
		 * broken driver trying to register multiple times. Checking
		 * this at runtime is expensive, and won't solve the problem
		 * anyways, so just report a few times and then give up.
		 */
		static int verbose = 10;
		if (verbose > 0) {
			log(LOG_ERR, "poll handlers list full, "
			    "maybe a broken driver?\n");
			verbose--;
		}
		mtx_unlock(&poll_mtx);
		return (ENOMEM); /* no polling for you */
	}

	for (i = 0; i < poll_handlers; i++)
		if (pr[i].ifp == ifp && pr[i].handler != NULL) {
			mtx_unlock(&poll_mtx);
			log(LOG_DEBUG, "ether_poll_register: %s: handler"
			    " already registered\n", ifp->if_xname);
			return (EEXIST);
		}

	pr[poll_handlers].handler = h;
	pr[poll_handlers].ifp = ifp;
	poll_handlers++;
	mtx_unlock(&poll_mtx);
	if (idlepoll_sleeping)
		wakeup(&idlepoll_sleeping);
	return (0);
}

/*
 * Remove interface from the polling list. Called from *_ioctl(), too.
 */
int
ether_poll_deregister(if_t ifp)
{
	int i;

	KASSERT(ifp != NULL, ("%s: ifp is NULL", __func__));

	mtx_lock(&poll_mtx);

	for (i = 0; i < poll_handlers; i++)
		if (pr[i].ifp == ifp) /* found it */
			break;
	if (i == poll_handlers) {
		log(LOG_DEBUG, "ether_poll_deregister: %s: not found!\n",
		    ifp->if_xname);
		mtx_unlock(&poll_mtx);
		return (ENOENT);
	}
	poll_handlers--;
	if (i < poll_handlers) { /* Last entry replaces this one. */
		pr[i].handler = pr[poll_handlers].handler;
		pr[i].ifp = pr[poll_handlers].ifp;
	}
	mtx_unlock(&poll_mtx);
	return (0);
}
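
/*
 * A sketch of the *_ioctl() usage described above, again for the
 * hypothetical "foo" driver (FOO_LOCK/FOO_UNLOCK and the interrupt
 * enable/disable helpers are illustrative, not a real API).  In the
 * SIOCSIFCAP case the driver would do something like:
 *
 *	if ((ifr->ifr_reqcap ^ ifp->if_capenable) & IFCAP_POLLING) {
 *		if (ifr->ifr_reqcap & IFCAP_POLLING) {
 *			error = ether_poll_register(foo_poll, ifp);
 *			if (error)
 *				return (error);
 *			FOO_LOCK(sc);
 *			foo_disable_intr(sc);
 *			ifp->if_capenable |= IFCAP_POLLING;
 *			FOO_UNLOCK(sc);
 *		} else {
 *			error = ether_poll_deregister(ifp);
 *			FOO_LOCK(sc);
 *			foo_enable_intr(sc);
 *			ifp->if_capenable &= ~IFCAP_POLLING;
 *			FOO_UNLOCK(sc);
 *		}
 *	}
 */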

static void
poll_idle(void)
{
	struct thread *td = curthread;
	struct rtprio rtp;

	rtp.prio = RTP_PRIO_MAX;	/* lowest priority */
	rtp.type = RTP_PRIO_IDLE;
	PROC_SLOCK(td->td_proc);
	rtp_to_pri(&rtp, td);
	PROC_SUNLOCK(td->td_proc);

	for (;;) {
		if (poll_in_idle_loop && poll_handlers > 0) {
			idlepoll_sleeping = 0;
			ether_poll(poll_each_burst);
			thread_lock(td);
			mi_switch(SW_VOL, NULL);
			thread_unlock(td);
		} else {
			idlepoll_sleeping = 1;
			tsleep(&idlepoll_sleeping, 0, "pollid", hz * 3);
		}
	}
}

static struct proc *idlepoll;
static struct kproc_desc idlepoll_kp = {
	 "idlepoll",
	 poll_idle,
	 &idlepoll
};
SYSINIT(idlepoll, SI_SUB_KTHREAD_VM, SI_ORDER_ANY, kproc_start,
    &idlepoll_kp);
577