xref: /freebsd/sys/kern/kern_poll.c (revision 3c4ba5f55438f7afd4f4b0b56f88f2bb505fd6a6)
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2001-2002 Luigi Rizzo
 *
 * Supported by: the Xorp Project (www.xorp.org)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_device_polling.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/proc.h>
#include <sys/epoch.h>
#include <sys/eventhandler.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/socket.h>			/* needed by net/if.h		*/
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/netisr.h>			/* for NETISR_POLL		*/
#include <net/vnet.h>

void hardclock_device_poll(void);	/* hook from hardclock		*/

static struct mtx	poll_mtx;

/*
 * Polling support for [network] device drivers.
 *
 * Drivers which support this feature can register with the
 * polling code.
 *
 * If registration is successful, the driver must disable interrupts,
 * and further I/O is performed through the handler, which is invoked
 * (at least once per clock tick) with 3 arguments: the "arg" passed at
 * register time (a struct ifnet pointer), a command, and a "count" limit.
 *
 * The command can be one of the following:
 *  POLL_ONLY: quick move of "count" packets from input/output queues.
 *  POLL_AND_CHECK_STATUS: as above, plus check status registers or do
 *	other more expensive operations. This command is issued periodically
 *	but less frequently than POLL_ONLY.
 *
 * The count limit specifies how much work the handler can do during the
 * call -- typically this is the number of packets to be received, or
 * transmitted, etc. (drivers are free to interpret this number, as long
 * as the max time spent in the function grows roughly linearly with the
 * count).
 *
 * Polling is enabled and disabled by setting the IFCAP_POLLING flag on
 * the interface. The driver ioctl handler should register the interface
 * with the polling code and disable interrupts if the registration was
 * successful.
 *
 * A second variable controls the sharing of CPU between polling/kernel
 * network processing, and other activities (typically userlevel tasks):
 * kern.polling.user_frac (between 0 and 99, default 50) sets the share
 * of CPU allocated to user tasks. CPU is allocated proportionally to the
 * shares, by dynamically adjusting the "count" (poll_burst).
 *
 * Other parameters should be left at their default values.
 * The following constraints hold:
 *
 *	1 <= poll_each_burst <= poll_burst <= poll_burst_max
 *	MIN_POLL_BURST_MAX <= poll_burst_max <= MAX_POLL_BURST_MAX
 */
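
/*
 * For illustration only, a minimal poll handler for a hypothetical "foo"
 * driver might look like the sketch below. foo_softc, foo_rxeof(),
 * foo_txeof() and foo_link_check() are assumed driver internals, not part
 * of this file; the poll_handler_t callback type is declared in
 * <net/if_var.h>.
 *
 *	static int
 *	foo_poll(if_t ifp, enum poll_cmd cmd, int count)
 *	{
 *		struct foo_softc *sc = if_getsoftc(ifp);
 *		int rx_npkts;
 *
 *		rx_npkts = foo_rxeof(sc, count);   // receive <= count packets
 *		foo_txeof(sc);                     // reclaim completed transmits
 *		if (cmd == POLL_AND_CHECK_STATUS)  // slow, periodic path
 *			foo_link_check(sc);
 *		return (rx_npkts);
 *	}
 */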

#define MIN_POLL_BURST_MAX	10
#define MAX_POLL_BURST_MAX	20000

static uint32_t poll_burst = 5;
static uint32_t poll_burst_max = 150;	/* good for 100Mbit net and HZ=1000 */
static uint32_t poll_each_burst = 5;

static SYSCTL_NODE(_kern, OID_AUTO, polling, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "Device polling parameters");

SYSCTL_UINT(_kern_polling, OID_AUTO, burst, CTLFLAG_RD,
	&poll_burst, 0, "Current polling burst size");

static int	netisr_poll_scheduled;
static int	netisr_pollmore_scheduled;
static int	poll_shutting_down;

static int poll_burst_max_sysctl(SYSCTL_HANDLER_ARGS)
{
	uint32_t val = poll_burst_max;
	int error;

	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return (error);
	if (val < MIN_POLL_BURST_MAX || val > MAX_POLL_BURST_MAX)
		return (EINVAL);

	mtx_lock(&poll_mtx);
	poll_burst_max = val;
	if (poll_burst > poll_burst_max)
		poll_burst = poll_burst_max;
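	/*
	 * If poll_each_burst no longer fits under the new maximum, reset
	 * it to the floor value MIN_POLL_BURST_MAX rather than clamping
	 * it to the new maximum.
	 */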
	if (poll_each_burst > poll_burst_max)
		poll_each_burst = MIN_POLL_BURST_MAX;
	mtx_unlock(&poll_mtx);

	return (0);
}
SYSCTL_PROC(_kern_polling, OID_AUTO, burst_max,
    CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, 0, sizeof(uint32_t),
    poll_burst_max_sysctl, "I",
    "Max Polling burst size");

static int poll_each_burst_sysctl(SYSCTL_HANDLER_ARGS)
{
	uint32_t val = poll_each_burst;
	int error;

	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return (error);
	if (val < 1)
		return (EINVAL);

	mtx_lock(&poll_mtx);
	if (val > poll_burst_max) {
		mtx_unlock(&poll_mtx);
		return (EINVAL);
	}
	poll_each_burst = val;
	mtx_unlock(&poll_mtx);

	return (0);
}
SYSCTL_PROC(_kern_polling, OID_AUTO, each_burst,
    CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, 0, sizeof(uint32_t),
    poll_each_burst_sysctl, "I",
    "Max size of each burst");

static uint32_t poll_in_idle_loop = 0;	/* do we poll in idle loop? */
SYSCTL_UINT(_kern_polling, OID_AUTO, idle_poll, CTLFLAG_RW,
	&poll_in_idle_loop, 0, "Enable device polling in idle loop");

static uint32_t user_frac = 50;
static int user_frac_sysctl(SYSCTL_HANDLER_ARGS)
{
	uint32_t val = user_frac;
	int error;

	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return (error);
	if (val > 99)
		return (EINVAL);

	mtx_lock(&poll_mtx);
	user_frac = val;
	mtx_unlock(&poll_mtx);

	return (0);
}
SYSCTL_PROC(_kern_polling, OID_AUTO, user_frac,
    CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, 0, sizeof(uint32_t),
    user_frac_sysctl, "I",
    "Desired user fraction of cpu time");

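/*
 * reg_frac controls how often netisr_poll() issues POLL_AND_CHECK_STATUS
 * instead of POLL_ONLY: once every reg_frac ticks, on the first pass of
 * a tick. With the default reg_frac = 20 and hz = 1000, the handlers are
 * asked to check their status registers 50 times per second.
 */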
static uint32_t reg_frac_count = 0;
static uint32_t reg_frac = 20;
static int reg_frac_sysctl(SYSCTL_HANDLER_ARGS)
{
	uint32_t val = reg_frac;
	int error;

	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return (error);
	if (val < 1 || val > hz)
		return (EINVAL);

	mtx_lock(&poll_mtx);
	reg_frac = val;
	if (reg_frac_count >= reg_frac)
		reg_frac_count = 0;
	mtx_unlock(&poll_mtx);

	return (0);
}
SYSCTL_PROC(_kern_polling, OID_AUTO, reg_frac,
    CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, 0, sizeof(uint32_t),
    reg_frac_sysctl, "I",
    "Every this many cycles check registers");

static uint32_t short_ticks;
SYSCTL_UINT(_kern_polling, OID_AUTO, short_ticks, CTLFLAG_RD,
	&short_ticks, 0, "Hardclock ticks shorter than they should be");

static uint32_t lost_polls;
SYSCTL_UINT(_kern_polling, OID_AUTO, lost_polls, CTLFLAG_RD,
	&lost_polls, 0, "How many times we would have lost a poll tick");

static uint32_t pending_polls;
SYSCTL_UINT(_kern_polling, OID_AUTO, pending_polls, CTLFLAG_RD,
	&pending_polls, 0, "Do we need to poll again");

static int residual_burst = 0;
SYSCTL_INT(_kern_polling, OID_AUTO, residual_burst, CTLFLAG_RD,
	&residual_burst, 0, "# of residual cycles in burst");

static uint32_t poll_handlers; /* next free entry in pr[]. */
SYSCTL_UINT(_kern_polling, OID_AUTO, handlers, CTLFLAG_RD,
	&poll_handlers, 0, "Number of registered poll handlers");

static uint32_t phase;
SYSCTL_UINT(_kern_polling, OID_AUTO, phase, CTLFLAG_RD,
	&phase, 0, "Polling phase");
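
/*
 * The phase variable is a debugging aid that records how far the polling
 * state machine got in the current tick:
 *	0: idle, the previous cycle completed
 *	1: hardclock_device_poll() is scheduling the poll netisr
 *	2: the poll netisr has been scheduled
 *	3: netisr_poll() is running the handlers
 *	4: netisr_poll() is done
 *	5: netisr_pollmore() is running
 *	6: netisr_pollmore() rescheduled polling to catch up on missed
 *	   hardclock ticks
 * Entering hardclock with phase 1 or 2 means the previous cycle did not
 * complete in time, and is counted in the "suspect" statistic below.
 */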

static uint32_t suspect;
SYSCTL_UINT(_kern_polling, OID_AUTO, suspect, CTLFLAG_RD,
	&suspect, 0, "suspect event");

static uint32_t stalled;
SYSCTL_UINT(_kern_polling, OID_AUTO, stalled, CTLFLAG_RD,
	&stalled, 0, "potential stalls");

static uint32_t idlepoll_sleeping; /* idlepoll is sleeping */
SYSCTL_UINT(_kern_polling, OID_AUTO, idlepoll_sleeping, CTLFLAG_RD,
	&idlepoll_sleeping, 0, "idlepoll is sleeping");

#define POLL_LIST_LEN  128
struct pollrec {
	poll_handler_t	*handler;
	struct ifnet	*ifp;
};

static struct pollrec pr[POLL_LIST_LEN];

static void
poll_shutdown(void *arg, int howto)
{

	poll_shutting_down = 1;
}

static void
init_device_poll(void)
{

	mtx_init(&poll_mtx, "polling", NULL, MTX_DEF);
	EVENTHANDLER_REGISTER(shutdown_post_sync, poll_shutdown, NULL,
	    SHUTDOWN_PRI_LAST);
}
SYSINIT(device_poll, SI_SUB_SOFTINTR, SI_ORDER_MIDDLE, init_device_poll, NULL);

/*
 * Hook from hardclock. Tries to schedule a netisr, but keeps track
 * of lost ticks due to the previous handler taking too long.
 * Normally, this should not happen, because the polling handler should
 * run for a short time. However, in some cases (e.g. when there are
 * changes in link status etc.) the drivers take a very long time
 * (even on the order of milliseconds) to reset and reconfigure the
 * device, causing apparent lost polls.
 *
 * The first part of the code is just for debugging purposes, and tries
 * to count how often hardclock ticks are shorter than they should be,
 * meaning either stray interrupts or delayed events.
 */
void
hardclock_device_poll(void)
{
	static struct timeval prev_t, t;
	int delta;

	if (poll_handlers == 0 || poll_shutting_down)
		return;

	microuptime(&t);
	delta = (t.tv_usec - prev_t.tv_usec) +
		(t.tv_sec - prev_t.tv_sec)*1000000;
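	/*
	 * delta * hz < 500000 means this tick was shorter than half its
	 * nominal length (e.g. shorter than 500 us at hz = 1000).
	 */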
	if (delta * hz < 500000)
		short_ticks++;
	else
		prev_t = t;

	if (pending_polls > 100) {
		/*
		 * Too much, assume it has stalled (not always true,
		 * see comment above).
		 */
		stalled++;
		pending_polls = 0;
		phase = 0;
	}

	if (phase <= 2) {
		if (phase != 0)
			suspect++;
		phase = 1;
		netisr_poll_scheduled = 1;
		netisr_pollmore_scheduled = 1;
		netisr_sched_poll();
		phase = 2;
	}
	if (pending_polls++ > 0)
		lost_polls++;
}

/*
 * ether_poll is called from the idle loop.
 */
static void
ether_poll(int count)
{
	struct epoch_tracker et;
	int i;

	mtx_lock(&poll_mtx);

	if (count > poll_each_burst)
		count = poll_each_burst;

	NET_EPOCH_ENTER(et);
	for (i = 0 ; i < poll_handlers ; i++)
		pr[i].handler(pr[i].ifp, POLL_ONLY, count);
	NET_EPOCH_EXIT(et);

	mtx_unlock(&poll_mtx);
}

/*
 * netisr_pollmore is called after other netisrs, possibly scheduling
 * another NETISR_POLL call, or adapting the burst size for the next cycle.
 *
 * It is very bad to fetch large bursts of packets from a single card at once,
 * because the burst could take a long time to be completely processed, or
 * could saturate the intermediate queue (ipintrq or similar) leading to
 * losses or unfairness. To reduce the problem, and also to account better for
 * time spent in network-related processing, we split the burst into smaller
 * chunks of fixed size, giving control to the other netisrs between chunks.
 * This helps improve fairness and reduce livelock (because we emulate more
 * closely the "process to completion" that we have with fastforwarding), and
 * accounts for the work performed in low level handling and forwarding.
 */
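
/*
 * As a worked example with the defaults above: once poll_burst has grown
 * to its maximum of 150, a tick's burst is drained in 150 / 5 = 30
 * NETISR_POLL passes of poll_each_burst = 5 packets each, with the other
 * netisrs getting a chance to run between passes.
 */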

static struct timeval poll_start_t;

void
netisr_pollmore(void)
{
	struct timeval t;
	int kern_load;

	if (poll_handlers == 0)
		return;

	mtx_lock(&poll_mtx);
	if (!netisr_pollmore_scheduled) {
		mtx_unlock(&poll_mtx);
		return;
	}
	netisr_pollmore_scheduled = 0;
	phase = 5;
	if (residual_burst > 0) {
		netisr_poll_scheduled = 1;
		netisr_pollmore_scheduled = 1;
		netisr_sched_poll();
		mtx_unlock(&poll_mtx);
		/* will run immediately on return, followed by netisrs */
		return;
	}
	/* here we can account time spent in netisr's in this tick */
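	/*
	 * The computation below converts the time spent in netisrs this
	 * tick (in microseconds) into a percentage of one tick:
	 * delta_us * hz / 10000 == delta_us * 100 / (1000000 / hz).
	 * E.g. 300 us of netisr work at hz = 1000 gives kern_load = 30.
	 */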
	microuptime(&t);
	kern_load = (t.tv_usec - poll_start_t.tv_usec) +
		(t.tv_sec - poll_start_t.tv_sec)*1000000;	/* us */
	kern_load = (kern_load * hz) / 10000;			/* 0..100 */
	if (kern_load > (100 - user_frac)) { /* try decrease ticks */
		if (poll_burst > 1)
			poll_burst--;
	} else {
		if (poll_burst < poll_burst_max)
			poll_burst++;
	}

	pending_polls--;
	if (pending_polls == 0) /* we are done */
		phase = 0;
	else {
		/*
		 * Last cycle was long and caused us to miss one or more
		 * hardclock ticks. Restart processing again, but slightly
		 * reduce the burst size to prevent this from happening again.
		 */
		poll_burst -= (poll_burst / 8);
		if (poll_burst < 1)
			poll_burst = 1;
		netisr_poll_scheduled = 1;
		netisr_pollmore_scheduled = 1;
		netisr_sched_poll();
		phase = 6;
	}
	mtx_unlock(&poll_mtx);
}

/*
 * netisr_poll is typically scheduled once per tick.
 */
void
netisr_poll(void)
{
	int i, cycles;
	enum poll_cmd arg = POLL_ONLY;

	NET_EPOCH_ASSERT();

	if (poll_handlers == 0)
		return;

	mtx_lock(&poll_mtx);
	if (!netisr_poll_scheduled) {
		mtx_unlock(&poll_mtx);
		return;
	}
	netisr_poll_scheduled = 0;
	phase = 3;
	if (residual_burst == 0) { /* first call in this tick */
		microuptime(&poll_start_t);
		if (++reg_frac_count == reg_frac) {
			arg = POLL_AND_CHECK_STATUS;
			reg_frac_count = 0;
		}

		residual_burst = poll_burst;
	}
	cycles = (residual_burst < poll_each_burst) ?
		residual_burst : poll_each_burst;
	residual_burst -= cycles;

	for (i = 0 ; i < poll_handlers ; i++)
		pr[i].handler(pr[i].ifp, arg, cycles);

	phase = 4;
	mtx_unlock(&poll_mtx);
}

/*
 * Try to register routine for polling. Returns 0 if successful
 * (and polling should be enabled), error code otherwise.
 * A device is not supposed to register itself multiple times.
 *
 * This is called from within the *_ioctl() functions.
 */
int
ether_poll_register(poll_handler_t *h, if_t ifp)
{
	int i;

	KASSERT(h != NULL, ("%s: handler is NULL", __func__));
	KASSERT(ifp != NULL, ("%s: ifp is NULL", __func__));

	mtx_lock(&poll_mtx);
	if (poll_handlers >= POLL_LIST_LEN) {
		/*
		 * List full, cannot register more entries.
		 * This should never happen; if it does, it is probably a
		 * broken driver trying to register multiple times. Checking
		 * this at runtime is expensive, and won't solve the problem
		 * anyway, so just report a few times and then give up.
		 */
		static int verbose = 10;
		if (verbose > 0) {
			log(LOG_ERR, "poll handlers list full, "
			    "maybe a broken driver?\n");
			verbose--;
		}
		mtx_unlock(&poll_mtx);
		return (ENOMEM); /* no polling for you */
	}

	for (i = 0 ; i < poll_handlers ; i++)
		if (pr[i].ifp == ifp && pr[i].handler != NULL) {
			mtx_unlock(&poll_mtx);
			log(LOG_DEBUG, "ether_poll_register: %s: handler"
			    " already registered\n", if_name(ifp));
			return (EEXIST);
		}

	pr[poll_handlers].handler = h;
	pr[poll_handlers].ifp = ifp;
	poll_handlers++;
	mtx_unlock(&poll_mtx);
	if (idlepoll_sleeping)
		wakeup(&idlepoll_sleeping);
	return (0);
}
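
/*
 * For illustration only, the matching driver side typically toggles
 * polling from its SIOCSIFCAP ioctl handler, sketched below for a
 * hypothetical "foo" driver. foo_poll(), FOO_LOCK()/FOO_UNLOCK() and
 * foo_intr_enable()/foo_intr_disable() are assumptions, not part of
 * this file:
 *
 *	case SIOCSIFCAP:
 *		mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
 *		if (mask & IFCAP_POLLING) {
 *			if (ifr->ifr_reqcap & IFCAP_POLLING) {
 *				error = ether_poll_register(foo_poll, ifp);
 *				if (error == 0) {
 *					FOO_LOCK(sc);
 *					foo_intr_disable(sc);
 *					if_setcapenablebit(ifp,
 *					    IFCAP_POLLING, 0);
 *					FOO_UNLOCK(sc);
 *				}
 *			} else {
 *				error = ether_poll_deregister(ifp);
 *				FOO_LOCK(sc);
 *				foo_intr_enable(sc);
 *				if_setcapenablebit(ifp, 0, IFCAP_POLLING);
 *				FOO_UNLOCK(sc);
 *			}
 *		}
 *		break;
 */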

/*
 * Remove interface from the polling list. Called from *_ioctl(), too.
 */
int
ether_poll_deregister(if_t ifp)
{
	int i;

	KASSERT(ifp != NULL, ("%s: ifp is NULL", __func__));

	mtx_lock(&poll_mtx);

	for (i = 0 ; i < poll_handlers ; i++)
		if (pr[i].ifp == ifp) /* found it */
			break;
	if (i == poll_handlers) {
		log(LOG_DEBUG, "ether_poll_deregister: %s: not found!\n",
		    if_name(ifp));
		mtx_unlock(&poll_mtx);
		return (ENOENT);
	}
	poll_handlers--;
	if (i < poll_handlers) { /* Last entry replaces this one. */
		pr[i].handler = pr[poll_handlers].handler;
		pr[i].ifp = pr[poll_handlers].ifp;
	}
	mtx_unlock(&poll_mtx);
	return (0);
}

static void
poll_idle(void)
{
	struct thread *td = curthread;
	struct rtprio rtp;

	rtp.prio = RTP_PRIO_MAX;	/* lowest priority */
	rtp.type = RTP_PRIO_IDLE;
	PROC_SLOCK(td->td_proc);
	rtp_to_pri(&rtp, td);
	PROC_SUNLOCK(td->td_proc);

	for (;;) {
		if (poll_in_idle_loop && poll_handlers > 0) {
			idlepoll_sleeping = 0;
			ether_poll(poll_each_burst);
			sched_relinquish(td);
		} else {
			idlepoll_sleeping = 1;
			tsleep(&idlepoll_sleeping, 0, "pollid", hz * 3);
		}
	}
}

static struct proc *idlepoll;
static struct kproc_desc idlepoll_kp = {
	 "idlepoll",
	 poll_idle,
	 &idlepoll
};
SYSINIT(idlepoll, SI_SUB_KTHREAD_VM, SI_ORDER_ANY, kproc_start,
    &idlepoll_kp);
585