xref: /freebsd/sys/kern/kern_poll.c (revision 1e413cf93298b5b97441a21d9a50fdcd0ee9945e)
/*-
 * Copyright (c) 2001-2002 Luigi Rizzo
 *
 * Supported by: the Xorp Project (www.xorp.org)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_device_polling.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/socket.h>			/* needed by net/if.h		*/
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>

#include <net/if.h>			/* for IFF_* flags		*/
#include <net/netisr.h>			/* for NETISR_POLL		*/

#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/kthread.h>

static void netisr_poll(void);		/* the two netisr handlers      */
static void netisr_pollmore(void);
static int poll_switch(SYSCTL_HANDLER_ARGS);

void hardclock_device_poll(void);	/* hook from hardclock		*/
void ether_poll(int);			/* polling in idle loop		*/

static struct mtx	poll_mtx;
/*
 * Polling support for [network] device drivers.
 *
 * Drivers which support this feature can register with the
 * polling code.
 *
 * If registration is successful, the driver must disable interrupts,
 * and further I/O is performed through the handler, which is invoked
 * (at least once per clock tick) with 3 arguments: the "arg" passed at
 * register time (a struct ifnet pointer), a command, and a "count" limit.
 *
 * The command can be one of the following:
 *  POLL_ONLY: quick move of "count" packets from input/output queues.
 *  POLL_AND_CHECK_STATUS: as above, plus check status registers or do
 *	other more expensive operations. This command is issued periodically
 *	but less frequently than POLL_ONLY.
 *
 * The count limit specifies how much work the handler can do during the
 * call -- typically this is the number of packets to be received or
 * transmitted (drivers are free to interpret this number, as long
 * as the max time spent in the function grows roughly linearly with the
 * count).
 *
 * Polling is enabled and disabled by setting the IFCAP_POLLING flag on
 * the interface. The driver ioctl handler should register the interface
 * with the polling code and disable interrupts if registration succeeds.
 *
 * A second variable controls the sharing of CPU between polling/kernel
 * network processing, and other activities (typically userlevel tasks):
 * kern.polling.user_frac (between 0 and 100, default 50) sets the share
 * of CPU allocated to user tasks. CPU is allocated proportionally to the
 * shares, by dynamically adjusting the "count" (poll_burst).
 *
 * Other parameters should be left at their default values.
 * The following constraints hold:
 *
 *	1 <= poll_each_burst <= poll_burst <= poll_burst_max
 *	MIN_POLL_BURST_MAX <= poll_burst_max <= MAX_POLL_BURST_MAX
 */
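
/*
 * For illustration only: a driver-side handler might look like the
 * sketch below. The "foo" names (foo_softc, foo_rxeof, foo_txeof,
 * foo_check_status) are hypothetical, not part of this file or of any
 * particular driver; the signature is that of poll_handler_t.
 *
 *	static void
 *	foo_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
 *	{
 *		struct foo_softc *sc = ifp->if_softc;
 *
 *		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
 *			return;
 *		foo_rxeof(sc, count);	-- receive at most "count" packets
 *		foo_txeof(sc);		-- reclaim completed transmissions
 *		if (cmd == POLL_AND_CHECK_STATUS)
 *			foo_check_status(sc);	-- the expensive part
 *	}
 */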

#define MIN_POLL_BURST_MAX	10
#define MAX_POLL_BURST_MAX	1000

static uint32_t poll_burst = 5;
static uint32_t poll_burst_max = 150;	/* good for 100Mbit net and HZ=1000 */
static uint32_t poll_each_burst = 5;

SYSCTL_NODE(_kern, OID_AUTO, polling, CTLFLAG_RW, 0,
	"Device polling parameters");

SYSCTL_UINT(_kern_polling, OID_AUTO, burst, CTLFLAG_RD,
	&poll_burst, 0, "Current polling burst size");

static int poll_burst_max_sysctl(SYSCTL_HANDLER_ARGS)
{
	uint32_t val = poll_burst_max;
	int error;

	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return (error);
	if (val < MIN_POLL_BURST_MAX || val > MAX_POLL_BURST_MAX)
		return (EINVAL);

	mtx_lock(&poll_mtx);
	poll_burst_max = val;
	if (poll_burst > poll_burst_max)
		poll_burst = poll_burst_max;
	if (poll_each_burst > poll_burst_max)	/* keep within the new limit */
		poll_each_burst = poll_burst_max;
	mtx_unlock(&poll_mtx);

	return (0);
}
SYSCTL_PROC(_kern_polling, OID_AUTO, burst_max, CTLTYPE_UINT | CTLFLAG_RW,
	0, sizeof(uint32_t), poll_burst_max_sysctl, "I",
	"Max Polling burst size");

static int poll_each_burst_sysctl(SYSCTL_HANDLER_ARGS)
{
	uint32_t val = poll_each_burst;
	int error;

	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return (error);
	if (val < 1)
		return (EINVAL);

	mtx_lock(&poll_mtx);
	if (val > poll_burst_max) {
		mtx_unlock(&poll_mtx);
		return (EINVAL);
	}
	poll_each_burst = val;
	mtx_unlock(&poll_mtx);

	return (0);
}
SYSCTL_PROC(_kern_polling, OID_AUTO, each_burst, CTLTYPE_UINT | CTLFLAG_RW,
	0, sizeof(uint32_t), poll_each_burst_sysctl, "I",
	"Max size of each burst");

static uint32_t poll_in_idle_loop = 0;	/* do we poll in the idle loop? */
SYSCTL_UINT(_kern_polling, OID_AUTO, idle_poll, CTLFLAG_RW,
	&poll_in_idle_loop, 0, "Enable device polling in idle loop");

static uint32_t user_frac = 50;
static int user_frac_sysctl(SYSCTL_HANDLER_ARGS)
{
	uint32_t val = user_frac;
	int error;

	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return (error);
	if (val > 99)		/* val is unsigned, no lower bound needed */
		return (EINVAL);

	mtx_lock(&poll_mtx);
	user_frac = val;
	mtx_unlock(&poll_mtx);

	return (0);
}
SYSCTL_PROC(_kern_polling, OID_AUTO, user_frac, CTLTYPE_UINT | CTLFLAG_RW,
	0, sizeof(uint32_t), user_frac_sysctl, "I",
	"Desired user fraction of cpu time");

static uint32_t reg_frac_count = 0;
static uint32_t reg_frac = 20;
static int reg_frac_sysctl(SYSCTL_HANDLER_ARGS)
{
	uint32_t val = reg_frac;
	int error;

	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return (error);
	if (val < 1 || val > hz)
		return (EINVAL);

	mtx_lock(&poll_mtx);
	reg_frac = val;
	if (reg_frac_count >= reg_frac)
		reg_frac_count = 0;
	mtx_unlock(&poll_mtx);

	return (0);
}
SYSCTL_PROC(_kern_polling, OID_AUTO, reg_frac, CTLTYPE_UINT | CTLFLAG_RW,
	0, sizeof(uint32_t), reg_frac_sysctl, "I",
	"Check status registers every this many polling cycles");

static uint32_t short_ticks;
SYSCTL_UINT(_kern_polling, OID_AUTO, short_ticks, CTLFLAG_RD,
	&short_ticks, 0, "Hardclock ticks shorter than they should be");

static uint32_t lost_polls;
SYSCTL_UINT(_kern_polling, OID_AUTO, lost_polls, CTLFLAG_RD,
	&lost_polls, 0, "How many times we would have lost a poll tick");

static uint32_t pending_polls;
SYSCTL_UINT(_kern_polling, OID_AUTO, pending_polls, CTLFLAG_RD,
	&pending_polls, 0, "Do we need to poll again");

static int residual_burst = 0;
SYSCTL_INT(_kern_polling, OID_AUTO, residual_burst, CTLFLAG_RD,
	&residual_burst, 0, "# of residual cycles in burst");

static uint32_t poll_handlers; /* next free entry in pr[]. */
SYSCTL_UINT(_kern_polling, OID_AUTO, handlers, CTLFLAG_RD,
	&poll_handlers, 0, "Number of registered poll handlers");

static int polling = 0;
SYSCTL_PROC(_kern_polling, OID_AUTO, enable, CTLTYPE_UINT | CTLFLAG_RW,
	0, sizeof(int), poll_switch, "I", "Switch polling for all interfaces");

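/*
 * "phase" records where we are in the polling cycle, as set below:
 * 0 when idle, 1/2 while hardclock is scheduling (has scheduled) the
 * netisrs, 3/4 while netisr_poll is running (has finished), and 5/6
 * while netisr_pollmore is running (has rescheduled another cycle).
 */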
static uint32_t phase;
SYSCTL_UINT(_kern_polling, OID_AUTO, phase, CTLFLAG_RD,
	&phase, 0, "Polling phase");

static uint32_t suspect;
SYSCTL_UINT(_kern_polling, OID_AUTO, suspect, CTLFLAG_RD,
	&suspect, 0, "suspect event");

static uint32_t stalled;
SYSCTL_UINT(_kern_polling, OID_AUTO, stalled, CTLFLAG_RD,
	&stalled, 0, "potential stalls");

static uint32_t idlepoll_sleeping; /* idlepoll is sleeping */
SYSCTL_UINT(_kern_polling, OID_AUTO, idlepoll_sleeping, CTLFLAG_RD,
	&idlepoll_sleeping, 0, "idlepoll is sleeping");


#define POLL_LIST_LEN  128
struct pollrec {
	poll_handler_t	*handler;
	struct ifnet	*ifp;
};

static struct pollrec pr[POLL_LIST_LEN];

static void
init_device_poll(void)
{

	mtx_init(&poll_mtx, "polling", NULL, MTX_DEF);
	netisr_register(NETISR_POLL, (netisr_t *)netisr_poll, NULL,
	    NETISR_MPSAFE);
	netisr_register(NETISR_POLLMORE, (netisr_t *)netisr_pollmore, NULL,
	    NETISR_MPSAFE);
}
SYSINIT(device_poll, SI_SUB_CLOCKS, SI_ORDER_MIDDLE, init_device_poll, NULL)


/*
 * Hook from hardclock. Tries to schedule a netisr, but keeps track
 * of lost ticks due to the previous handler taking too long.
 * Normally this should not happen, because the polling handler should
 * run for a short time. However, in some cases (e.g. when there are
 * changes in link status etc.) the drivers take a very long time
 * (even on the order of milliseconds) to reset and reconfigure the
 * device, causing apparent lost polls.
 *
 * The first part of the code is just for debugging purposes, and tries
 * to count how often hardclock ticks are shorter than they should be,
 * meaning either stray interrupts or delayed events.
 */
void
hardclock_device_poll(void)
{
	static struct timeval prev_t, t;
	int delta;

	if (poll_handlers == 0)
		return;

	microuptime(&t);
	delta = (t.tv_usec - prev_t.tv_usec) +
		(t.tv_sec - prev_t.tv_sec)*1000000;
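	/*
	 * One tick nominally lasts 1000000/hz us, so the test below
	 * ("delta * hz < 500000") detects an interval shorter than
	 * half a tick since the previous call.
	 */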
	if (delta * hz < 500000)
		short_ticks++;
	else
		prev_t = t;

	if (pending_polls > 100) {
		/*
		 * Too much, assume it has stalled (not always true,
		 * see comment above).
		 */
		stalled++;
		pending_polls = 0;
		phase = 0;
	}

	if (phase <= 2) {
		if (phase != 0)
			suspect++;
		phase = 1;
		schednetisrbits(1 << NETISR_POLL | 1 << NETISR_POLLMORE);
		phase = 2;
	}
	if (pending_polls++ > 0)
		lost_polls++;
}

/*
 * ether_poll is called from the idle loop.
 */
void
ether_poll(int count)
{
	int i;

	mtx_lock(&poll_mtx);

	if (count > poll_each_burst)
		count = poll_each_burst;

	for (i = 0; i < poll_handlers; i++)
		pr[i].handler(pr[i].ifp, POLL_ONLY, count);

	mtx_unlock(&poll_mtx);
}

/*
 * netisr_pollmore is called after other netisrs, possibly scheduling
 * another NETISR_POLL call, or adapting the burst size for the next cycle.
 *
 * It is very bad to fetch large bursts of packets from a single card at once,
 * because the burst could take a long time to be completely processed, or
 * could saturate the intermediate queue (ipintrq or similar) leading to
 * losses or unfairness. To reduce the problem, and also to account better for
 * time spent in network-related processing, we split the burst into smaller
 * chunks of fixed size, giving control to the other netisrs between chunks.
 * This helps to improve fairness, to reduce livelock (because we more
 * closely emulate the "process to completion" that we have with
 * fastforwarding), and to account for the work performed in low level
 * handling and forwarding.
 */
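
/*
 * For example, with the defaults above (poll_each_burst = 5,
 * poll_burst_max = 150), a tick that starts with poll_burst at its
 * maximum is handled in up to 150 / 5 = 30 chunks, with the other
 * netisrs given a chance to run between chunks.
 */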

static struct timeval poll_start_t;

static void
netisr_pollmore(void)
{
	struct timeval t;
	int kern_load;

	mtx_lock(&poll_mtx);
	phase = 5;
	if (residual_burst > 0) {
		schednetisrbits(1 << NETISR_POLL | 1 << NETISR_POLLMORE);
		mtx_unlock(&poll_mtx);
		/* will run immediately on return, followed by netisrs */
		return;
	}
	/* here we can account for time spent in netisrs this tick */
	microuptime(&t);
	kern_load = (t.tv_usec - poll_start_t.tv_usec) +
		(t.tv_sec - poll_start_t.tv_sec)*1000000;	/* us */
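	/*
	 * A full tick lasts 1000000/hz us, so the share of the tick
	 * spent in the netisrs is kern_load * 100 / (1000000 / hz),
	 * i.e. (kern_load * hz) / 10000 as computed below.
	 */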
	kern_load = (kern_load * hz) / 10000;			/* 0..100 */
	if (kern_load > (100 - user_frac)) {	/* try to decrease burst */
		if (poll_burst > 1)
			poll_burst--;
	} else {
		if (poll_burst < poll_burst_max)
			poll_burst++;
	}

	pending_polls--;
	if (pending_polls == 0) /* we are done */
		phase = 0;
	else {
		/*
		 * Last cycle was long and caused us to miss one or more
		 * hardclock ticks. Restart processing again, but slightly
		 * reduce the burst size to prevent this from happening again.
		 */
		poll_burst -= (poll_burst / 8);
		if (poll_burst < 1)
			poll_burst = 1;
		schednetisrbits(1 << NETISR_POLL | 1 << NETISR_POLLMORE);
		phase = 6;
	}
	mtx_unlock(&poll_mtx);
}

/*
 * netisr_poll is scheduled by schednetisr when appropriate, typically once
 * per tick.
 */
static void
netisr_poll(void)
{
	int i, cycles;
	enum poll_cmd arg = POLL_ONLY;

	mtx_lock(&poll_mtx);
	phase = 3;
	if (residual_burst == 0) { /* first call in this tick */
		microuptime(&poll_start_t);
		if (++reg_frac_count == reg_frac) {
			arg = POLL_AND_CHECK_STATUS;
			reg_frac_count = 0;
		}

		residual_burst = poll_burst;
	}
	cycles = (residual_burst < poll_each_burst) ?
		residual_burst : poll_each_burst;
	residual_burst -= cycles;

	for (i = 0; i < poll_handlers; i++)
		pr[i].handler(pr[i].ifp, arg, cycles);

	phase = 4;
	mtx_unlock(&poll_mtx);
}

/*
 * Try to register a routine for polling. Returns 0 if successful
 * (and polling should be enabled), an error code otherwise.
 * A device is not supposed to register itself multiple times.
 *
 * This is called from within the *_ioctl() functions.
 */
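/*
 * For illustration only: a hypothetical driver "foo" would typically
 * call this from the SIOCSIFCAP case of its ioctl routine, roughly as
 * in the sketch below (foo_poll and the interrupt enable/disable
 * helpers are made-up names, not a real driver API):
 *
 *	if (mask & IFCAP_POLLING) {
 *		if (ifr->ifr_reqcap & IFCAP_POLLING) {
 *			error = ether_poll_register(foo_poll, ifp);
 *			if (error == 0) {
 *				foo_intr_disable(sc);
 *				ifp->if_capenable |= IFCAP_POLLING;
 *			}
 *		} else {
 *			error = ether_poll_deregister(ifp);
 *			foo_intr_enable(sc);
 *			ifp->if_capenable &= ~IFCAP_POLLING;
 *		}
 *	}
 */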
int
ether_poll_register(poll_handler_t *h, struct ifnet *ifp)
{
	int i;

	KASSERT(h != NULL, ("%s: handler is NULL", __func__));
	KASSERT(ifp != NULL, ("%s: ifp is NULL", __func__));

	mtx_lock(&poll_mtx);
	if (poll_handlers >= POLL_LIST_LEN) {
		/*
		 * List full, cannot register more entries.
		 * This should never happen; if it does, it is probably a
		 * broken driver trying to register multiple times. Checking
		 * this at runtime is expensive, and won't solve the problem
		 * anyway, so just report a few times and then give up.
		 */
		static int verbose = 10;
		if (verbose > 0) {
			log(LOG_ERR, "poll handlers list full, "
			    "maybe a broken driver?\n");
			verbose--;
		}
		mtx_unlock(&poll_mtx);
		return (ENOMEM); /* no polling for you */
	}

	for (i = 0; i < poll_handlers; i++)
		if (pr[i].ifp == ifp && pr[i].handler != NULL) {
			mtx_unlock(&poll_mtx);
			log(LOG_DEBUG, "ether_poll_register: %s: handler"
			    " already registered\n", ifp->if_xname);
			return (EEXIST);
		}

	pr[poll_handlers].handler = h;
	pr[poll_handlers].ifp = ifp;
	poll_handlers++;
	mtx_unlock(&poll_mtx);
	if (idlepoll_sleeping)
		wakeup(&idlepoll_sleeping);
	return (0);
}

/*
 * Remove interface from the polling list. Called from *_ioctl(), too.
 */
int
ether_poll_deregister(struct ifnet *ifp)
{
	int i;

	KASSERT(ifp != NULL, ("%s: ifp is NULL", __func__));

	mtx_lock(&poll_mtx);

	for (i = 0; i < poll_handlers; i++)
		if (pr[i].ifp == ifp) /* found it */
			break;
	if (i == poll_handlers) {
		log(LOG_DEBUG, "ether_poll_deregister: %s: not found!\n",
		    ifp->if_xname);
		mtx_unlock(&poll_mtx);
		return (ENOENT);
	}
	poll_handlers--;
	if (i < poll_handlers) { /* Last entry replaces this one. */
		pr[i].handler = pr[poll_handlers].handler;
		pr[i].ifp = pr[poll_handlers].ifp;
	}
	mtx_unlock(&poll_mtx);
	return (0);
}

/*
 * Legacy interface for turning polling on for all interfaces at once.
 */
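/*
 * The preferred, per-interface way to toggle polling is via ifconfig(8),
 * e.g. "ifconfig em0 polling" / "ifconfig em0 -polling" (interface name
 * illustrative), which reaches the driver's SIOCSIFCAP handler.
 */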
static int
poll_switch(SYSCTL_HANDLER_ARGS)
{
	struct ifnet *ifp;
	int error;
	int val = polling;

	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return (error);

	if (val == polling)
		return (0);

	if (val < 0 || val > 1)
		return (EINVAL);

	polling = val;

	IFNET_RLOCK();
	TAILQ_FOREACH(ifp, &ifnet, if_link) {
		if (ifp->if_capabilities & IFCAP_POLLING) {
			struct ifreq ifr;

			if (val == 1)
				ifr.ifr_reqcap =
				    ifp->if_capenable | IFCAP_POLLING;
			else
				ifr.ifr_reqcap =
				    ifp->if_capenable & ~IFCAP_POLLING;
			IFF_LOCKGIANT(ifp);	/* LOR here */
			(void) (*ifp->if_ioctl)(ifp, SIOCSIFCAP, (caddr_t)&ifr);
			IFF_UNLOCKGIANT(ifp);
		}
	}
	IFNET_RUNLOCK();

	log(LOG_ERR, "kern.polling.enable is deprecated. Use ifconfig(8).\n");

	return (0);
}
563 
564 static void
565 poll_idle(void)
566 {
567 	struct thread *td = curthread;
568 	struct rtprio rtp;
569 
570 	rtp.prio = RTP_PRIO_MAX;	/* lowest priority */
571 	rtp.type = RTP_PRIO_IDLE;
572 	PROC_SLOCK(td->td_proc);
573 	rtp_to_pri(&rtp, td);
574 	PROC_SUNLOCK(td->td_proc);
575 
576 	for (;;) {
577 		if (poll_in_idle_loop && poll_handlers > 0) {
578 			idlepoll_sleeping = 0;
579 			ether_poll(poll_each_burst);
580 			thread_lock(td);
581 			mi_switch(SW_VOL, NULL);
582 			thread_unlock(td);
583 		} else {
584 			idlepoll_sleeping = 1;
585 			tsleep(&idlepoll_sleeping, 0, "pollid", hz * 3);
586 		}
587 	}
588 }
589 
590 static struct proc *idlepoll;
591 static struct kproc_desc idlepoll_kp = {
592 	 "idlepoll",
593 	 poll_idle,
594 	 &idlepoll
595 };
596 SYSINIT(idlepoll, SI_SUB_KTHREAD_VM, SI_ORDER_ANY, kproc_start, &idlepoll_kp)
597