xref: /freebsd/sys/kern/kern_poll.c (revision ee2ea5ceafed78a5bd9810beb9e3ca927180c226)
/*-
 * Copyright (c) 2001-2002 Luigi Rizzo
 *
 * Supported by: the Xorp Project (www.xorp.org)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/socket.h>			/* needed by net/if.h		*/
#include <sys/sysctl.h>

#include <net/if.h>			/* for IFF_* flags		*/
#include <net/netisr.h>			/* for NETISR_POLL		*/

#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/kthread.h>

#ifdef SMP
#ifndef COMPILING_LINT
#error DEVICE_POLLING is not compatible with SMP
#endif
#endif

static void netisr_poll(void);		/* the two netisr handlers      */
void netisr_pollmore(void);

void init_device_poll(void);		/* init routine			*/
void hardclock_device_poll(void);	/* hook from hardclock		*/
void ether_poll(int);			/* polling while in trap	*/

/*
 * Polling support for [network] device drivers.
 *
 * Drivers which support this feature try to register with the
 * polling code.
 *
 * If registration is successful, the driver must disable interrupts,
 * and further I/O is performed through the handler, which is invoked
 * (at least once per clock tick) with 3 arguments: the "arg" passed at
 * register time (a struct ifnet pointer), a command, and a "count" limit.
 *
 * The command can be one of the following:
 *  POLL_ONLY: quick move of "count" packets from input/output queues.
 *  POLL_AND_CHECK_STATUS: as above, plus check status registers or do
 *	other more expensive operations. This command is issued periodically
 *	but less frequently than POLL_ONLY.
 *  POLL_DEREGISTER: deregister and return to interrupt mode.
 *
 * The first two commands are only issued if the interface is marked as
 * 'IFF_UP and IFF_RUNNING', the last one only if IFF_RUNNING is set.
 *
 * The count limit specifies how much work the handler can do during the
 * call -- typically this is the number of packets to be received, or
 * transmitted, etc. (drivers are free to interpret this number, as long
 * as the max time spent in the function grows roughly linearly with the
 * count).
 *
 * Deregistration can be requested by the driver itself (typically in the
 * *_stop() routine), or by the polling code, by invoking the handler.
 *
 * Polling can be globally enabled or disabled with the sysctl variable
 * kern.polling.enable (default is 0, disabled).
 *
 * A second variable controls the sharing of CPU between polling/kernel
 * network processing, and other activities (typically userlevel tasks):
 * kern.polling.user_frac (between 0 and 100, default 50) sets the share
 * of CPU allocated to user tasks. CPU is allocated proportionally to the
 * shares, by dynamically adjusting the "count" (poll_burst).
 *
 * Other parameters should be left at their default values.
 * The following constraints hold:
 *
 *	1 <= poll_each_burst <= poll_burst <= poll_burst_max
 *	0 <= poll_in_trap <= poll_each_burst
 *	MIN_POLL_BURST_MAX <= poll_burst_max <= MAX_POLL_BURST_MAX
 */
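
/*
 * For illustration, a conforming handler might look like the disabled
 * sketch below; the xx_* driver names are hypothetical and not part of
 * this file.
 */
#if 0
static void
xx_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct xx_softc *sc = ifp->if_softc;

	if (cmd == POLL_DEREGISTER) {	/* final call, reenable interrupts */
		xx_enable_intr(sc);
		return;
	}
	xx_rxeof(sc, count);		/* receive at most "count" packets */
	xx_txeof(sc);			/* reclaim completed transmissions */
	if (cmd == POLL_AND_CHECK_STATUS)
		xx_check_status(sc);	/* rare, more expensive checks */
}
#endif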

#define MIN_POLL_BURST_MAX	10
#define MAX_POLL_BURST_MAX	1000

SYSCTL_NODE(_kern, OID_AUTO, polling, CTLFLAG_RW, 0,
	"Device polling parameters");

static u_int32_t poll_burst = 5;
SYSCTL_UINT(_kern_polling, OID_AUTO, burst, CTLFLAG_RW,
	&poll_burst, 0, "Current polling burst size");

static u_int32_t poll_each_burst = 5;
SYSCTL_UINT(_kern_polling, OID_AUTO, each_burst, CTLFLAG_RW,
	&poll_each_burst, 0, "Max size of each burst");

static u_int32_t poll_burst_max = 150;	/* good for 100Mbit net and HZ=1000 */
SYSCTL_UINT(_kern_polling, OID_AUTO, burst_max, CTLFLAG_RW,
	&poll_burst_max, 0, "Max Polling burst size");

static u_int32_t poll_in_idle_loop = 1;	/* do we poll in idle loop? */
SYSCTL_UINT(_kern_polling, OID_AUTO, idle_poll, CTLFLAG_RW,
	&poll_in_idle_loop, 0, "Enable device polling in idle loop");

u_int32_t poll_in_trap;			/* used in trap.c */
SYSCTL_UINT(_kern_polling, OID_AUTO, poll_in_trap, CTLFLAG_RW,
	&poll_in_trap, 0, "Poll burst size during a trap");

static u_int32_t user_frac = 50;
SYSCTL_UINT(_kern_polling, OID_AUTO, user_frac, CTLFLAG_RW,
	&user_frac, 0, "Desired user fraction of cpu time");

static u_int32_t reg_frac = 20;
SYSCTL_UINT(_kern_polling, OID_AUTO, reg_frac, CTLFLAG_RW,
	&reg_frac, 0, "Check status registers every this many polling cycles");

static u_int32_t short_ticks;
SYSCTL_UINT(_kern_polling, OID_AUTO, short_ticks, CTLFLAG_RW,
	&short_ticks, 0, "Hardclock ticks shorter than they should be");

static u_int32_t lost_polls;
SYSCTL_UINT(_kern_polling, OID_AUTO, lost_polls, CTLFLAG_RW,
	&lost_polls, 0, "How many times we would have lost a poll tick");

static u_int32_t pending_polls;
SYSCTL_UINT(_kern_polling, OID_AUTO, pending_polls, CTLFLAG_RW,
	&pending_polls, 0, "Do we need to poll again");

static int residual_burst = 0;
SYSCTL_INT(_kern_polling, OID_AUTO, residual_burst, CTLFLAG_RW,
	&residual_burst, 0, "# of residual cycles in burst");

static u_int32_t poll_handlers; /* next free entry in pr[]. */
SYSCTL_UINT(_kern_polling, OID_AUTO, handlers, CTLFLAG_RD,
	&poll_handlers, 0, "Number of registered poll handlers");

static int polling = 0;		/* global polling enable */
SYSCTL_INT(_kern_polling, OID_AUTO, enable, CTLFLAG_RW,
	&polling, 0, "Polling enabled");

static u_int32_t phase;
SYSCTL_UINT(_kern_polling, OID_AUTO, phase, CTLFLAG_RW,
	&phase, 0, "Polling phase");
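
/*
 * "phase" tracks where we are in the polling cycle, as set by the code
 * below: 0 idle, 1-2 hardclock scheduling the netisr, 3-4 entering and
 * leaving netisr_poll(), 5-6 netisr_pollmore() (6 = poll rescheduled).
 */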

static u_int32_t suspect;
SYSCTL_UINT(_kern_polling, OID_AUTO, suspect, CTLFLAG_RW,
	&suspect, 0, "suspect event");

static u_int32_t stalled;
SYSCTL_UINT(_kern_polling, OID_AUTO, stalled, CTLFLAG_RW,
	&stalled, 0, "potential stalls");

static u_int32_t idlepoll_sleeping; /* idlepoll is sleeping */
SYSCTL_UINT(_kern_polling, OID_AUTO, idlepoll_sleeping, CTLFLAG_RD,
	&idlepoll_sleeping, 0, "idlepoll is sleeping");

#define POLL_LIST_LEN  128
struct pollrec {
	poll_handler_t	*handler;
	struct ifnet	*ifp;
};

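/* Active registrations live in pr[0 .. poll_handlers-1]. */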
static struct pollrec pr[POLL_LIST_LEN];

/*
 * Register the relevant netisr. Called from kern_clock.c.
 */
void
init_device_poll(void)
{
	register_netisr(NETISR_POLL, netisr_poll);
}

/*
 * Hook from hardclock. Tries to schedule a netisr, but keeps track
 * of lost ticks due to the previous handler taking too long.
 * The first part of the code is just for debugging purposes, and tries
 * to count how often hardclock ticks are shorter than they should be,
 * meaning either stray interrupts or delayed events.
 */
void
hardclock_device_poll(void)
{
	static struct timeval prev_t, t;
	int delta;

	if (poll_handlers == 0)
		return;

	microuptime(&t);
	delta = (t.tv_usec - prev_t.tv_usec) +
		(t.tv_sec - prev_t.tv_sec)*1000000;
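	/* a tick is 1000000/hz us; flag intervals shorter than half a tick */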
	if (delta * hz < 500000)
		short_ticks++;
	else
		prev_t = t;

	if (pending_polls > 100) {
		/* too much, assume it has stalled */
		stalled++;
		printf("poll stalled [%d] in phase %d\n",
			stalled, phase);
		pending_polls = 0;
		phase = 0;
	}

	if (phase <= 2) {
		if (phase != 0)
			suspect++;
		phase = 1;
		schednetisr(NETISR_POLL);
		phase = 2;
	}
	if (pending_polls++ > 0)
		lost_polls++;
}

/*
 * ether_poll is called from the idle loop or from the trap handler.
 */
void
ether_poll(int count)
{
	int i;

	mtx_lock(&Giant);

	if (count > poll_each_burst)
		count = poll_each_burst;
	for (i = 0 ; i < poll_handlers ; i++)
		if (pr[i].handler && (IFF_UP|IFF_RUNNING) ==
		    (pr[i].ifp->if_flags & (IFF_UP|IFF_RUNNING)) )
			pr[i].handler(pr[i].ifp, POLL_ONLY, count); /* quick check */
	mtx_unlock(&Giant);
}

/*
 * netisr_pollmore is called after other netisr's, possibly scheduling
 * another NETISR_POLL call, or adapting the burst size for the next cycle.
 *
 * It is very bad to fetch large bursts of packets from a single card at once,
 * because the burst could take a long time to be completely processed, or
 * could saturate the intermediate queue (ipintrq or similar) leading to
 * losses or unfairness. To reduce the problem, and also to account better for
 * time spent in network-related processing, we split the burst in smaller
 * chunks of fixed size, giving control to the other netisr's between chunks.
 * This helps improve fairness, reduces livelock (because we emulate more
 * closely the "process to completion" behavior that we have with
 * fastforwarding), and accounts for the work performed in low level
 * handling and forwarding.
 */

static struct timeval poll_start_t;

void
netisr_pollmore(void)
{
	struct timeval t;
	int kern_load;
	/* XXX run at splhigh() or equivalent */

	phase = 5;
	if (residual_burst > 0) {
		schednetisr(NETISR_POLL);
		/* will run immediately on return, followed by netisrs */
		return;
	}
	/* here we can account time spent in netisr's in this tick */
	microuptime(&t);
	kern_load = (t.tv_usec - poll_start_t.tv_usec) +
		(t.tv_sec - poll_start_t.tv_sec)*1000000;	/* us */
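	/* one tick is 1000000/hz us; us*hz/10000 == us*100/(1000000/hz) */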
	kern_load = (kern_load * hz) / 10000;			/* 0..100 */
	if (kern_load > (100 - user_frac)) { /* try decrease ticks */
		if (poll_burst > 1)
			poll_burst--;
	} else {
		if (poll_burst < poll_burst_max)
			poll_burst++;
	}

	pending_polls--;
	if (pending_polls == 0) /* we are done */
		phase = 0;
	else {
		/*
		 * Last cycle was long and caused us to miss one or more
		 * hardclock ticks. Restart processing again, but slightly
		 * reduce the burst size to prevent this from happening again.
		 */
		poll_burst -= (poll_burst / 8);
		if (poll_burst < 1)
			poll_burst = 1;
		schednetisr(NETISR_POLL);
		phase = 6;
	}
}

/*
 * netisr_poll is scheduled by schednetisr when appropriate, typically once
 * per tick. It grabs Giant (rather than the historical splnet() to splimp()
 * upgrade) and calls all registered handlers.
 */
static void
netisr_poll(void)
{
	static int reg_frac_count;
	int i, cycles;
	enum poll_cmd arg = POLL_ONLY;

	mtx_lock(&Giant);

	phase = 3;
	if (residual_burst == 0) { /* first call in this tick */
		microuptime(&poll_start_t);
		/*
		 * Check that parameters are consistent with runtime
		 * variables. Some of these tests could be done at sysctl
		 * time, but the savings would be very limited because we
		 * still have to check against reg_frac_count and
		 * poll_each_burst. So, instead of writing separate sysctl
		 * handlers, we do it all here.
		 */

		if (reg_frac > hz)
			reg_frac = hz;
		else if (reg_frac < 1)
			reg_frac = 1;
		if (reg_frac_count > reg_frac)
			reg_frac_count = reg_frac - 1;
		if (reg_frac_count-- == 0) {
			arg = POLL_AND_CHECK_STATUS;
			reg_frac_count = reg_frac - 1;
		}
		if (poll_burst_max < MIN_POLL_BURST_MAX)
			poll_burst_max = MIN_POLL_BURST_MAX;
		else if (poll_burst_max > MAX_POLL_BURST_MAX)
			poll_burst_max = MAX_POLL_BURST_MAX;

		if (poll_each_burst < 1)
			poll_each_burst = 1;
		else if (poll_each_burst > poll_burst_max)
			poll_each_burst = poll_burst_max;

		residual_burst = poll_burst;
	}
	cycles = (residual_burst < poll_each_burst) ?
		residual_burst : poll_each_burst;
	residual_burst -= cycles;

	if (polling) {
		for (i = 0 ; i < poll_handlers ; i++)
			if (pr[i].handler && (IFF_UP|IFF_RUNNING) ==
			    (pr[i].ifp->if_flags & (IFF_UP|IFF_RUNNING)) )
				pr[i].handler(pr[i].ifp, arg, cycles);
	} else {	/* unregister */
		for (i = 0 ; i < poll_handlers ; i++) {
			if (pr[i].handler &&
			    pr[i].ifp->if_flags & IFF_RUNNING) {
				pr[i].ifp->if_ipending &= ~IFF_POLLING;
				pr[i].handler(pr[i].ifp, POLL_DEREGISTER, 1);
			}
			pr[i].handler = NULL;
		}
		residual_burst = 0;
		poll_handlers = 0;
	}
	/* on -stable, schednetisr(NETISR_POLLMORE); */
	phase = 4;
	mtx_unlock(&Giant);
}

/*
 * Try to register routine for polling. Returns 1 if successful
 * (and polling should be enabled), 0 otherwise.
 * A device is not supposed to register itself multiple times.
 *
 * This is called from within the *_intr() functions, so we do not need
 * further locking.
 */
int
ether_poll_register(poll_handler_t *h, struct ifnet *ifp)
{
	int s;

	if (polling == 0) /* polling disabled, cannot register */
		return 0;
	if (h == NULL || ifp == NULL)		/* bad arguments	*/
		return 0;
	if ( !(ifp->if_flags & IFF_UP) )	/* must be up		*/
		return 0;
	if (ifp->if_ipending & IFF_POLLING)	/* already polling	*/
		return 0;

	s = splhigh();
	if (poll_handlers >= POLL_LIST_LEN) {
		/*
		 * List full, cannot register more entries.
		 * This should never happen; if it does, it is probably a
		 * broken driver trying to register multiple times. Checking
		 * this at runtime is expensive, and won't solve the problem
		 * anyways, so just report a few times and then give up.
		 */
		static int verbose = 10;
		splx(s);
		if (verbose > 0) {
			printf("poll handlers list full, "
				"maybe a broken driver?\n");
			verbose--;
		}
		return 0; /* no polling for you */
	}

	pr[poll_handlers].handler = h;
	pr[poll_handlers].ifp = ifp;
	poll_handlers++;
	ifp->if_ipending |= IFF_POLLING;
	splx(s);
	if (idlepoll_sleeping)
		wakeup(&idlepoll_sleeping);
	return 1; /* polling enabled in next call */
}
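
/*
 * Typical use from a hypothetical driver (a disabled sketch only; the
 * xx_* names are not part of this file): the interrupt handler tries to
 * switch to polling mode, and the xx_poll handler sketched near the top
 * of this file does the rest.
 */
#if 0
static void
xx_intr(void *arg)
{
	struct xx_softc *sc = arg;
	struct ifnet *ifp = &sc->arpcom.ac_if;

	if (ether_poll_register(xx_poll, ifp)) {
		xx_disable_intr(sc);	/* interrupts off while polling */
		return;
	}
	/* ... normal interrupt processing ... */
}
#endif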

/*
 * Remove interface from the polling list. Normally called by *_stop().
 * It is not an error to call it with IFF_POLLING clear: the call is rare
 * enough that paying for one extra function call here is preferable to
 * adding a test to every driver.
 */
int
ether_poll_deregister(struct ifnet *ifp)
{
	int i;

	mtx_lock(&Giant);
	if ( !ifp || !(ifp->if_ipending & IFF_POLLING) ) {
		mtx_unlock(&Giant);
		return 0;
	}
	for (i = 0 ; i < poll_handlers ; i++)
		if (pr[i].ifp == ifp) /* found it */
			break;
	ifp->if_ipending &= ~IFF_POLLING; /* found or not... */
	if (i == poll_handlers) {
		mtx_unlock(&Giant);
		printf("ether_poll_deregister: ifp not found!!!\n");
		return 0;
	}
	poll_handlers--;
	if (i < poll_handlers) { /* Last entry replaces this one. */
		pr[i].handler = pr[poll_handlers].handler;
		pr[i].ifp = pr[poll_handlers].ifp;
	}
	mtx_unlock(&Giant);
	return 1;
}

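/*
 * The idle-time polling kthread: runs at the lowest (idle) priority,
 * polls all registered devices whenever nothing else wants the CPU, and
 * sleeps when idle polling is disabled or no handlers are registered.
 */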
static void
poll_idle(void)
{
	struct thread *td = curthread;
	struct rtprio rtp;
	int pri;

	rtp.prio = RTP_PRIO_MAX;	/* lowest priority */
	rtp.type = RTP_PRIO_IDLE;
	mtx_lock_spin(&sched_lock);
	rtp_to_pri(&rtp, td->td_ksegrp);
	pri = td->td_priority;
	mtx_unlock_spin(&sched_lock);

	for (;;) {
		if (poll_in_idle_loop && poll_handlers > 0) {
			idlepoll_sleeping = 0;
			mtx_lock(&Giant);
			ether_poll(poll_each_burst);
			mtx_unlock(&Giant);
			mtx_assert(&Giant, MA_NOTOWNED);
			mtx_lock_spin(&sched_lock);
			setrunqueue(td);
			td->td_proc->p_stats->p_ru.ru_nvcsw++;
			mi_switch();
			mtx_unlock_spin(&sched_lock);
		} else {
			idlepoll_sleeping = 1;
			tsleep(&idlepoll_sleeping, pri, "pollid", hz * 3);
		}
	}
}

static struct proc *idlepoll;
static struct kproc_desc idlepoll_kp = {
	 "idlepoll",
	 poll_idle,
	 &idlepoll
};
SYSINIT(idlepoll, SI_SUB_KTHREAD_VM, SI_ORDER_ANY, kproc_start, &idlepoll_kp)
517