xref: /freebsd/sys/kern/kern_poll.c (revision 685dc743dc3b5645e34836464128e1c0558b404b)
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2001-2002 Luigi Rizzo
 *
 * Supported by: the Xorp Project (www.xorp.org)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include "opt_device_polling.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/proc.h>
#include <sys/epoch.h>
#include <sys/eventhandler.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/socket.h>			/* needed by net/if.h		*/
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/netisr.h>			/* for NETISR_POLL		*/
#include <net/vnet.h>

void hardclock_device_poll(void);	/* hook from hardclock		*/

static struct mtx	poll_mtx;

/*
 * Polling support for [network] device drivers.
 *
 * Drivers which support this feature can register with the
 * polling code.
 *
 * If registration is successful, the driver must disable interrupts,
 * and further I/O is performed through the handler, which is invoked
 * (at least once per clock tick) with 3 arguments: the "arg" passed at
 * register time (a struct ifnet pointer), a command, and a "count" limit.
 *
 * The command can be one of the following:
 *  POLL_ONLY: quick move of "count" packets from input/output queues.
 *  POLL_AND_CHECK_STATUS: as above, plus check status registers or do
 *	other more expensive operations. This command is issued periodically
 *	but less frequently than POLL_ONLY.
 *
 * The count limit specifies how much work the handler can do during the
 * call -- typically this is the number of packets to be received or
 * transmitted, etc. (drivers are free to interpret this number, as long
 * as the maximum time spent in the function grows roughly linearly with
 * the count).
 *
 * Polling is enabled and disabled by setting the IFCAP_POLLING flag on
 * the interface. The driver's ioctl handler should register the interface
 * with the polling code and disable interrupts if registration succeeds.
 *
 * A second variable controls the sharing of CPU between polling/kernel
 * network processing, and other activities (typically userlevel tasks):
 * kern.polling.user_frac (between 0 and 100, default 50) sets the share
 * of CPU allocated to user tasks. CPU is allocated proportionally to the
 * shares, by dynamically adjusting the "count" (poll_burst).
 *
 * Other parameters should be left at their default values.
 * The following constraints hold:
 *
 *	1 <= poll_each_burst <= poll_burst <= poll_burst_max
 *	MIN_POLL_BURST_MAX <= poll_burst_max <= MAX_POLL_BURST_MAX
 */
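
/*
 * For illustration, a minimal sketch of a driver poll handler; the
 * driver name "xx" and its helpers xx_rxeof() and xx_check_status()
 * are hypothetical. A handler moves at most "count" packets, performs
 * its expensive checks only for POLL_AND_CHECK_STATUS, and returns the
 * number of packets processed:
 *
 *	static int
 *	xx_poll(if_t ifp, enum poll_cmd cmd, int count)
 *	{
 *		struct xx_softc *sc = if_getsoftc(ifp);
 *		int rx_npkts;
 *
 *		rx_npkts = xx_rxeof(sc, count);
 *		if (cmd == POLL_AND_CHECK_STATUS)
 *			xx_check_status(sc);
 *		return (rx_npkts);
 *	}
 */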

#define MIN_POLL_BURST_MAX	10
#define MAX_POLL_BURST_MAX	20000

static uint32_t poll_burst = 5;
static uint32_t poll_burst_max = 150;	/* good for 100Mbit net and HZ=1000 */
static uint32_t poll_each_burst = 5;

static SYSCTL_NODE(_kern, OID_AUTO, polling, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "Device polling parameters");

SYSCTL_UINT(_kern_polling, OID_AUTO, burst, CTLFLAG_RD,
	&poll_burst, 0, "Current polling burst size");

static int	netisr_poll_scheduled;
static int	netisr_pollmore_scheduled;
static int	poll_shutting_down;

static int poll_burst_max_sysctl(SYSCTL_HANDLER_ARGS)
{
	uint32_t val = poll_burst_max;
	int error;

	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return (error);
	if (val < MIN_POLL_BURST_MAX || val > MAX_POLL_BURST_MAX)
		return (EINVAL);

	mtx_lock(&poll_mtx);
	poll_burst_max = val;
	if (poll_burst > poll_burst_max)
		poll_burst = poll_burst_max;
	if (poll_each_burst > poll_burst_max)
		poll_each_burst = MIN_POLL_BURST_MAX;
	mtx_unlock(&poll_mtx);

	return (0);
}
SYSCTL_PROC(_kern_polling, OID_AUTO, burst_max,
    CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, 0, sizeof(uint32_t),
    poll_burst_max_sysctl, "I",
    "Max Polling burst size");

static int poll_each_burst_sysctl(SYSCTL_HANDLER_ARGS)
{
	uint32_t val = poll_each_burst;
	int error;

	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return (error);
	if (val < 1)
		return (EINVAL);

	mtx_lock(&poll_mtx);
	if (val > poll_burst_max) {
		mtx_unlock(&poll_mtx);
		return (EINVAL);
	}
	poll_each_burst = val;
	mtx_unlock(&poll_mtx);

	return (0);
}
SYSCTL_PROC(_kern_polling, OID_AUTO, each_burst,
    CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, 0, sizeof(uint32_t),
    poll_each_burst_sysctl, "I",
    "Max size of each burst");

static uint32_t poll_in_idle_loop = 0;	/* do we poll in idle loop? */
SYSCTL_UINT(_kern_polling, OID_AUTO, idle_poll, CTLFLAG_RW,
	&poll_in_idle_loop, 0, "Enable device polling in idle loop");

static uint32_t user_frac = 50;
static int user_frac_sysctl(SYSCTL_HANDLER_ARGS)
{
	uint32_t val = user_frac;
	int error;

	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return (error);
	if (val > 99)
		return (EINVAL);

	mtx_lock(&poll_mtx);
	user_frac = val;
	mtx_unlock(&poll_mtx);

	return (0);
}
SYSCTL_PROC(_kern_polling, OID_AUTO, user_frac,
    CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, 0, sizeof(uint32_t),
    user_frac_sysctl, "I",
    "Desired user fraction of cpu time");

static uint32_t reg_frac_count = 0;
static uint32_t reg_frac = 20;
static int reg_frac_sysctl(SYSCTL_HANDLER_ARGS)
{
	uint32_t val = reg_frac;
	int error;

	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return (error);
	if (val < 1 || val > hz)
		return (EINVAL);

	mtx_lock(&poll_mtx);
	reg_frac = val;
	if (reg_frac_count >= reg_frac)
		reg_frac_count = 0;
	mtx_unlock(&poll_mtx);

	return (0);
}
SYSCTL_PROC(_kern_polling, OID_AUTO, reg_frac,
    CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, 0, sizeof(uint32_t),
    reg_frac_sysctl, "I",
    "Check status registers every this many polling cycles");

static uint32_t short_ticks;
SYSCTL_UINT(_kern_polling, OID_AUTO, short_ticks, CTLFLAG_RD,
	&short_ticks, 0, "Hardclock ticks shorter than they should be");

static uint32_t lost_polls;
SYSCTL_UINT(_kern_polling, OID_AUTO, lost_polls, CTLFLAG_RD,
	&lost_polls, 0, "How many times we would have lost a poll tick");

static uint32_t pending_polls;
SYSCTL_UINT(_kern_polling, OID_AUTO, pending_polls, CTLFLAG_RD,
	&pending_polls, 0, "Do we need to poll again");

static int residual_burst = 0;
SYSCTL_INT(_kern_polling, OID_AUTO, residual_burst, CTLFLAG_RD,
	&residual_burst, 0, "# of residual cycles in burst");

static uint32_t poll_handlers; /* next free entry in pr[]. */
SYSCTL_UINT(_kern_polling, OID_AUTO, handlers, CTLFLAG_RD,
	&poll_handlers, 0, "Number of registered poll handlers");

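/*
 * Rough map of the "phase" values as used by the code below:
 * 0 idle/done; 1 and 2 a poll pass has been scheduled by hardclock
 * (1 just before, 2 just after netisr_sched_poll(), so an overlapping
 * tick can tell the difference); 3 netisr_poll() running; 4 it is done;
 * 5 netisr_pollmore() running; 6 pollmore rescheduled another pass.
 */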
static uint32_t phase;
SYSCTL_UINT(_kern_polling, OID_AUTO, phase, CTLFLAG_RD,
	&phase, 0, "Polling phase");

static uint32_t suspect;
SYSCTL_UINT(_kern_polling, OID_AUTO, suspect, CTLFLAG_RD,
	&suspect, 0, "suspect event");

static uint32_t stalled;
SYSCTL_UINT(_kern_polling, OID_AUTO, stalled, CTLFLAG_RD,
	&stalled, 0, "potential stalls");

static uint32_t idlepoll_sleeping; /* idlepoll is sleeping */
SYSCTL_UINT(_kern_polling, OID_AUTO, idlepoll_sleeping, CTLFLAG_RD,
	&idlepoll_sleeping, 0, "idlepoll is sleeping");

#define POLL_LIST_LEN  128
struct pollrec {
	poll_handler_t	*handler;
	struct ifnet	*ifp;
};

static struct pollrec pr[POLL_LIST_LEN];

static void
poll_shutdown(void *arg, int howto)
{

	poll_shutting_down = 1;
}

static void
init_device_poll(void)
{

	mtx_init(&poll_mtx, "polling", NULL, MTX_DEF);
	EVENTHANDLER_REGISTER(shutdown_post_sync, poll_shutdown, NULL,
	    SHUTDOWN_PRI_LAST);
}
SYSINIT(device_poll, SI_SUB_SOFTINTR, SI_ORDER_MIDDLE, init_device_poll, NULL);

/*
 * Hook from hardclock. Tries to schedule a netisr, but keeps track
 * of lost ticks due to the previous handler taking too long.
 * Normally, this should not happen, because the polling handler
 * should run for only a short time. However, in some cases (e.g. when
 * there are changes in link status etc.) the drivers take a very long
 * time (even on the order of milliseconds) to reset and reconfigure
 * the device, causing apparent lost polls.
 *
 * The first part of the code is just for debugging purposes, and tries
 * to count how often hardclock ticks are shorter than they should be,
 * meaning either stray interrupts or delayed events.
 */
void
hardclock_device_poll(void)
{
	static struct timeval prev_t, t;
	int delta;

	if (poll_handlers == 0 || poll_shutting_down)
		return;

	microuptime(&t);
	delta = (t.tv_usec - prev_t.tv_usec) +
		(t.tv_sec - prev_t.tv_sec)*1000000;
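	/*
	 * A nominal tick is 1000000/hz us, so "delta * hz < 500000"
	 * means this tick arrived less than half a tick after the
	 * previous one.
	 */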
	if (delta * hz < 500000)
		short_ticks++;
	else
		prev_t = t;

	if (pending_polls > 100) {
		/*
		 * Too much, assume it has stalled (not always true,
		 * see comment above).
		 */
		stalled++;
		pending_polls = 0;
		phase = 0;
	}

	if (phase <= 2) {
		if (phase != 0)
			suspect++;
		phase = 1;
		netisr_poll_scheduled = 1;
		netisr_pollmore_scheduled = 1;
		netisr_sched_poll();
		phase = 2;
	}
	if (pending_polls++ > 0)
		lost_polls++;
}

/*
 * ether_poll is called from the idle loop.
 */
static void
ether_poll(int count)
{
	struct epoch_tracker et;
	int i;

	mtx_lock(&poll_mtx);

	if (count > poll_each_burst)
		count = poll_each_burst;

	NET_EPOCH_ENTER(et);
	for (i = 0; i < poll_handlers; i++)
		pr[i].handler(pr[i].ifp, POLL_ONLY, count);
	NET_EPOCH_EXIT(et);

	mtx_unlock(&poll_mtx);
}

/*
 * netisr_pollmore is called after other netisr's, possibly scheduling
 * another NETISR_POLL call, or adapting the burst size for the next cycle.
 *
 * It is very bad to fetch large bursts of packets from a single card at once,
 * because the burst could take a long time to be completely processed, or
 * could saturate the intermediate queue (ipintrq or similar) leading to
 * losses or unfairness. To reduce the problem, and also to account better for
 * time spent in network-related processing, we split the burst into smaller
 * chunks of fixed size, giving control to the other netisr's between chunks.
 * This helps improve fairness, reduces livelock (because we emulate more
 * closely the "process to completion" that we have with fastforwarding) and
 * accounts for the work performed in low level handling and forwarding.
 */

static struct timeval poll_start_t;

void
netisr_pollmore(void)
{
	struct timeval t;
	int kern_load;

	if (poll_handlers == 0)
		return;

	mtx_lock(&poll_mtx);
	if (!netisr_pollmore_scheduled) {
		mtx_unlock(&poll_mtx);
		return;
	}
	netisr_pollmore_scheduled = 0;
	phase = 5;
	if (residual_burst > 0) {
		netisr_poll_scheduled = 1;
		netisr_pollmore_scheduled = 1;
		netisr_sched_poll();
		mtx_unlock(&poll_mtx);
		/* will run immediately on return, followed by netisrs */
		return;
	}
	/* here we can account time spent in netisr's in this tick */
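	/*
	 * The scaling below turns the elapsed time into a percentage of
	 * the nominal tick: one tick is 1000000/hz us, so
	 * us * hz / 10000 == (us / (1000000/hz)) * 100.
	 */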
	microuptime(&t);
	kern_load = (t.tv_usec - poll_start_t.tv_usec) +
		(t.tv_sec - poll_start_t.tv_sec)*1000000;	/* us */
	kern_load = (kern_load * hz) / 10000;			/* 0..100 */
	if (kern_load > (100 - user_frac)) {	/* over budget, reduce burst */
		if (poll_burst > 1)
			poll_burst--;
	} else {
		if (poll_burst < poll_burst_max)
			poll_burst++;
	}

	pending_polls--;
	if (pending_polls == 0) /* we are done */
		phase = 0;
	else {
		/*
		 * Last cycle was long and caused us to miss one or more
		 * hardclock ticks. Restart processing again, but slightly
		 * reduce the burst size to prevent this from happening again.
		 */
		poll_burst -= (poll_burst / 8);
		if (poll_burst < 1)
			poll_burst = 1;
		netisr_poll_scheduled = 1;
		netisr_pollmore_scheduled = 1;
		netisr_sched_poll();
		phase = 6;
	}
	mtx_unlock(&poll_mtx);
}

/*
 * netisr_poll is typically scheduled once per tick.
 */
void
netisr_poll(void)
{
	int i, cycles;
	enum poll_cmd arg = POLL_ONLY;

	NET_EPOCH_ASSERT();

	if (poll_handlers == 0)
		return;

	mtx_lock(&poll_mtx);
	if (!netisr_poll_scheduled) {
		mtx_unlock(&poll_mtx);
		return;
	}
	netisr_poll_scheduled = 0;
	phase = 3;
	if (residual_burst == 0) { /* first call in this tick */
		microuptime(&poll_start_t);
		if (++reg_frac_count == reg_frac) {
			arg = POLL_AND_CHECK_STATUS;
			reg_frac_count = 0;
		}

		residual_burst = poll_burst;
	}
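	/*
	 * Consume the burst in chunks of at most poll_each_burst
	 * packets; netisr_pollmore() reschedules us until
	 * residual_burst is exhausted.
	 */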
	cycles = (residual_burst < poll_each_burst) ?
		residual_burst : poll_each_burst;
	residual_burst -= cycles;

	for (i = 0; i < poll_handlers; i++)
		pr[i].handler(pr[i].ifp, arg, cycles);

	phase = 4;
	mtx_unlock(&poll_mtx);
}

/*
 * Try to register a routine for polling. Returns 0 if successful
 * (and polling should be enabled), an error code otherwise.
 * A device is not supposed to register itself multiple times.
 *
 * This is called from within the *_ioctl() functions.
 */
int
ether_poll_register(poll_handler_t *h, if_t ifp)
{
	int i;

	KASSERT(h != NULL, ("%s: handler is NULL", __func__));
	KASSERT(ifp != NULL, ("%s: ifp is NULL", __func__));

	mtx_lock(&poll_mtx);
	if (poll_handlers >= POLL_LIST_LEN) {
		/*
		 * List full, cannot register more entries.
		 * This should never happen; if it does, it is probably a
		 * broken driver trying to register multiple times. Checking
		 * this at runtime is expensive, and won't solve the problem
		 * anyway, so just report a few times and then give up.
		 */
		static int verbose = 10;
		if (verbose > 0) {
			log(LOG_ERR, "poll handlers list full, "
			    "maybe a broken driver?\n");
			verbose--;
		}
		mtx_unlock(&poll_mtx);
		return (ENOMEM); /* no polling for you */
	}

	for (i = 0; i < poll_handlers; i++)
		if (pr[i].ifp == ifp && pr[i].handler != NULL) {
			mtx_unlock(&poll_mtx);
			log(LOG_DEBUG, "ether_poll_register: %s: handler"
			    " already registered\n", if_name(ifp));
			return (EEXIST);
		}

	pr[poll_handlers].handler = h;
	pr[poll_handlers].ifp = ifp;
	poll_handlers++;
	mtx_unlock(&poll_mtx);
	if (idlepoll_sleeping)
		wakeup(&idlepoll_sleeping);
	return (0);
}
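
/*
 * For illustration, a sketch of the matching SIOCSIFCAP handling in a
 * hypothetical driver's ioctl routine (the "xx" names are invented);
 * it follows the registration protocol described in the comment at the
 * top of this file:
 *
 *	case SIOCSIFCAP:
 *		if ((ifr->ifr_reqcap ^ if_getcapenable(ifp)) &
 *		    IFCAP_POLLING) {
 *			if (ifr->ifr_reqcap & IFCAP_POLLING) {
 *				error = ether_poll_register(xx_poll, ifp);
 *				if (error == 0) {
 *					xx_disable_intr(sc);
 *					if_setcapenablebit(ifp,
 *					    IFCAP_POLLING, 0);
 *				}
 *			} else {
 *				error = ether_poll_deregister(ifp);
 *				xx_enable_intr(sc);
 *				if_setcapenablebit(ifp, 0, IFCAP_POLLING);
 *			}
 *		}
 *		break;
 */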

/*
 * Remove the interface from the polling list. Called from *_ioctl(), too.
 */
int
ether_poll_deregister(if_t ifp)
{
	int i;

	KASSERT(ifp != NULL, ("%s: ifp is NULL", __func__));

	mtx_lock(&poll_mtx);

	for (i = 0; i < poll_handlers; i++)
		if (pr[i].ifp == ifp) /* found it */
			break;
	if (i == poll_handlers) {
		log(LOG_DEBUG, "ether_poll_deregister: %s: not found!\n",
		    if_name(ifp));
		mtx_unlock(&poll_mtx);
		return (ENOENT);
	}
	poll_handlers--;
	if (i < poll_handlers) { /* Last entry replaces this one. */
		pr[i].handler = pr[poll_handlers].handler;
		pr[i].ifp = pr[poll_handlers].ifp;
	}
	mtx_unlock(&poll_mtx);
	return (0);
}

static void
poll_idle(void)
{
	struct thread *td = curthread;
	struct rtprio rtp;

	rtp.prio = RTP_PRIO_MAX;	/* lowest priority */
	rtp.type = RTP_PRIO_IDLE;
	PROC_SLOCK(td->td_proc);
	rtp_to_pri(&rtp, td);
	PROC_SUNLOCK(td->td_proc);

	for (;;) {
		if (poll_in_idle_loop && poll_handlers > 0) {
			idlepoll_sleeping = 0;
			ether_poll(poll_each_burst);
			sched_relinquish(td);
		} else {
			idlepoll_sleeping = 1;
			tsleep(&idlepoll_sleeping, 0, "pollid", hz * 3);
		}
	}
}

static struct proc *idlepoll;
static struct kproc_desc idlepoll_kp = {
	 "idlepoll",
	 poll_idle,
	 &idlepoll
};
SYSINIT(idlepoll, SI_SUB_KTHREAD_VM, SI_ORDER_ANY, kproc_start,
    &idlepoll_kp);
583