xref: /freebsd/sys/kern/kern_time.c (revision 6780ab54325a71e7e70112b11657973edde8655e)
1 /*
2  * Copyright (c) 1982, 1986, 1989, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. All advertising materials mentioning features or use of this software
14  *    must display the following acknowledgement:
15  *	This product includes software developed by the University of
16  *	California, Berkeley and its contributors.
17  * 4. Neither the name of the University nor the names of its contributors
18  *    may be used to endorse or promote products derived from this software
19  *    without specific prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
22  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31  * SUCH DAMAGE.
32  *
33  *	@(#)kern_time.c	8.1 (Berkeley) 6/10/93
34  * $FreeBSD$
35  */
36 
37 #include "opt_mac.h"
38 
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/lock.h>
42 #include <sys/mutex.h>
43 #include <sys/sysproto.h>
44 #include <sys/resourcevar.h>
45 #include <sys/signalvar.h>
46 #include <sys/kernel.h>
47 #include <sys/mac.h>
49 #include <sys/sysent.h>
50 #include <sys/proc.h>
51 #include <sys/time.h>
52 #include <sys/timetc.h>
53 #include <sys/vnode.h>
54 
55 #include <vm/vm.h>
56 #include <vm/vm_extern.h>
57 
58 int tz_minuteswest;
59 int tz_dsttime;
60 
61 /*
62  * Time of day and interval timer support.
63  *
64  * These routines provide the kernel entry points to get and set
65  * the time-of-day and per-process interval timers.  Subroutines
66  * here provide support for adding and subtracting timeval structures
67  * and decrementing interval timers, optionally reloading the interval
68  * timers when they expire.
69  */
70 
71 static int	nanosleep1(struct thread *td, struct timespec *rqt,
72 		    struct timespec *rmt);
73 static int	settime(struct thread *, struct timeval *);
74 static void	timevalfix(struct timeval *);
75 static void	no_lease_updatetime(int);
76 
77 static void
78 no_lease_updatetime(int deltat)
80 {
81 }
82 
83 void (*lease_updatetime)(int)  = no_lease_updatetime;
84 
85 static int
86 settime(struct thread *td, struct timeval *tv)
87 {
88 	struct timeval delta, tv1, tv2;
89 	static struct timeval maxtime, laststep;
90 	struct timespec ts;
91 	int s;
92 
93 	s = splclock();
94 	microtime(&tv1);
95 	delta = *tv;
96 	timevalsub(&delta, &tv1);
97 
98 	/*
99 	 * If the system is secure, we do not allow the time to be
100 	 * set to a value earlier than 1 second less than the highest
101 	 * time we have yet seen. The worst a miscreant can do in
102 	 * this circumstance is "freeze" time; he cannot step the
103 	 * clock back into the past.
104 	 *
105 	 * We similarly do not allow the clock to be stepped forward by
106 	 * more than one second, nor more than once per second. This allows
107 	 * a miscreant to make the clock march double-time, but no worse.
108 	 */
109 	if (securelevel_gt(td->td_ucred, 1) != 0) {
110 		if (delta.tv_sec < 0 || delta.tv_usec < 0) {
111 			/*
112 			 * Update maxtime to latest time we've seen.
113 			 */
114 			if (tv1.tv_sec > maxtime.tv_sec)
115 				maxtime = tv1;
116 			tv2 = *tv;
117 			timevalsub(&tv2, &maxtime);
118 			if (tv2.tv_sec < -1) {
119 				tv->tv_sec = maxtime.tv_sec - 1;
120 				printf("Time adjustment clamped to -1 second\n");
121 			}
122 		} else {
123 			if (tv1.tv_sec == laststep.tv_sec) {
124 				splx(s);
125 				return (EPERM);
126 			}
127 			if (delta.tv_sec > 1) {
128 				tv->tv_sec = tv1.tv_sec + 1;
129 				printf("Time adjustment clamped to +1 second\n");
130 			}
131 			laststep = *tv;
132 		}
133 	}
134 
135 	ts.tv_sec = tv->tv_sec;
136 	ts.tv_nsec = tv->tv_usec * 1000;
137 	mtx_lock(&Giant);
138 	tc_setclock(&ts);
139 	(void) splsoftclock();
140 	lease_updatetime(delta.tv_sec);
141 	splx(s);
142 	resettodr();
143 	mtx_unlock(&Giant);
144 	return (0);
145 }
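
/*
 * A worked example of the clamping above, using hypothetical values: with
 * securelevel > 1, suppose the highest time yet seen (maxtime) is 1000.0
 * seconds and a caller asks for 990.0.  Then delta is negative, tv2 =
 * 990.0 - 1000.0 has tv_sec < -1, and the request is clamped to
 * maxtime.tv_sec - 1 = 999, so the clock can be "frozen" but not stepped
 * far into the past.  Conversely, a request for 1005.0 when the current
 * time is 1000.0 gives delta.tv_sec = 5 > 1, so the step is clamped to
 * one second forward, and a second forward step within that same second
 * fails with EPERM.
 */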
146 
147 #ifndef _SYS_SYSPROTO_H_
148 struct clock_gettime_args {
149 	clockid_t clock_id;
150 	struct	timespec *tp;
151 };
152 #endif
153 
154 /*
155  * MPSAFE
156  */
157 /* ARGSUSED */
158 int
159 clock_gettime(struct thread *td, struct clock_gettime_args *uap)
160 {
161 	struct timespec ats;
162 
163 	if (uap->clock_id != CLOCK_REALTIME)
164 		return (EINVAL);
165 	nanotime(&ats);
166 	return (copyout(&ats, uap->tp, sizeof(ats)));
167 }
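
/*
 * A minimal, illustrative userland sketch (hypothetical program) of the
 * clock_gettime(2) interface handled above; CLOCK_REALTIME is the only
 * clock id this kernel accepts.
 *
 *	#include <stdio.h>
 *	#include <time.h>
 *
 *	int
 *	main(void)
 *	{
 *		struct timespec ts;
 *
 *		if (clock_gettime(CLOCK_REALTIME, &ts) != 0)
 *			return (1);
 *		printf("%ld.%09ld\n", (long)ts.tv_sec, ts.tv_nsec);
 *		return (0);
 *	}
 */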
168 
169 #ifndef _SYS_SYSPROTO_H_
170 struct clock_settime_args {
171 	clockid_t clock_id;
172 	const struct	timespec *tp;
173 };
174 #endif
175 
176 /*
177  * MPSAFE
178  */
179 /* ARGSUSED */
180 int
181 clock_settime(struct thread *td, struct clock_settime_args *uap)
182 {
183 	struct timeval atv;
184 	struct timespec ats;
185 	int error;
186 
187 #ifdef MAC
188 	error = mac_check_system_settime(td->td_ucred);
189 	if (error)
190 		return (error);
191 #endif
192 	if ((error = suser(td)) != 0)
193 		return (error);
194 	if (uap->clock_id != CLOCK_REALTIME)
195 		return (EINVAL);
196 	if ((error = copyin(uap->tp, &ats, sizeof(ats))) != 0)
197 		return (error);
198 	if (ats.tv_nsec < 0 || ats.tv_nsec >= 1000000000)
199 		return (EINVAL);
200 	/* XXX Don't convert nsec->usec and back */
201 	TIMESPEC_TO_TIMEVAL(&atv, &ats);
202 	error = settime(td, &atv);
203 	return (error);
204 }
205 
206 #ifndef _SYS_SYSPROTO_H_
207 struct clock_getres_args {
208 	clockid_t clock_id;
209 	struct	timespec *tp;
210 };
211 #endif
212 
213 int
214 clock_getres(struct thread *td, struct clock_getres_args *uap)
215 {
216 	struct timespec ts;
217 	int error;
218 
219 	if (uap->clock_id != CLOCK_REALTIME)
220 		return (EINVAL);
221 	error = 0;
222 	if (uap->tp) {
223 		ts.tv_sec = 0;
224 		/*
225 		 * Round up the result of the division cheaply by adding 1.
226 		 * Rounding up is especially important if rounding down
227 		 * would give 0.  Perfect rounding is unimportant.
228 		 */
229 		ts.tv_nsec = 1000000000 / tc_getfrequency() + 1;
230 		error = copyout(&ts, uap->tp, sizeof(ts));
231 	}
232 	return (error);
233 }
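
/*
 * A worked example of the resolution computation above, assuming a
 * hypothetical timecounter running at 1193182 Hz (the traditional i8254
 * rate): 1000000000 / 1193182 = 838 ns, plus 1 for the cheap round-up,
 * so clock_getres() reports 839 ns.  With a hypothetical 1 GHz
 * timecounter the division yields 1 ns and the +1 still guarantees a
 * nonzero result of 2 ns.
 */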
234 
235 static int nanowait;
236 
237 static int
238 nanosleep1(struct thread *td, struct timespec *rqt, struct timespec *rmt)
239 {
240 	struct timespec ts, ts2, ts3;
241 	struct timeval tv;
242 	int error;
243 
244 	if (rqt->tv_nsec < 0 || rqt->tv_nsec >= 1000000000)
245 		return (EINVAL);
246 	if (rqt->tv_sec < 0 || (rqt->tv_sec == 0 && rqt->tv_nsec == 0))
247 		return (0);
248 	getnanouptime(&ts);
249 	timespecadd(&ts, rqt);
250 	TIMESPEC_TO_TIMEVAL(&tv, rqt);
251 	for (;;) {
252 		error = tsleep(&nanowait, PWAIT | PCATCH, "nanslp",
253 		    tvtohz(&tv));
254 		getnanouptime(&ts2);
255 		if (error != EWOULDBLOCK) {
256 			if (error == ERESTART)
257 				error = EINTR;
258 			if (rmt != NULL) {
259 				timespecsub(&ts, &ts2);
260 				if (ts.tv_sec < 0)
261 					timespecclear(&ts);
262 				*rmt = ts;
263 			}
264 			return (error);
265 		}
266 		if (timespeccmp(&ts2, &ts, >=))
267 			return (0);
268 		ts3 = ts;
269 		timespecsub(&ts3, &ts2);
270 		TIMESPEC_TO_TIMEVAL(&tv, &ts3);
271 	}
272 }
273 
274 #ifndef _SYS_SYSPROTO_H_
275 struct nanosleep_args {
276 	struct	timespec *rqtp;
277 	struct	timespec *rmtp;
278 };
279 #endif
280 
281 /*
282  * MPSAFE
283  */
284 /* ARGSUSED */
285 int
286 nanosleep(struct thread *td, struct nanosleep_args *uap)
287 {
288 	struct timespec rmt, rqt;
289 	int error;
290 
291 	error = copyin(uap->rqtp, &rqt, sizeof(rqt));
292 	if (error)
293 		return (error);
294 
295 	if (uap->rmtp &&
296 	    !useracc((caddr_t)uap->rmtp, sizeof(rmt), VM_PROT_WRITE))
297 			return (EFAULT);
298 	error = nanosleep1(td, &rqt, &rmt);
299 	if (error && uap->rmtp) {
300 		int error2;
301 
302 		error2 = copyout(&rmt, uap->rmtp, sizeof(rmt));
303 		if (error2)
304 			error = error2;
305 	}
306 	return (error);
307 }
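
/*
 * An illustrative userland sketch (hypothetical helper) showing how the
 * rmtp argument handled above lets a caller resume a sleep that was cut
 * short by a signal.
 *
 *	#include <errno.h>
 *	#include <time.h>
 *
 *	static void
 *	sleep_fully(struct timespec req)
 *	{
 *		struct timespec rem;
 *
 *		while (nanosleep(&req, &rem) == -1 && errno == EINTR)
 *			req = rem;
 *	}
 */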
308 
309 #ifndef _SYS_SYSPROTO_H_
310 struct gettimeofday_args {
311 	struct	timeval *tp;
312 	struct	timezone *tzp;
313 };
314 #endif
315 /*
316  * MPSAFE
317  */
318 /* ARGSUSED */
319 int
320 gettimeofday(struct thread *td, struct gettimeofday_args *uap)
321 {
322 	struct timeval atv;
323 	struct timezone rtz;
324 	int error = 0;
325 
326 	if (uap->tp) {
327 		microtime(&atv);
328 		error = copyout(&atv, uap->tp, sizeof (atv));
329 	}
330 	if (error == 0 && uap->tzp != NULL) {
331 		rtz.tz_minuteswest = tz_minuteswest;
332 		rtz.tz_dsttime = tz_dsttime;
333 		error = copyout(&rtz, uap->tzp, sizeof (rtz));
334 	}
335 	return (error);
336 }
337 
338 #ifndef _SYS_SYSPROTO_H_
339 struct settimeofday_args {
340 	struct	timeval *tv;
341 	struct	timezone *tzp;
342 };
343 #endif
344 /*
345  * MPSAFE
346  */
347 /* ARGSUSED */
348 int
349 settimeofday(struct thread *td, struct settimeofday_args *uap)
350 {
351 	struct timeval atv;
352 	struct timezone atz;
353 	int error = 0;
354 
355 #ifdef MAC
356 	error = mac_check_system_settime(td->td_ucred);
357 	if (error)
358 		return (error);
359 #endif
360 	if ((error = suser(td)))
361 		return (error);
362 	/* Verify all parameters before changing time. */
363 	if (uap->tv) {
364 		if ((error = copyin(uap->tv, &atv, sizeof(atv))))
365 			return (error);
366 		if (atv.tv_usec < 0 || atv.tv_usec >= 1000000)
367 			return (EINVAL);
368 	}
369 	if (uap->tzp &&
370 	    (error = copyin(uap->tzp, &atz, sizeof(atz))))
371 		return (error);
372 
373 	if (uap->tv && (error = settime(td, &atv)))
374 		return (error);
375 	if (uap->tzp) {
376 		tz_minuteswest = atz.tz_minuteswest;
377 		tz_dsttime = atz.tz_dsttime;
378 	}
379 	return (error);
380 }
381 /*
382  * Get value of an interval timer.  The process virtual and
383  * profiling virtual time timers are kept in the p_stats area, since
384  * they can be swapped out.  These are kept internally in the
385  * way they are specified externally: in time until they expire.
386  *
387  * The real time interval timer is kept in the process table slot
388  * for the process, and its value (it_value) is kept as an
389  * absolute time rather than as a delta, so that it is easy to keep
390  * periodic real-time signals from drifting.
391  *
392  * Virtual time timers are processed in the hardclock() routine of
393  * kern_clock.c.  The real time timer is processed by a timeout
394  * routine, called from the softclock() routine.  Since a callout
395  * may be delayed in real time due to interrupt processing in the system,
396  * it is possible for the real time timeout routine (realitexpire, given below)
397  * to be delayed in real time past when it is supposed to occur.  It
398  * does not suffice, therefore, to reload the real timer's .it_value from the
399  * real time timer's .it_interval.  Rather, we compute the next time in
400  * absolute time the timer should go off.
401  */
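
/*
 * A worked example of the drift argument above, using hypothetical numbers.
 * Consider an ITIMER_REAL timer with a 0.5 second interval whose callout
 * happens to run 0.03 seconds late.  Rearming it for "now + 0.5" would push
 * every later expiration 0.03 seconds further behind; keeping .it_value as
 * an absolute time and adding .it_interval to it instead keeps the schedule
 * at 0.5, 1.0, 1.5, ... seconds of uptime, so an occasional late callout
 * does not accumulate into drift.
 */
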
402 #ifndef _SYS_SYSPROTO_H_
403 struct getitimer_args {
404 	u_int	which;
405 	struct	itimerval *itv;
406 };
407 #endif
408 /*
409  * MPSAFE
410  */
411 /* ARGSUSED */
412 int
413 getitimer(struct thread *td, struct getitimer_args *uap)
414 {
415 	struct proc *p = td->td_proc;
416 	struct timeval ctv;
417 	struct itimerval aitv;
418 	int s;
419 
420 	if (uap->which > ITIMER_PROF)
421 		return (EINVAL);
422 
423 	mtx_lock(&Giant);
424 
425 	s = splclock(); /* XXX still needed ? */
426 	if (uap->which == ITIMER_REAL) {
427 		/*
428 		 * Convert from absolute to relative time in .it_value
429 		 * part of real time timer.  If time for real time timer
430 		 * has passed return 0, else return difference between
431 		 * current time and time for the timer to go off.
432 		 */
433 		aitv = p->p_realtimer;
434 		if (timevalisset(&aitv.it_value)) {
435 			getmicrouptime(&ctv);
436 			if (timevalcmp(&aitv.it_value, &ctv, <))
437 				timevalclear(&aitv.it_value);
438 			else
439 				timevalsub(&aitv.it_value, &ctv);
440 		}
441 	} else {
442 		aitv = p->p_stats->p_timer[uap->which];
443 	}
444 	splx(s);
445 	mtx_unlock(&Giant);
446 	return (copyout(&aitv, uap->itv, sizeof (struct itimerval)));
447 }
448 
449 #ifndef _SYS_SYSPROTO_H_
450 struct setitimer_args {
451 	u_int	which;
452 	struct	itimerval *itv, *oitv;
453 };
454 #endif
455 /*
456  * MPSAFE
457  */
458 /* ARGSUSED */
459 int
460 setitimer(struct thread *td, struct setitimer_args *uap)
461 {
462 	struct proc *p = td->td_proc;
463 	struct itimerval aitv;
464 	struct timeval ctv;
465 	struct itimerval *itvp;
466 	int s, error = 0;
467 
468 	if (uap->which > ITIMER_PROF)
469 		return (EINVAL);
470 	itvp = uap->itv;
471 	if (itvp && (error = copyin(itvp, &aitv, sizeof(struct itimerval))))
472 		return (error);
473 
474 	mtx_lock(&Giant);
475 
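	/*
	 * If an old value was requested, fetch it by pointing uap->itv at
	 * uap->oitv and reusing uap as a getitimer_args: the leading fields
	 * of the two argument structures line up, so getitimer() copies the
	 * current timer value out to the caller's oitv buffer.
	 */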
476 	if ((uap->itv = uap->oitv) &&
477 	    (error = getitimer(td, (struct getitimer_args *)uap))) {
478 		goto done2;
479 	}
480 	if (itvp == 0) {
481 		error = 0;
482 		goto done2;
483 	}
484 	if (itimerfix(&aitv.it_value)) {
485 		error = EINVAL;
486 		goto done2;
487 	}
488 	if (!timevalisset(&aitv.it_value)) {
489 		timevalclear(&aitv.it_interval);
490 	} else if (itimerfix(&aitv.it_interval)) {
491 		error = EINVAL;
492 		goto done2;
493 	}
494 	s = splclock(); /* XXX: still needed ? */
495 	if (uap->which == ITIMER_REAL) {
496 		if (timevalisset(&p->p_realtimer.it_value))
497 			callout_stop(&p->p_itcallout);
498 		if (timevalisset(&aitv.it_value))
499 			callout_reset(&p->p_itcallout, tvtohz(&aitv.it_value),
500 			    realitexpire, p);
501 		getmicrouptime(&ctv);
502 		timevaladd(&aitv.it_value, &ctv);
503 		p->p_realtimer = aitv;
504 	} else {
505 		p->p_stats->p_timer[uap->which] = aitv;
506 	}
507 	splx(s);
508 done2:
509 	mtx_unlock(&Giant);
510 	return (error);
511 }
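
/*
 * An illustrative userland sketch (hypothetical program) of the
 * setitimer(2) interface above, arming a one-second periodic ITIMER_REAL
 * timer that delivers SIGALRM.
 *
 *	#include <signal.h>
 *	#include <sys/time.h>
 *	#include <unistd.h>
 *
 *	static void
 *	alarmed(int sig)
 *	{
 *		(void)sig;
 *	}
 *
 *	int
 *	main(void)
 *	{
 *		struct itimerval itv;
 *
 *		signal(SIGALRM, alarmed);
 *		itv.it_interval.tv_sec = 1;
 *		itv.it_interval.tv_usec = 0;
 *		itv.it_value = itv.it_interval;
 *		if (setitimer(ITIMER_REAL, &itv, NULL) == -1)
 *			return (1);
 *		for (;;)
 *			pause();
 *	}
 */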
512 
513 /*
514  * Real interval timer expired:
515  * send the process whose timer expired an alarm signal.
516  * If the timer is not set up to reload, then just return.
517  * Else compute the next time the timer should go off, which is > current time.
518  * This is where delay in processing this timeout causes multiple
519  * SIGALRM calls to be compressed into one.
520  * tvtohz() always adds 1 to allow for the time until the next clock
521  * interrupt being strictly less than 1 clock tick, but we don't want
522  * that here since we want to appear to be in sync with the clock
523  * interrupt even when we're delayed.
524  */
525 void
526 realitexpire(void *arg)
527 {
528 	struct proc *p;
529 	struct timeval ctv, ntv;
530 	int s;
531 
532 	p = (struct proc *)arg;
533 	PROC_LOCK(p);
534 	psignal(p, SIGALRM);
535 	if (!timevalisset(&p->p_realtimer.it_interval)) {
536 		timevalclear(&p->p_realtimer.it_value);
537 		PROC_UNLOCK(p);
538 		return;
539 	}
540 	for (;;) {
541 		s = splclock(); /* XXX: still needed ? */
542 		timevaladd(&p->p_realtimer.it_value,
543 		    &p->p_realtimer.it_interval);
544 		getmicrouptime(&ctv);
545 		if (timevalcmp(&p->p_realtimer.it_value, &ctv, >)) {
546 			ntv = p->p_realtimer.it_value;
547 			timevalsub(&ntv, &ctv);
548 			callout_reset(&p->p_itcallout, tvtohz(&ntv) - 1,
549 			    realitexpire, p);
550 			splx(s);
551 			PROC_UNLOCK(p);
552 			return;
553 		}
554 		splx(s);
555 	}
556 	/*NOTREACHED*/
557 }
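
/*
 * A worked example of the SIGALRM compression described above, using
 * hypothetical numbers: with a 0.01 second interval and a callout delayed
 * by 0.035 seconds, one SIGALRM is posted and the loop above then keeps
 * adding the interval to .it_value until the result is again in the
 * future before rearming the callout, so the expirations that fell within
 * the delay collapse into the single signal just delivered.
 */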
558 
559 /*
560  * Check that a proposed value to load into the .it_value or
561  * .it_interval part of an interval timer is acceptable, and
562  * fix it to have at least minimal value (i.e., if it is less
563  * than the resolution of the clock, round it up).
564  */
565 int
566 itimerfix(struct timeval *tv)
567 {
568 
569 	if (tv->tv_sec < 0 || tv->tv_sec > 100000000 ||
570 	    tv->tv_usec < 0 || tv->tv_usec >= 1000000)
571 		return (EINVAL);
572 	if (tv->tv_sec == 0 && tv->tv_usec != 0 && tv->tv_usec < tick)
573 		tv->tv_usec = tick;
574 	return (0);
575 }
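
/*
 * A worked example of the rounding above, assuming a hypothetical hz of
 * 100 so that tick is 10000 microseconds: a request of { 0 s, 1 us } is
 * below the clock resolution and is rounded up to { 0 s, 10000 us }, a
 * request of { 0 s, 0 us } is left alone, and negative or out-of-range
 * values are rejected with EINVAL.
 */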
576 
577 /*
578  * Decrement an interval timer by a specified number
579  * of microseconds, which must be less than a second,
580  * i.e. < 1000000.  If the timer expires, then reload
581  * it.  In this case, carry over (usec - old value) to
582  * reduce the value reloaded into the timer so that
583  * the timer does not drift.  This routine assumes
584  * that it is called in a context where the timers
585  * on which it is operating cannot change in value.
586  */
587 int
588 itimerdecr(struct itimerval *itp, int usec)
589 {
590 
591 	if (itp->it_value.tv_usec < usec) {
592 		if (itp->it_value.tv_sec == 0) {
593 			/* expired, and already in next interval */
594 			usec -= itp->it_value.tv_usec;
595 			goto expire;
596 		}
597 		itp->it_value.tv_usec += 1000000;
598 		itp->it_value.tv_sec--;
599 	}
600 	itp->it_value.tv_usec -= usec;
601 	usec = 0;
602 	if (timevalisset(&itp->it_value))
603 		return (1);
604 	/* expired, exactly at end of interval */
605 expire:
606 	if (timevalisset(&itp->it_interval)) {
607 		itp->it_value = itp->it_interval;
608 		itp->it_value.tv_usec -= usec;
609 		if (itp->it_value.tv_usec < 0) {
610 			itp->it_value.tv_usec += 1000000;
611 			itp->it_value.tv_sec--;
612 		}
613 	} else
614 		itp->it_value.tv_usec = 0;		/* sec is already 0 */
615 	return (0);
616 }
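
/*
 * A worked example of the carry-over above, using hypothetical values:
 * assume hz is 100 (so each call decrements by usec = 10000), it_value is
 * { 0 s, 500 us } and it_interval is { 1 s, 0 us }.  The timer expires
 * with 9500 us of the tick left over, so the reloaded value becomes
 * 1 s - 9500 us = { 0 s, 990500 us }; the overshoot is charged against
 * the next period instead of accumulating as drift, and 0 is returned to
 * report the expiration.
 */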
617 
618 /*
619  * Add and subtract routines for timevals.
620  * N.B.: subtract routine doesn't deal with
621  * results which are before the beginning,
622  * it just gets very confused in this case.
623  * Caveat emptor.
624  */
625 void
626 timevaladd(struct timeval *t1, struct timeval *t2)
627 {
628 
629 	t1->tv_sec += t2->tv_sec;
630 	t1->tv_usec += t2->tv_usec;
631 	timevalfix(t1);
632 }
633 
634 void
635 timevalsub(struct timeval *t1, struct timeval *t2)
636 {
637 
638 	t1->tv_sec -= t2->tv_sec;
639 	t1->tv_usec -= t2->tv_usec;
640 	timevalfix(t1);
641 }
642 
643 static void
644 timevalfix(struct timeval *t1)
645 {
646 
647 	if (t1->tv_usec < 0) {
648 		t1->tv_sec--;
649 		t1->tv_usec += 1000000;
650 	}
651 	if (t1->tv_usec >= 1000000) {
652 		t1->tv_sec++;
653 		t1->tv_usec -= 1000000;
654 	}
655 }
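
/*
 * A worked example of the normalization above: adding { 1 s, 700000 us }
 * and { 2 s, 600000 us } first yields { 3 s, 1300000 us }, and timevalfix()
 * then carries the excess microseconds into the seconds field, giving
 * { 4 s, 300000 us }.  A subtraction that underflows microseconds borrows
 * the same way, e.g. { 3 s, 100000 us } - { 1 s, 200000 us } becomes
 * { 1 s, 900000 us }.
 */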
656 
657 /*
658  * ratecheck(): simple time-based rate-limit checking.
659  */
660 int
661 ratecheck(struct timeval *lasttime, const struct timeval *mininterval)
662 {
663 	struct timeval tv, delta;
664 	int rv = 0;
665 
666 	getmicrouptime(&tv);		/* NB: 10ms precision */
667 	delta = tv;
668 	timevalsub(&delta, lasttime);
669 
670 	/*
671 	 * The check for 0,0 is so that the message will be seen at least once,
672 	 * even if the interval is huge.
673 	 */
674 	if (timevalcmp(&delta, mininterval, >=) ||
675 	    (lasttime->tv_sec == 0 && lasttime->tv_usec == 0)) {
676 		*lasttime = tv;
677 		rv = 1;
678 	}
679 
680 	return (rv);
681 }
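
/*
 * An illustrative in-kernel sketch (hypothetical caller) of ratecheck()
 * limiting a diagnostic message to at most one per 10 seconds:
 *
 *	static struct timeval lasterr;
 *	static const struct timeval errinterval = { 10, 0 };
 *
 *	if (ratecheck(&lasterr, &errinterval))
 *		printf("device0: transient error, retrying\n");
 */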
682 
683 /*
684  * ppsratecheck(): packets (or events) per second limitation.
685  *
686  * Return 0 if the limit is to be enforced (e.g. the caller
687  * should drop a packet because of the rate limitation).
688  *
689  * Note that we maintain the struct timeval for compatibility
690  * with other BSD systems.  We reuse the storage and just monitor
691  * clock ticks for minimal overhead.
692  */
693 int
694 ppsratecheck(struct timeval *lasttime, int *curpps, int maxpps)
695 {
696 	int now;
697 
698 	/*
699 	 * Reset the last time and counter if this is the first call
700 	 * or more than a second has passed since the last update of
701 	 * lasttime.
702 	 */
703 	now = ticks;
704 	if (lasttime->tv_sec == 0 || (u_int)(now - lasttime->tv_sec) >= hz) {
705 		lasttime->tv_sec = now;
706 		*curpps = 1;
707 		return (1);
708 	} else {
709 		(*curpps)++;		/* NB: ignore potential overflow */
710 		return (maxpps < 0 || *curpps < maxpps);
711 	}
712 }
713 }
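
/*
 * An illustrative sketch (hypothetical caller) of ppsratecheck() limiting
 * event responses to about 100 per second; send_response() and the mbuf m
 * are stand-ins for the caller's own transmit path.
 *
 *	static struct timeval lastresp;
 *	static int curresp;
 *
 *	if (ppsratecheck(&lastresp, &curresp, 100))
 *		send_response(m);
 *	else
 *		m_freem(m);
 */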