Lines Matching +full:protect +full:- +full:exec

52 kmutex_t tod_lock;	/* protects time-of-day stuff */
57 * significantly lower may allow for denial-of-service attacks.
67 ((tvp)->tv_sec cmp (tsp)->tv_sec || \
68 ((tvp)->tv_sec == (tsp)->tv_sec && \
70 (tvp)->tv_usec * 1000 cmp (tsp)->tv_nsec))
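
The three lines above are the body of a comparison macro: a struct timeval on the left is compared against a struct timespec on the right by promoting microseconds to nanoseconds so both sides use the same units. Assembled, with a hypothetical name (the real macro name is not visible in the matches):

#define TV_TS_CMP(tvp, tsp, cmp) \
        ((tvp)->tv_sec cmp (tsp)->tv_sec || \
        ((tvp)->tv_sec == (tsp)->tv_sec && \
        (tvp)->tv_usec * 1000 cmp (tsp)->tv_nsec))

As with the classic timercmp() macro, this form is only valid for the strict operators < and >, not <= or >=.
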
76 * the time-of-day and per-process interval timers. Subroutines
95 * protect modification of last in uniqtime()
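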
101 * Fast algorithm to convert nsec to usec -- see hrt2ts() in uniqtime()
109 usec = nsec - (usec >> 3); in uniqtime()
133 ((last.tv_sec - sec) <= 5)) { /* not way back in time */ in uniqtime()
137 usec -= MICROSEC; in uniqtime()
145 tv->tv_sec = sec; in uniqtime()
146 tv->tv_usec = usec; in uniqtime()
152 * sequencing - truncation to 32-bits is fine for uniqueness,
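
The uniqtime() fragments above show the two ingredients of its guarantee: a static "last" timeval protected by a lock (and sanity-checked so a bogus value far in the future is not trusted forever), and a normalization step that keeps tv_usec inside [0, MICROSEC). A minimal user-land sketch of the same idea, with hypothetical names and gettimeofday() standing in for the kernel's gethrtime() arithmetic -- this is the shape of the algorithm, not the kernel code:

#include <pthread.h>
#include <sys/time.h>

static struct timeval last;             /* last value handed out */
static pthread_mutex_t last_lock = PTHREAD_MUTEX_INITIALIZER;

/* Return a timeval that is unique and never moves backwards. */
static void
uniqtime_sketch(struct timeval *tv)
{
        struct timeval now;

        (void) gettimeofday(&now, NULL);
        pthread_mutex_lock(&last_lock);
        if (now.tv_sec > last.tv_sec ||
            (now.tv_sec == last.tv_sec && now.tv_usec > last.tv_usec)) {
                last = now;                     /* strictly newer: use it */
        } else {
                last.tv_usec++;                 /* repeat or regress: bump */
                if (last.tv_usec >= 1000000) {
                        last.tv_usec -= 1000000;
                        last.tv_sec++;
                }
        }
        *tv = last;
        pthread_mutex_unlock(&last_lock);
}
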
222 mutex_enter(&p->p_lock); in xgetitimer()
227 aitv = ttolwp(curthread)->lwp_timer[which]; in xgetitimer()
232 aitv = p->p_realitimer; in xgetitimer()
245 if (curproc->p_rprof_cyclic == CYCLIC_NONE) { in xgetitimer()
250 aitv = curproc->p_rprof_timer; in xgetitimer()
261 remain = first - ts; in xgetitimer()
265 * This was set as a one-shot, and we've in xgetitimer()
272 * We have a non-zero interval; we need to in xgetitimer()
277 remain = interval - ((ts - first) % interval); in xgetitimer()
285 mutex_exit(&p->p_lock); in xgetitimer()
289 mutex_exit(&p->p_lock); in xgetitimer()
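
The xgetitimer() lines above read back an ITIMER_REALPROF timer that is implemented as a cyclic: if the timer was a one-shot and the first expiration has already passed, nothing remains; otherwise the time left is the distance to the next multiple of the interval beyond the first firing. The arithmetic, isolated (names are illustrative, times in nanoseconds):

typedef long long hrtime_t;

static hrtime_t
itimer_remaining(hrtime_t first, hrtime_t interval, hrtime_t now)
{
        if (now < first)
                return (first - now);   /* has not fired yet */
        if (interval == 0)
                return (0);             /* expired one-shot */
        /* periodic: time until the next multiple of interval past first */
        return (interval - ((now - first) % interval));
}
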
366 mutex_enter(&p->p_lock); in xsetitimer()
372 * at the same time, even when we drop p->p_lock below. in xsetitimer()
379 if (p->p_flag & SITBUSY) { in xsetitimer()
380 mutex_exit(&p->p_lock); in xsetitimer()
383 p->p_flag |= SITBUSY; in xsetitimer()
384 while ((tmp_id = p->p_itimerid) != 0) { in xsetitimer()
388 * p_lock). Drop p_lock and re-acquire it after in xsetitimer()
392 p->p_itimerid = 0; in xsetitimer()
393 mutex_exit(&p->p_lock); in xsetitimer()
395 mutex_enter(&p->p_lock); in xsetitimer()
400 p->p_itimerid = realtime_timeout(realitexpire, in xsetitimer()
403 p->p_realitimer = aitv; in xsetitimer()
404 p->p_flag &= ~SITBUSY; in xsetitimer()
408 cyclic = p->p_rprof_cyclic; in xsetitimer()
409 p->p_rprof_cyclic = CYCLIC_NONE; in xsetitimer()
411 mutex_exit(&p->p_lock); in xsetitimer()
440 * set the interval to be INT64_MAX - when.cyt_when to in xsetitimer()
441 * effect a one-shot; see the comment in clock_highres.c in xsetitimer()
444 when.cyt_interval = INT64_MAX - when.cyt_when; in xsetitimer()
455 mutex_enter(&p->p_lock); in xsetitimer()
457 if (p->p_rprof_cyclic != CYCLIC_NONE) { in xsetitimer()
464 mutex_exit(&p->p_lock); in xsetitimer()
475 * per-thread SIGPROF buffers, if possible. in xsetitimer()
478 p->p_rprof_timer = aitv; in xsetitimer()
479 p->p_rprof_cyclic = cyclic; in xsetitimer()
481 t = p->p_tlist; in xsetitimer()
485 itvp = &ttolwp(t)->lwp_timer[ITIMER_PROF]; in xsetitimer()
486 timerclear(&itvp->it_interval); in xsetitimer()
487 timerclear(&itvp->it_value); in xsetitimer()
489 if (t->t_rprof != NULL) in xsetitimer()
492 t->t_rprof = in xsetitimer()
495 } while ((t = t->t_forw) != p->p_tlist); in xsetitimer()
500 ttolwp(curthread)->lwp_timer[ITIMER_VIRTUAL] = aitv; in xsetitimer()
504 if (p->p_rprof_cyclic != CYCLIC_NONE) { in xsetitimer()
512 ttolwp(curthread)->lwp_timer[ITIMER_PROF] = aitv; in xsetitimer()
516 mutex_exit(&p->p_lock); in xsetitimer()
519 mutex_exit(&p->p_lock); in xsetitimer()
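
Two points in the xsetitimer() lines above deserve a note. The SITBUSY flag serializes concurrent setitimer(ITIMER_REAL) callers, because p_lock has to be dropped while an outstanding callout is cancelled. And because the cyclic subsystem used for ITIMER_REALPROF has no native one-shot mode, the code programs a periodic cyclic whose period reaches exactly to INT64_MAX, pushing the second firing to the end of the hrtime_t range without signed overflow. A sketch of that second trick (the two-field struct mirrors the cyt_when/cyt_interval usage above; everything else is illustrative):

#include <stdint.h>

typedef struct {
        int64_t cyt_when;       /* absolute time of first firing (ns) */
        int64_t cyt_interval;   /* period between firings (ns) */
} cyc_time_sketch_t;

/*
 * "One-shot": fire once at 'when'; the next firing would land at
 * when + interval == INT64_MAX, i.e. effectively never, and computing
 * it cannot overflow.
 */
static void
oneshot_sketch(cyc_time_sketch_t *cyt, int64_t when)
{
        cyt->cyt_when = when;
        cyt->cyt_interval = INT64_MAX - when;
}
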
525 * Called only from exec_args() when exec occurs.
527 * to be inherited across exec(), so leave them alone.
537 mutex_enter(&p->p_lock); in delete_itimer_realprof()
539 /* we are performing execve(); assert we are single-threaded */ in delete_itimer_realprof()
540 ASSERT(t == p->p_tlist && t == t->t_forw); in delete_itimer_realprof()
542 if ((cyclic = p->p_rprof_cyclic) == CYCLIC_NONE) { in delete_itimer_realprof()
543 mutex_exit(&p->p_lock); in delete_itimer_realprof()
545 p->p_rprof_cyclic = CYCLIC_NONE; in delete_itimer_realprof()
549 if (lwp->lwp_cursig == SIGPROF) { in delete_itimer_realprof()
550 lwp->lwp_cursig = 0; in delete_itimer_realprof()
551 lwp->lwp_extsig = 0; in delete_itimer_realprof()
552 if (lwp->lwp_curinfo) { in delete_itimer_realprof()
553 siginfofree(lwp->lwp_curinfo); in delete_itimer_realprof()
554 lwp->lwp_curinfo = NULL; in delete_itimer_realprof()
560 sigdelset(&p->p_sig, SIGPROF); in delete_itimer_realprof()
561 sigdelset(&p->p_extsig, SIGPROF); in delete_itimer_realprof()
563 sigdelset(&t->t_sig, SIGPROF); in delete_itimer_realprof()
564 sigdelset(&t->t_extsig, SIGPROF); in delete_itimer_realprof()
567 mutex_exit(&p->p_lock); in delete_itimer_realprof()
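
delete_itimer_realprof() runs from the exec path, where the process is known to be single-threaded, so clearing one lwp and one thread is enough. The matched lines show the three places a profiling signal can still be pending; reassembled in reading order (with the surrounding control flow made explicit; lines that did not match the query are omitted or inferred), the cleanup is:

        /* Discard a SIGPROF currently being delivered to the lwp. */
        if (lwp->lwp_cursig == SIGPROF) {
                lwp->lwp_cursig = 0;
                lwp->lwp_extsig = 0;
                if (lwp->lwp_curinfo) {
                        siginfofree(lwp->lwp_curinfo);
                        lwp->lwp_curinfo = NULL;
                }
        }

        /* Drop any SIGPROF still queued against the process or the thread. */
        sigdelset(&p->p_sig, SIGPROF);
        sigdelset(&p->p_extsig, SIGPROF);
        sigdelset(&t->t_sig, SIGPROF);
        sigdelset(&t->t_extsig, SIGPROF);
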
590 struct timeval *valp = &p->p_realitimer.it_value; in realitexpire()
591 struct timeval *intervalp = &p->p_realitimer.it_interval; in realitexpire()
596 mutex_enter(&p->p_lock); in realitexpire()
606 p->p_itimerid = realtime_timeout(realitexpire, p, ticks); in realitexpire()
607 mutex_exit(&p->p_lock); in realitexpire()
614 p->p_itimerid = 0; in realitexpire()
618 p->p_itimerid = realtime_timeout(realitexpire, p, hzto(valp)); in realitexpire()
620 mutex_exit(&p->p_lock); in realitexpire()
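
realitexpire() is the callout handler behind setitimer(ITIMER_REAL): on expiration it either clears p_itimerid (no interval, so the timer simply dies) or advances the absolute it_value by it_interval and re-arms itself through realtime_timeout(realitexpire, p, hzto(valp)). A user-land sketch of the re-arm step, assuming it_value holds an absolute time; the catch-up loop (skipping periods that have already passed) is an assumption about behavior not visible in the matched lines:

#include <sys/time.h>

/*
 * Push an absolute expiration time forward by whole intervals until it
 * lies strictly in the future.  'interval' must be non-zero and both
 * operands must be normalized timevals.
 */
static void
advance_expiry(struct timeval *val, const struct timeval *interval,
    const struct timeval *now)
{
        do {
                val->tv_sec += interval->tv_sec;
                val->tv_usec += interval->tv_usec;
                if (val->tv_usec >= 1000000) {
                        val->tv_usec -= 1000000;
                        val->tv_sec++;
                }
        } while (val->tv_sec < now->tv_sec ||
            (val->tv_sec == now->tv_sec && val->tv_usec <= now->tv_usec));
}
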
637 mutex_enter(&p->p_lock); in realprofexpire()
638 if (p->p_rprof_cyclic == CYCLIC_NONE || in realprofexpire()
639 (t = p->p_tlist) == NULL) { in realprofexpire()
640 mutex_exit(&p->p_lock); in realprofexpire()
649 if (t->t_rprof == NULL) in realprofexpire()
650 t->t_rprof = kmem_zalloc(sizeof (struct rprof), in realprofexpire()
652 if (t->t_rprof == NULL) in realprofexpire()
656 switch (t->t_state) { in realprofexpire()
661 if (!(t->t_schedflag & TS_LOAD)) { in realprofexpire()
665 switch (mstate = ttolwp(t)->lwp_mstate.ms_prev) { in realprofexpire()
681 switch (mstate = t->t_mstate) { in realprofexpire()
692 mstate = t->t_mstate; in realprofexpire()
695 t->t_rprof->rp_anystate = 1; in realprofexpire()
696 t->t_rprof->rp_state[mstate]++; in realprofexpire()
702 if (t->t_state == TS_ONPROC && t->t_cpu != CPU) in realprofexpire()
703 poke_cpu(t->t_cpu->cpu_id); in realprofexpire()
705 } while ((t = t->t_forw) != p->p_tlist); in realprofexpire()
707 mutex_exit(&p->p_lock); in realprofexpire()
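
realprofexpire() is the cyclic handler for ITIMER_REALPROF: with p_lock held it walks the process's circular thread list, classifies each thread's microstate into its per-thread t_rprof buffer (allocating one on first use), and pokes the CPU of any thread running elsewhere so it notices the pending profiling work promptly. The traversal idiom -- do/while over t_forw until the walk returns to p_tlist -- also appears in xsetitimer() above. A generic sketch of the idiom with hypothetical types:

typedef struct thr {
        struct thr *t_forw;     /* circular, forward-linked list of threads */
        /* ... per-thread state ... */
} thr_t;

/* Visit every thread on a circular list headed by 'list' exactly once. */
static void
foreach_thread(thr_t *list, void (*visit)(thr_t *))
{
        thr_t *t = list;

        if (t == NULL)
                return;
        do {
                visit(t);
        } while ((t = t->t_forw) != list);
}
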
743 if (tv->tv_sec < 0 || tv->tv_sec > 100000000 || in itimerfix()
744 tv->tv_usec < 0 || tv->tv_usec >= MICROSEC) in itimerfix()
746 if (tv->tv_sec == 0 && tv->tv_usec != 0 && tv->tv_usec < minimum) in itimerfix()
747 tv->tv_usec = minimum; in itimerfix()
760 if (tv->tv_sec < 0 || tv->tv_nsec < 0 || tv->tv_nsec >= NANOSEC) in itimerspecfix()
769 * it. In this case, carry over (usec - old value) to
778 if (itp->it_value.tv_usec < usec) { in itimerdecr()
779 if (itp->it_value.tv_sec == 0) { in itimerdecr()
781 usec -= itp->it_value.tv_usec; in itimerdecr()
784 itp->it_value.tv_usec += MICROSEC; in itimerdecr()
785 itp->it_value.tv_sec--; in itimerdecr()
787 itp->it_value.tv_usec -= usec; in itimerdecr()
789 if (timerisset(&itp->it_value)) in itimerdecr()
793 if (timerisset(&itp->it_interval)) { in itimerdecr()
794 itp->it_value = itp->it_interval; in itimerdecr()
795 itp->it_value.tv_usec -= usec; in itimerdecr()
796 if (itp->it_value.tv_usec < 0) { in itimerdecr()
797 itp->it_value.tv_usec += MICROSEC; in itimerdecr()
798 itp->it_value.tv_sec--; in itimerdecr()
801 itp->it_value.tv_usec = 0; /* sec is already 0 */ in itimerdecr()
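
itimerdecr() counts an itimerval down by 'usec' microseconds: borrow from tv_sec when tv_usec is too small, report the timer as still running if anything is left, and otherwise reload it_value from it_interval, carrying the overshoot into the new period so a periodic timer does not drift. Condensed into a standalone function from the logic the matched lines imply (not the kernel function verbatim; usec is assumed to be less than one second):

#include <sys/time.h>

/* Returns 1 if the timer is still running, 0 if it expired. */
static int
itimerdecr_sketch(struct itimerval *itp, int usec)
{
        if (itp->it_value.tv_usec < usec) {
                if (itp->it_value.tv_sec == 0) {
                        /* expired; carry the overshoot into the reload */
                        usec -= itp->it_value.tv_usec;
                        goto expire;
                }
                itp->it_value.tv_usec += 1000000;
                itp->it_value.tv_sec--;
        }
        itp->it_value.tv_usec -= usec;
        usec = 0;
        if (timerisset(&itp->it_value))
                return (1);
expire:
        if (timerisset(&itp->it_interval)) {
                itp->it_value = itp->it_interval;
                itp->it_value.tv_usec -= usec;
                if (itp->it_value.tv_usec < 0) {
                        itp->it_value.tv_usec += 1000000;
                        itp->it_value.tv_sec--;
                }
        } else {
                itp->it_value.tv_usec = 0;      /* tv_sec is already 0 */
        }
        return (0);
}
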
815 t1->tv_sec += t2->tv_sec; in timevaladd()
816 t1->tv_usec += t2->tv_usec; in timevaladd()
823 t1->tv_sec -= t2->tv_sec; in timevalsub()
824 t1->tv_usec -= t2->tv_usec; in timevalsub()
831 if (t1->tv_usec < 0) { in timevalfix()
832 t1->tv_sec--; in timevalfix()
833 t1->tv_usec += MICROSEC; in timevalfix()
835 if (t1->tv_usec >= MICROSEC) { in timevalfix()
836 t1->tv_sec++; in timevalfix()
837 t1->tv_usec -= MICROSEC; in timevalfix()
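
timevaladd() and timevalsub() do the obvious component-wise arithmetic and then call timevalfix() to bring tv_usec back into [0, MICROSEC); the timespec versions in the next group of matches are identical with tv_nsec and NANOSEC. Assembled into standalone form (the bodies follow the matched lines; the signatures are assumed):

#include <sys/time.h>

#ifndef MICROSEC
#define MICROSEC        1000000
#endif

/* Keep 0 <= tv_usec < MICROSEC after an add or a subtract. */
static void
timevalfix(struct timeval *t1)
{
        if (t1->tv_usec < 0) {
                t1->tv_sec--;
                t1->tv_usec += MICROSEC;
        }
        if (t1->tv_usec >= MICROSEC) {
                t1->tv_sec++;
                t1->tv_usec -= MICROSEC;
        }
}

static void
timevaladd(struct timeval *t1, struct timeval *t2)
{
        t1->tv_sec += t2->tv_sec;
        t1->tv_usec += t2->tv_usec;
        timevalfix(t1);
}

static void
timevalsub(struct timeval *t1, struct timeval *t2)
{
        t1->tv_sec -= t2->tv_sec;
        t1->tv_usec -= t2->tv_usec;
        timevalfix(t1);
}
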
848 t1->tv_sec += t2->tv_sec; in timespecadd()
849 t1->tv_nsec += t2->tv_nsec; in timespecadd()
856 t1->tv_sec -= t2->tv_sec; in timespecsub()
857 t1->tv_nsec -= t2->tv_nsec; in timespecsub()
864 if (t1->tv_nsec < 0) { in timespecfix()
865 t1->tv_sec--; in timespecfix()
866 t1->tv_nsec += NANOSEC; in timespecfix()
868 if (t1->tv_nsec >= NANOSEC) { in timespecfix()
869 t1->tv_sec++; in timespecfix()
870 t1->tv_nsec -= NANOSEC; in timespecfix()
884 ts.tv_sec = tv->tv_sec; in hzto()
885 ts.tv_nsec = tv->tv_usec * 1000; in hzto()
908 sec = tv->tv_sec - now.tv_sec; in timespectohz()
909 nsec = tv->tv_nsec - now.tv_nsec + nsec_per_tick - 1; in timespectohz()
912 sec--; in timespectohz()
916 nsec -= NANOSEC; in timespectohz()
924 * and at about 35 weeks for hz=100. (Rather longer for the 64-bit in timespectohz()
925 * kernel :-) in timespectohz()
928 ticks = 1; /* protect vs nonpositive */ in timespectohz()
929 else if (sec > (LONG_MAX - ticks) / hz) in timespectohz()
930 ticks = LONG_MAX; /* protect vs overflow */ in timespectohz()
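
hzto() and timespectohz() convert an absolute future time into a callout delay in ticks: subtract the current time, round the nanosecond remainder up to a whole tick (the "+ nsec_per_tick - 1"), then clamp so the result is at least 1 and never overflows a long. A user-land sketch of the same conversion; the tick rate and the exact clamping conditions are assumptions, the structure follows the matched lines:

#include <limits.h>
#include <time.h>

#define HZ_SKETCH       100                     /* assumed tick rate */
#ifndef NANOSEC
#define NANOSEC         1000000000L
#endif
#define NSEC_PER_TICK   (NANOSEC / HZ_SKETCH)

/* Ticks from 'now' until absolute time 'tv'; always at least 1. */
static long
timespectohz_sketch(const struct timespec *tv, const struct timespec *now)
{
        long sec = tv->tv_sec - now->tv_sec;
        long nsec = tv->tv_nsec - now->tv_nsec + NSEC_PER_TICK - 1;
        long ticks;

        if (nsec < 0) {                 /* borrow from the seconds */
                sec--;
                nsec += NANOSEC;
        } else if (nsec >= NANOSEC) {   /* carry into the seconds */
                sec++;
                nsec -= NANOSEC;
        }
        ticks = nsec / NSEC_PER_TICK;
        if (sec < 0 || (sec == 0 && ticks <= 0))
                ticks = 1;                      /* protect vs nonpositive */
        else if (sec > (LONG_MAX - ticks) / HZ_SKETCH)
                ticks = LONG_MAX;               /* protect vs overflow */
        else
                ticks += sec * HZ_SKETCH;
        return (ticks);
}
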
948 sec = tv->tv_sec; in timespectohz64()
949 nsec = tv->tv_nsec + nsec_per_tick - 1; in timespectohz64()
952 sec--; in timespectohz64()
956 nsec -= NANOSEC; in timespectohz64()
964 * and at about 35 weeks for hz=100. (Rather longer for the 64-bit in timespectohz64()
968 ticks = 1; /* protect vs nonpositive */ in timespectohz64()
969 else if (sec > (((~0ULL) >> 1) - ticks) / hz) in timespectohz64()
970 ticks = (~0ULL) >> 1; /* protect vs overflow */ in timespectohz64()
982 * tsp->sec = hrt / NANOSEC;
983 * tsp->nsec = hrt % NANOSEC;
985 * The black magic below avoids doing a 64-bit by 32-bit integer divide,
987 * it might first appear -- don't try this at home.
991 * Multiplication by a fixed constant is easy -- you just do the appropriate
1004 * 1111 = 10000 - 1, hence x * 15 = (x << 4) - (x << 0). [You would never
1005 * actually perform the operation << 0, since it's a no-op; I'm just writing
1011 * in binary. If you apply the bit-grouping trick, it doesn't buy you very
1015 * - 000100011001010011011000000000
1023 * since 128 is nearby: x * 125 = (x << 7) - x - x - x, which is just four
1024 * operations. So, to multiply by 1,000,000,000, we perform three multipli-
1028 * Division is harder; there is no equivalent of the simple shift-add algorithm
1030 * into a multiplication problem by pre-computing the binary representation
1033 * 1 / 1,000,000,000 = 1.0001001011100000101111101000001B-30,
1035 * to 32 bits of precision. (The notation B-30 means "* 2^-30", just like
1036 * E-18 means "* 10^-18".)
1038 * So, to compute x / 1,000,000,000, we just multiply x by the 32-bit
1044 * - 00000000000100000000000100000000
1050 * sec -= (hrt << 8);
1053 * sec -= (hrt << 20);
1060 * Voila! The only problem is, since hrt is 64 bits, we need to use 96-bit
1069 * These 32 bits are just the lower-order word of (hrt >> 30). This brings
1070 * us down from 96-bit math to 64-bit math, and our algorithm becomes:
1075 * sec -= (tmp << 8);
1078 * sec -= (tmp << 20);
1085 * Next, we're going to reduce this 64-bit computation to a 32-bit
1089 * tmp <<= 6, tmp <<= 2 (== 8 - 6), tmp <<= 5 (== 13 - 8), etc:
1094 * tmp <<= 2; sec -= tmp;
1097 * tmp <<= 6; sec -= tmp;
1119 * throw away, which is 2^-32 + 2^-31 + ... + 2^-2 + 2^-1 == 1 - 2^-32.
1120 * Thus, the final result ("sec") is correct to +/- 1.
1125 * "sec = (sec >> n) - tmp" must be changed to "sec = tmp - (sec >> n)", and
1126 * the operators (+ or -) in all previous lines must be toggled accordingly.
1131 * sec = tmp - (tmp >> 2);
1132 * sec = tmp - (sec >> 5);
1134 * sec = tmp - (sec >> 6);
1135 * sec = tmp - (sec >> 3);
1140 * This yields a value for sec that is accurate to +1/-1, so we have two
1141 * cases to deal with. The mysterious-looking "+ 7" in the code below biases
1143 * the correct value. With this modified code, sec is accurate to +0/-2, with
1144 * the -2 case being very rare in practice. With this change, we only have to
1150 * *guaranteed* accuracy of sec to +0/-3, but speeds up the common cases.
1152 * Finally, we compute nsec = hrt - (sec * 1,000,000,000). nsec will always
1154 * the error in sec (times 1,000,000,000) plus the low-order 30 bits of hrt.
1157 * sec * 1,000,000,000, we only need the low 32 bits, so we can just do 32-bit
1158 * arithmetic and let the high-order bits fall off the end.
1163 * nsec -= NANOSEC;
1171 * 35 usec for software division -- about 20 times faster.
1181 tsp->tv_sec = hrt / NANOSEC; in hrt2ts()
1182 tsp->tv_nsec = hrt % NANOSEC; in hrt2ts()
1187 sec = tmp - (tmp >> 2); in hrt2ts()
1188 sec = tmp - (sec >> 5); in hrt2ts()
1190 sec = tmp - (sec >> 6) + 7; in hrt2ts()
1191 sec = tmp - (sec >> 3); in hrt2ts()
1195 tmp = (sec << 7) - sec - sec - sec; in hrt2ts()
1196 tmp = (tmp << 7) - tmp - tmp - tmp; in hrt2ts()
1197 tmp = (tmp << 7) - tmp - tmp - tmp; in hrt2ts()
1198 nsec = (uint32_t)hrt - (tmp << 9); in hrt2ts()
1200 nsec -= NANOSEC; in hrt2ts()
1203 tsp->tv_sec = (time_t)sec; in hrt2ts()
1204 tsp->tv_nsec = nsec; in hrt2ts()
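
The comment above reduces hrt / 1,000,000,000 to one multiply: take tmp as the low 32 bits of (hrt >> 30), multiply by the comment's 32-bit reciprocal (0x89705F41 in hex, which is floor(2^61 / 10^9)), and shift right by 61 - 30 = 31. Because both tmp and the reciprocal are truncated, the quotient can come out slightly low but never high, so the leftover nanoseconds are recovered and renormalized just as the code above does. A user-land sketch that checks the idea against ordinary division; the kernel's exact shift sequence, which avoids even the 32x32 multiply, is not reproduced here:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#ifndef NANOSEC
#define NANOSEC         1000000000LL
#endif
#define RECIP_1E9       0x89705F41ULL   /* floor(2^61 / 10^9), 32 bits */

static void
hrt2ts_sketch(int64_t hrt, int64_t *secp, int64_t *nsecp)
{
        uint32_t tmp = (uint32_t)(hrt >> 30);   /* low word of hrt / 2^30 */
        int64_t sec = (int64_t)(((uint64_t)tmp * RECIP_1E9) >> 31);
        int64_t nsec = hrt - sec * NANOSEC;     /* never negative */

        while (nsec >= NANOSEC) {               /* quotient came out low */
                nsec -= NANOSEC;
                sec++;
        }
        *secp = sec;
        *nsecp = nsec;
}

int
main(void)
{
        int64_t hrt, sec, nsec;

        /* Spot-check against the straightforward divide and modulo. */
        for (hrt = 0; hrt < ((int64_t)1 << 61); hrt = hrt * 3 + 1) {
                hrt2ts_sketch(hrt, &sec, &nsec);
                assert(sec == hrt / NANOSEC && nsec == hrt % NANOSEC);
        }
        (void) printf("reciprocal multiply agrees with division\n");
        return (0);
}
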
1218 return ((tsp->tv_sec * NANOSEC) + tsp->tv_nsec); in ts2hrt()
1223 * hrt = tsp->tv_sec * NANOSEC + tsp->tv_nsec; in ts2hrt()
1229 hrt = tsp->tv_sec; in ts2hrt()
1230 hrt = (hrt << 7) - hrt - hrt - hrt; in ts2hrt()
1231 hrt = (hrt << 7) - hrt - hrt - hrt; in ts2hrt()
1232 hrt = (hrt << 7) - hrt - hrt - hrt; in ts2hrt()
1233 hrt = (hrt << 9) + tsp->tv_nsec; in ts2hrt()
1239 * For the various 32-bit "compatibility" paths in the system.
1253 * straightforward (x << 10) - (x << 5) + (x << 3) to multiply tv_usec by
1260 return ((hrtime_t)tvp->tv_sec * NANOSEC + in tv2hrt()
1261 (hrtime_t)tvp->tv_usec * (NANOSEC / MICROSEC)); in tv2hrt()
1271 tvp->tv_sec = hrt / NANOSEC; in hrt2tv()
1272 tvp->tv_usec = (hrt % NANOSEC) / (NANOSEC / MICROSEC); in hrt2tv()
1278 sec = tmp - (tmp >> 2); in hrt2tv()
1279 sec = tmp - (sec >> 5); in hrt2tv()
1281 sec = tmp - (sec >> 6) + 7; in hrt2tv()
1282 sec = tmp - (sec >> 3); in hrt2tv()
1286 tmp = (sec << 7) - sec - sec - sec; in hrt2tv()
1287 tmp = (tmp << 7) - tmp - tmp - tmp; in hrt2tv()
1288 tmp = (tmp << 7) - tmp - tmp - tmp; in hrt2tv()
1289 nsec = (uint32_t)hrt - (tmp << 9); in hrt2tv()
1291 nsec -= NANOSEC; in hrt2tv()
1294 tvp->tv_sec = (time_t)sec; in hrt2tv()
1303 r = nsec - q*1000; in hrt2tv()
1304 tvp->tv_usec = q + ((r + 24) >> 10); in hrt2tv()
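
tv2hrt() multiplies tv_usec by 1000 with the shift pattern quoted above, (x << 10) - (x << 5) + (x << 3), i.e. 1024x - 32x + 8x, and ts2hrt() multiplies by 10^9 as three x*125 steps ((x << 7) - x - x - x) followed by a shift by 9, since 125 * 125 * 125 * 512 == 1,000,000,000. hrt2tv() finishes with a divide-by-1000 fix-up (the q/r lines above) that is not reproduced here. A tiny check of the two multiply identities:

#include <assert.h>
#include <stdint.h>

/* x * 1000 using shifts only: 1000 == 1024 - 32 + 8. */
static uint64_t
mul1000(uint64_t x)
{
        return ((x << 10) - (x << 5) + (x << 3));
}

/* x * 1,000,000,000 as in ts2hrt(): 10^9 == 125 * 125 * 125 * 512. */
static uint64_t
mul1e9(uint64_t x)
{
        x = (x << 7) - x - x - x;       /* x *= 125 */
        x = (x << 7) - x - x - x;       /* x *= 125 */
        x = (x << 7) - x - x - x;       /* x *= 125 */
        return (x << 9);                /* x *= 512 */
}

int
main(void)
{
        uint64_t x;

        for (x = 0; x < 1000000; x += 7) {
                assert(mul1000(x) == x * 1000);
                assert(mul1e9(x) == x * 1000000000ULL);
        }
        return (0);
}
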
1338 mutex_enter(&curthread->t_delay_lock); in nanosleep()
1339 while ((ret = cv_waituntil_sig(&curthread->t_delay_cv, in nanosleep()
1340 &curthread->t_delay_lock, &rqtime, timecheck)) > 0) in nanosleep()
1342 mutex_exit(&curthread->t_delay_lock); in nanosleep()
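
The nanosleep() fragments above show the kernel pattern for an interruptible absolute-deadline sleep: take the thread's t_delay_lock, loop in cv_waituntil_sig() until it reports a timeout or a pending signal, then drop the lock. A rough user-land analogue using POSIX primitives; the kernel's return-value conventions and signal handling are not reproduced:

#include <pthread.h>
#include <time.h>

/* Sleep until an absolute CLOCK_REALTIME deadline despite spurious wakeups. */
static void
sleep_until(pthread_cond_t *cv, pthread_mutex_t *mp,
    const struct timespec *deadline)
{
        pthread_mutex_lock(mp);
        while (pthread_cond_timedwait(cv, mp, deadline) == 0)
                continue;       /* woken without timing out: sleep again */
        pthread_mutex_unlock(mp);
}
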
1382 * Note: these routines require tod_lock held to protect cached state.
1392 int saved_utc = -60;
1411 saved_tod.tod_sec += utc - saved_utc; in utc_to_tod()
1423 year = dse / 365 + 72; /* first guess -- always a bit too large */ in utc_to_tod()
1425 year--; in utc_to_tod()
1426 day = dse - 365 * (year - 70) - ((year - 69) >> 2); in utc_to_tod()
1433 tod.tod_day = day - days_thru_month[month] + 1; in utc_to_tod()
1455 int days_diff = days_thru_month[month + 1] - days_thru_month[month]; in tod_to_utc()
1463 "The hardware real-time clock appears to have the " in tod_to_utc()
1464 "wrong years value %d -- time needs to be reset\n", in tod_to_utc()
1471 "The hardware real-time clock appears to have the " in tod_to_utc()
1472 "wrong months value %d -- time needs to be reset\n", in tod_to_utc()
1479 "The hardware real-time clock appears to have the " in tod_to_utc()
1480 "wrong days value %d -- time needs to be reset\n", in tod_to_utc()
1487 "The hardware real-time clock appears to have the " in tod_to_utc()
1488 "wrong hours value %d -- time needs to be reset\n", in tod_to_utc()
1495 "The hardware real-time clock appears to have the " in tod_to_utc()
1496 "wrong minutes value %d -- time needs to be reset\n", in tod_to_utc()
1503 "The hardware real-time clock appears to have the " in tod_to_utc()
1504 "wrong seconds value %d -- time needs to be reset\n", in tod_to_utc()
1510 utc = (year - 70); /* next 3 lines: utc = 365y + y/4 */ in tod_to_utc()
1512 utc += (utc << 2) + ((year - 69) >> 2); in tod_to_utc()
1513 utc += days_thru_month[month] + tod.tod_day - 1; in tod_to_utc()
1515 utc = (utc << 6) - (utc << 2) + tod.tod_min; /* 60 * hour + min */ in tod_to_utc()
1516 utc = (utc << 6) - (utc << 2) + tod.tod_sec; /* 60 * min + sec */ in tod_to_utc()
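
tod_to_utc() builds seconds-since-1970 with shifts and adds only: the comment on the first of those lines notes that three lines compute 365y + y/4 for y = year - 70 (the (year - 69) >> 2 term counts leap days, valid for 1970-2099), days_thru_month[] and tod_day then give the day of year, and each later line scales by 24 or 60 using identities such as 60x == (x << 6) - (x << 2). A self-contained user-land sketch of the same computation, checked on a leap-day example; the 1-based month convention and the cumulative-days table are this sketch's own, and leap handling differs from the kernel's table-driven approach:

#include <assert.h>
#include <stdio.h>

/* Cumulative days before each 1-based month in a non-leap year. */
static const int days_before[13] =
    { 0, 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334 };

/*
 * Seconds since 1970-01-01 00:00:00 UTC for a broken-down time.
 * 'year' counts from 1900 (so 1970 is 70); valid for 1970..2099,
 * where every year divisible by 4 is a leap year.
 */
static long long
tod_to_utc_sketch(int year, int month, int day, int hour, int min, int sec)
{
        long long utc;

        utc = 365LL * (year - 70) + ((year - 69) >> 2); /* whole years */
        utc += days_before[month] + day - 1;            /* day of year */
        if ((year & 3) == 0 && month > 2)
                utc++;                                  /* this year's Feb 29 */
        utc = ((utc << 3) + (utc << 4)) + hour;         /* 24 * days + hour */
        utc = ((utc << 6) - (utc << 2)) + min;          /* 60 * hours + min */
        utc = ((utc << 6) - (utc << 2)) + sec;          /* 60 * min + sec */
        return (utc);
}

int
main(void)
{
        /* 2004-02-29 12:34:56 UTC == 1078058096 (a leap day). */
        assert(tod_to_utc_sketch(104, 2, 29, 12, 34, 56) == 1078058096LL);
        /* 1970-01-01 00:00:01 UTC == 1. */
        assert(tod_to_utc_sketch(70, 1, 1, 0, 0, 1) == 1LL);
        (void) printf("shift-add calendar math checks out\n");
        return (0);
}
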