Lines Matching +full:idle +full:- +full:state (sys/kern/kern_clocksource.c, FreeBSD kernel event-timer management)
1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
4 * Copyright (c) 2010-2013 Alexander Motin <mav@FreeBSD.org>
67 static sbintime_t getnextcpuevent(struct pcpu_state *state, int idle);
68 static sbintime_t getnextevent(struct pcpu_state *state);
73 #define ET_HW_LOCK(state) \
75 if (timer->et_flags & ET_FLAGS_PERCPU) \
76 mtx_lock_spin(&(state)->et_hw_mtx); \
81 #define ET_HW_UNLOCK(state) \
83 if (timer->et_flags & ET_FLAGS_PERCPU) \
84 mtx_unlock_spin(&(state)->et_hw_mtx); \
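The search listing elides the non-matching lines of these macros. In the full source the non-per-CPU branch falls back to a single global spin mutex; a sketch of the complete pair, assuming the global lock is the file-scope et_hw_mtx:

#define	ET_HW_LOCK(state)						\
	{								\
		if (timer->et_flags & ET_FLAGS_PERCPU)			\
			mtx_lock_spin(&(state)->et_hw_mtx);		\
		else							\
			mtx_lock_spin(&et_hw_mtx); /* global fallback */ \
	}

#define	ET_HW_UNLOCK(state)						\
	{								\
		if (timer->et_flags & ET_FLAGS_PERCPU)			\
			mtx_unlock_spin(&(state)->et_hw_mtx);		\
		else							\
			mtx_unlock_spin(&et_hw_mtx); /* global fallback */ \
	}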
104 static u_int idletick; /* Run periodic events when idle. */
106 0, "Run periodic events when idle");
108 static int periodic; /* Periodic or one-shot mode. */
113 struct mtx et_hw_mtx; /* Per-CPU timer mutex. */
125 int idle; /* This CPU is in idle mode. */
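Every field touched in this listing lives in the per-CPU struct pcpu_state; the sketch below reconstructs it from the accesses shown here (field order and exact integer types are assumptions):

struct pcpu_state {
	struct mtx	et_hw_mtx;	/* Per-CPU timer mutex. */
	u_int		action;		/* Reconfiguration request for this CPU. */
	u_int		handle;		/* Immediate event-handling request. */
	u_int		ipi;		/* This CPU needs an IPI. */
	int		idle;		/* This CPU is in idle mode. */
	sbintime_t	now;		/* Last tick time seen by this CPU. */
	sbintime_t	nextevent;	/* Next scheduled event on this CPU. */
	sbintime_t	nexttick;	/* Next hardware timer tick. */
	sbintime_t	nexthard;	/* Next hardclock() event. */
	sbintime_t	nextstat;	/* Next statclock() event. */
	sbintime_t	nextprof;	/* Next profclock() event. */
	sbintime_t	nextcall;	/* Hard deadline for callouts. */
	sbintime_t	nextcallopt;	/* Earliest useful callout time. */
};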
138 struct pcpu_state *state; /* in hardclockintr() */
143 state = DPCPU_PTR(timerstate);
144 now = state->now;
159 struct pcpu_state *state; /* in handleevents() */
170 frame = curthread->td_intr_frame;
174 state = DPCPU_PTR(timerstate);
177 while (now >= state->nexthard) {
178 state->nexthard += tick_sbt;
183 *hct = state->nexthard - tick_sbt;
190 while (now >= state->nextstat) {
191 state->nextstat += statperiod;
200 while (now >= state->nextprof) {
201 state->nextprof += profperiod;
209 state->nextprof = state->nextstat;
210 if (now >= state->nextcallopt || now >= state->nextcall) {
211 state->nextcall = state->nextcallopt = SBT_MAX;
215 ET_HW_LOCK(state);
216 t = getnextcpuevent(state, 0);
218 state->idle = 0;
219 state->nextevent = t;
221 (timer->et_flags & ET_FLAGS_PERCPU));
223 ET_HW_UNLOCK(state);
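handleevents() catches up by advancing each deadline one period at a time until it passes now, firing one event per period. A standalone illustration of the pattern (userland C, all names hypothetical); sbintime_t is Q32.32 fixed point, so one second is 1 << 32:

#include <stdint.h>
#include <stdio.h>

typedef int64_t sbintime_t;		/* Q32.32: 1 s == 1LL << 32 */
#define	SBT_1S	((sbintime_t)1 << 32)

/* Fire one event per elapsed period, advancing the deadline past 'now'. */
static int
catch_up(sbintime_t now, sbintime_t *next, sbintime_t period)
{
	int fired = 0;

	while (now >= *next) {
		*next += period;
		fired++;
	}
	return (fired);
}

int
main(void)
{
	sbintime_t tick = SBT_1S / 1000;	/* hz = 1000 */
	sbintime_t next = tick;			/* first deadline: one tick out */

	/* After sleeping ~5 ms, five hardclock() calls are delivered. */
	printf("fired %d\n", catch_up(5 * tick, &next, tick));
	return (0);
}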
231 getnextcpuevent(struct pcpu_state *state, int idle)
236 /* Handle hardclock() events, skipping some if CPU is idle. */
237 event = state->nexthard;
238 if (idle) {
248 event += tick_sbt * (hardfreq - 1);
251 if (event > state->nextcall)
252 event = state->nextcall;
253 if (!idle) { /* If CPU is active - handle other types of events. */
254 if (event > state->nextstat)
255 event = state->nextstat;
256 if (profiling && event > state->nextprof)
257 event = state->nextprof;
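On an idle CPU the elided branch computes hardfreq (derived from hz and tc_min_ticktock_freq in the full source) and pushes the hardclock deadline out by hardfreq - 1 whole ticks, so the CPU wakes for housekeeping far less often. A worked fragment with hypothetical values:

	/* hz = 1000, hardfreq = 100: wake every ~100 ms instead of ~1 ms. */
	sbintime_t tick_sbt = SBT_1S / 1000;
	sbintime_t event = state->nexthard;	/* next pending tick */

	event += tick_sbt * (100 - 1);		/* defer by 99 ticks */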
266 getnextevent(struct pcpu_state *state)
275 c = -1;
277 event = state->nextevent;
279 if ((timer->et_flags & ET_FLAGS_PERCPU) == 0) {
281 state = DPCPU_ID_PTR(cpu, timerstate);
282 if (event > state->nextevent) {
283 event = state->nextevent;
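For a global (non-per-CPU) timer, the elided lines walk every CPU looking for the earliest nextevent so one piece of hardware can serve all CPUs; a sketch of the omitted loop, assuming the usual CPU_FOREACH/DPCPU accessors:

	if ((timer->et_flags & ET_FLAGS_PERCPU) == 0) {
		CPU_FOREACH(cpu) {
			state = DPCPU_ID_PTR(cpu, timerstate);
			if (event > state->nextevent) {
				event = state->nextevent;
				c = cpu;	/* CPU owning the earliest event */
			}
		}
	}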
302 struct pcpu_state *state;
311 state = DPCPU_PTR(timerstate);
312 if (et->et_flags & ET_FLAGS_PERCPU) {
313 next = &state->nexttick;
320 *next = -1; /* Next tick is not scheduled yet. */
321 state->now = now;
329 /* Prepare broadcasting to other CPUs for non-per-CPU timers. */
332 if ((et->et_flags & ET_FLAGS_PERCPU) == 0) {
334 if ((et->et_flags & ET_FLAGS_PERCPU) == 0 && smp_started) {
337 state = DPCPU_ID_PTR(cpu, timerstate);
338 ET_HW_LOCK(state);
339 state->now = now;
340 if (now >= state->nextevent) {
341 state->nextevent += SBT_1S;
343 state->ipi = 1;
347 ET_HW_UNLOCK(state);
356 /* Broadcast interrupt to other CPUs for non-per-CPU timers. */
361 state = DPCPU_ID_PTR(cpu, timerstate);
362 if (state->ipi) {
363 state->ipi = 0;
377 struct pcpu_state *state;
383 state = DPCPU_PTR(timerstate);
384 if (timer->et_flags & ET_FLAGS_PERCPU)
385 next = &state->nexttick;
395 new = timerperiod - tmp;
405 new = getnextevent(state);
411 et_start(timer, new - now, 0);
424 if (periodic && (timer->et_flags & ET_FLAGS_PERIODIC) == 0)
426 else if (!periodic && (timer->et_flags & ET_FLAGS_ONESHOT) == 0)
437 * Reconfigure specified per-CPU timer on other CPU. Called from IPI handler.
443 struct pcpu_state *state;
445 state = DPCPU_PTR(timerstate);
446 switch (atomic_load_acq_int(&state->action)) {
449 ET_HW_LOCK(state);
451 ET_HW_UNLOCK(state);
452 state->handle = 0;
453 atomic_store_rel_int(&state->action, 0);
456 ET_HW_LOCK(state);
458 ET_HW_UNLOCK(state);
459 state->handle = 0;
460 atomic_store_rel_int(&state->action, 0);
463 if (atomic_readandclear_int(&state->handle) && !busy) {
473 * For per-CPU timers, use an IPI to make other CPUs reconfigure.
479 struct pcpu_state *state;
495 nexttick = -1;
500 state = DPCPU_ID_PTR(cpu, timerstate);
501 state->now = now;
504 state->nextevent = SBT_MAX;
507 state->nextevent = next;
509 state->nexttick = next;
511 state->nexttick = -1;
512 state->nexthard = next;
513 state->nextstat = next;
514 state->nextprof = next;
515 state->nextcall = next;
516 state->nextcallopt = next;
520 /* Start global timer or per-CPU timer of this CPU. */
524 /* Stop global timer or per-CPU timer of this CPU. */
531 if ((timer->et_flags & ET_FLAGS_PERCPU) == 0) {
533 /* If the timer is global or there are no other CPUs yet, we are done. */
534 if ((timer->et_flags & ET_FLAGS_PERCPU) == 0 || !smp_started) {
541 state = DPCPU_ID_PTR(cpu, timerstate);
542 atomic_store_rel_int(&state->action,
553 state = DPCPU_ID_PTR(cpu, timerstate);
554 if (atomic_load_acq_int(&state->action))
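Lines 542 and 554 are the initiator's half of a small request/acknowledge protocol with doconfigtimer() above: the command is stored into state->action with release semantics, the remote CPU is kicked, and the initiator spins until the target clears the field. Schematically (a sketch; the command encoding and IPI mechanism are assumptions):

	/* Initiator side, per remote CPU (sketch): */
	atomic_store_rel_int(&state->action, start ? 1 : 2);	/* 1=load, 2=stop */
	/* ...send a hardclock IPI to the remote CPU... */
	while (atomic_load_acq_int(&state->action) != 0)
		cpu_spinwait();					/* wait for ack */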
569 if (et->et_frequency != 0) {
570 div = lmax((et->et_frequency + freq / 2) / freq, 1);
571 if (et->et_flags & ET_FLAGS_POW2DIV)
572 div = 1 << (flsl(div + div / 2) - 1);
573 freq = (et->et_frequency + div / 2) / div;
575 if (et->et_min_period > SBT_1S)
576 panic("Event timer \"%s\" doesn't support sub-second periods!",
577 et->et_name);
578 else if (et->et_min_period != 0)
579 freq = min(freq, SBT2FREQ(et->et_min_period));
580 if (et->et_max_period < SBT_1S && et->et_max_period != 0)
581 freq = max(freq, SBT2FREQ(et->et_max_period));
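round_freq() picks an integer divider of the hardware frequency nearest the requested rate, constrained to a power of two when the timer sets ET_FLAGS_POW2DIV, and yields the frequency actually achieved. A standalone replica of the arithmetic with a hypothetical 1193182 Hz (i8254-like) timer asked for 1000 Hz:

#include <stdint.h>
#include <stdio.h>

/* 1-based index of the highest set bit, like the kernel's flsl(). */
static int
my_flsl(long v)
{
	int bit = 0;

	while (v != 0) {
		v >>= 1;
		bit++;
	}
	return (bit);
}

int
main(void)
{
	uint64_t et_frequency = 1193182;	/* hypothetical timer input clock */
	long freq = 1000, div;

	div = (long)((et_frequency + freq / 2) / freq);		/* 1193 */
	printf("plain: div %ld -> %ju Hz\n", div,
	    (uintmax_t)((et_frequency + div / 2) / div));	/* 1000 Hz */

	div = 1L << (my_flsl(div + div / 2) - 1);		/* POW2DIV: 1024 */
	printf("pow2:  div %ld -> %ju Hz\n", div,
	    (uintmax_t)((et_frequency + div / 2) / div));	/* 1165 Hz */
	return (0);
}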
591 struct pcpu_state *state;
596 state = DPCPU_ID_PTR(cpu, timerstate);
597 mtx_init(&state->et_hw_mtx, "et_hw_mtx", NULL, MTX_SPIN);
598 state->nextcall = SBT_MAX;
599 state->nextcallopt = SBT_MAX;
622 if (periodic && (timer->et_flags & ET_FLAGS_PERIODIC) == 0)
624 else if (!periodic && (timer->et_flags & ET_FLAGS_ONESHOT) == 0)
626 if (timer->et_flags & ET_FLAGS_C3STOP)
674 * Start per-CPU event timers on APs.
679 struct pcpu_state *state;
682 state = DPCPU_PTR(timerstate);
683 ET_HW_LOCK(state);
684 state->now = sbinuptime();
687 ET_HW_UNLOCK(state);
689 td->td_intr_nesting_level++;
690 handleevents(state->now, 2);
691 td->td_intr_nesting_level--;
747 profiling--;
752 * Switch to idle mode (all ticks handled).
758 struct pcpu_state *state;
761 (periodic && (timer->et_flags & ET_FLAGS_PERCPU))
766 return (-1);
767 state = DPCPU_PTR(timerstate);
768 ET_HW_LOCK(state);
770 now = state->now;
773 CTR2(KTR_SPARE2, "idle: now %d.%08x",
775 t = getnextcpuevent(state, 1);
776 state->idle = 1;
777 state->nextevent = t;
780 ET_HW_UNLOCK(state);
781 return (MAX(t - now, 0));
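cpu_idleclock() returns how long the CPU may sleep before the next required event, or -1 when ticks must keep running (e.g. periodic mode on a per-CPU timer); machine-dependent idle code pairs it with cpu_activeclock() on wakeup. The usual calling pattern, sketched (MD details vary):

	sbintime_t sbt;

	sbt = cpu_idleclock();		/* stop ticks; max sleep time, or -1 */
	if (sbt >= 0) {
		/* ...enter a low-power state for at most 'sbt'... */
	}
	cpu_activeclock();		/* resume ticks and catch up on events */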
791 struct pcpu_state *state;
794 state = DPCPU_PTR(timerstate);
795 if (atomic_load_int(&state->idle) == 0 || busy)
799 now = state->now;
805 td->td_intr_nesting_level++;
807 td->td_intr_nesting_level--;
812 * Change the frequency of the given timer. This changes et->et_frequency and
823 et->et_frequency = newfreq;
826 et->et_frequency = newfreq;
833 struct pcpu_state *state;
839 CTR5(KTR_SPARE2, "new co: on %d at %d.%08x - %d.%08x",
844 state = DPCPU_ID_PTR(cpu, timerstate);
845 ET_HW_LOCK(state);
848 * If a callout time is already set earlier, do nothing.
854 state->nextcallopt = bt_opt;
855 if (bt >= state->nextcall)
857 state->nextcall = bt;
858 /* If there is some other event set earlier -- do nothing. */
859 if (bt >= state->nextevent)
861 state->nextevent = bt;
862 /* If timer is periodic -- there is nothing to reprogram. */
865 /* If timer is global or of the current CPU -- reprogram it. */
866 if ((timer->et_flags & ET_FLAGS_PERCPU) == 0 || cpu == curcpu) {
869 ET_HW_UNLOCK(state);
873 state->handle = 1;
874 ET_HW_UNLOCK(state);
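cpu_new_callout() takes two deadlines: bt_opt, the earliest time at which running the new callout becomes useful, and bt, the hard deadline by which it must fire (bt_opt <= bt). The callout code derives them from the event time and its allowed precision; a hypothetical caller:

	/* Hypothetical: earliest event at 'when', allowed to slip by 'prec'. */
	sbintime_t bt_opt = when;		/* earliest useful wakeup */
	sbintime_t bt = when + prec;		/* must fire by this time */

	cpu_new_callout(cpu, bt, bt_opt);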
892 snprintf(buf, sizeof(buf), "%s", et->et_name);
897 if (error != 0 || req->newptr == NULL ||
898 strcasecmp(buf, et->et_name) == 0) {
909 if (et->et_flags & ET_FLAGS_C3STOP)
911 if (timer->et_flags & ET_FLAGS_C3STOP)
912 cpu_disable_c3_sleep--;
934 if (error != 0 || req->newptr == NULL)
960 "CPU %2d: action %d handle %d ipi %d idle %d\n"
965 c, st->action, st->handle, st->ipi, st->idle,
966 (uintmax_t)st->now,
967 (uintmax_t)st->nextevent,
968 (uintmax_t)(st->nextevent - st->now) / tick_sbt,
969 (uintmax_t)st->nexttick,
970 (uintmax_t)(st->nexttick - st->now) / tick_sbt,
971 (uintmax_t)st->nexthard,
972 (uintmax_t)(st->nexthard - st->now) / tick_sbt,
973 (uintmax_t)st->nextstat,
974 (uintmax_t)(st->nextstat - st->now) / tick_sbt,
975 (uintmax_t)st->nextprof,
976 (uintmax_t)(st->nextprof - st->now) / tick_sbt,
977 (uintmax_t)st->nextcall,
978 (uintmax_t)(st->nextcall - st->now) / tick_sbt,
979 (uintmax_t)st->nextcallopt,
980 (uintmax_t)(st->nextcallopt - st->now) / tick_sbt);