Lines matching full:tmc — identifier search over the Linux timer migration code (kernel/time/timer_migration.c). Each entry gives the source line number, the matched line, the enclosing function, and whether tmc is a function argument or a local variable there.
428 static inline bool tmigr_is_not_available(struct tmigr_cpu *tmc) in tmigr_is_not_available() argument
430 return !(tmc->tmgroup && tmc->online); in tmigr_is_not_available()
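The two matches above are the entire availability check: a CPU takes part in timer migration only once it is connected to a hierarchy group and marked online. A reconstruction, assuming the elided lines 429/431 are just the function braces:

static inline bool tmigr_is_not_available(struct tmigr_cpu *tmc)
{
	/* Usable only when connected to a group and currently online */
	return !(tmc->tmgroup && tmc->online);
}

Every public entry point below (tmigr_cpu_activate(), tmigr_handle_remote(), ...) bails out early on this predicate.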
526 struct tmigr_cpu *tmc) in __walk_groups() argument
528 struct tmigr_group *child = NULL, *group = tmc->tmgroup; in __walk_groups()
542 static void walk_groups(up_f up, struct tmigr_walk *data, struct tmigr_cpu *tmc) in walk_groups() argument
544 lockdep_assert_held(&tmc->lock); in walk_groups()
546 __walk_groups(up, data, tmc); in walk_groups()
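walk_groups() is only a lockdep-asserting wrapper around __walk_groups(), which climbs from the CPU's leaf group towards the root and invokes the given handler at each level. The loop body falls into the gap between 528 and 542, so the sketch below is an assumption: a conventional parent-pointer walk that stops when the handler returns true. The up_f typedef and the group->parent link are likewise assumed, not shown in the matches.

typedef bool (*up_f)(struct tmigr_group *, struct tmigr_group *,
		     struct tmigr_walk *);

static void __walk_groups(up_f up, struct tmigr_walk *data,
			  struct tmigr_cpu *tmc)
{
	struct tmigr_group *child = NULL, *group = tmc->tmgroup;

	do {
		/* The handler decides whether the walk continues upwards */
		if (up(group, child, data))
			break;

		child = group;
		group = group->parent;	/* assumed parent link */
	} while (group);
}

static void walk_groups(up_f up, struct tmigr_walk *data,
			struct tmigr_cpu *tmc)
{
	lockdep_assert_held(&tmc->lock);
	__walk_groups(up, data, tmc);
}

All state propagation below (activate, deactivate, new timer, remote handling) reuses this one walk with a different up handler.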
668 static void __tmigr_cpu_activate(struct tmigr_cpu *tmc) in __tmigr_cpu_activate() argument
672 data.childmask = tmc->groupmask; in __tmigr_cpu_activate()
674 trace_tmigr_cpu_active(tmc); in __tmigr_cpu_activate()
676 tmc->cpuevt.ignore = true; in __tmigr_cpu_activate()
677 WRITE_ONCE(tmc->wakeup, KTIME_MAX); in __tmigr_cpu_activate()
679 walk_groups(&tmigr_active_up, &data, tmc); in __tmigr_cpu_activate()
689 struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu); in tmigr_cpu_activate() local
691 if (tmigr_is_not_available(tmc)) in tmigr_cpu_activate()
694 if (WARN_ON_ONCE(!tmc->idle)) in tmigr_cpu_activate()
697 raw_spin_lock(&tmc->lock); in tmigr_cpu_activate()
698 tmc->idle = false; in tmigr_cpu_activate()
699 __tmigr_cpu_activate(tmc); in tmigr_cpu_activate()
700 raw_spin_unlock(&tmc->lock); in tmigr_cpu_activate()
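The activation path is almost completely visible in the matches: the caller takes tmc->lock, clears the idle flag, and __tmigr_cpu_activate() marks the CPU's own event as ignored (an active CPU expires its timers itself), invalidates the remote-wakeup hint and propagates the active state upwards. A gap-filled reconstruction; the struct tmigr_walk declaration and the early returns are inferred from the surrounding matches:

static void __tmigr_cpu_activate(struct tmigr_cpu *tmc)
{
	struct tmigr_walk data;

	data.childmask = tmc->groupmask;

	trace_tmigr_cpu_active(tmc);

	tmc->cpuevt.ignore = true;		/* handled locally again */
	WRITE_ONCE(tmc->wakeup, KTIME_MAX);	/* no remote wakeup needed */

	walk_groups(&tmigr_active_up, &data, tmc);
}

void tmigr_cpu_activate(void)
{
	struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);

	if (tmigr_is_not_available(tmc))
		return;

	if (WARN_ON_ONCE(!tmc->idle))
		return;

	raw_spin_lock(&tmc->lock);
	tmc->idle = false;
	__tmigr_cpu_activate(tmc);
	raw_spin_unlock(&tmc->lock);
}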
867 static u64 tmigr_new_timer(struct tmigr_cpu *tmc, u64 nextexp) in tmigr_new_timer() argument
871 .evt = &tmc->cpuevt }; in tmigr_new_timer()
873 lockdep_assert_held(&tmc->lock); in tmigr_new_timer()
875 if (tmc->remote) in tmigr_new_timer()
878 trace_tmigr_cpu_new_timer(tmc); in tmigr_new_timer()
880 tmc->cpuevt.ignore = false; in tmigr_new_timer()
883 walk_groups(&tmigr_new_timer_up, &data, tmc); in tmigr_new_timer()
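tmigr_new_timer() requeues the CPU's global event with a new expiry while tmc->lock is already held (note the lockdep assertion at 873). The designated initializer spanning 869-871 is only partially matched; .nextexp and .firstexp below are assumed struct tmigr_walk fields, and the early return behind the tmc->remote test is inferred:

static u64 tmigr_new_timer(struct tmigr_cpu *tmc, u64 nextexp)
{
	struct tmigr_walk data = { .nextexp = nextexp,		/* assumed */
				   .firstexp = KTIME_MAX,	/* assumed */
				   .evt = &tmc->cpuevt };

	lockdep_assert_held(&tmc->lock);

	/* A remote migrator is expiring this CPU's timers right now */
	if (tmc->remote)
		return KTIME_MAX;

	trace_tmigr_cpu_new_timer(tmc);

	tmc->cpuevt.ignore = false;	/* the event matters again */

	walk_groups(&tmigr_new_timer_up, &data, tmc);

	/* Assumed: first expiry the walk collected for the idle path */
	return data.firstexp;
}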
894 struct tmigr_cpu *tmc; in tmigr_handle_remote_cpu() local
896 tmc = per_cpu_ptr(&tmigr_cpu, cpu); in tmigr_handle_remote_cpu()
898 raw_spin_lock_irq(&tmc->lock); in tmigr_handle_remote_cpu()
915 if (!tmc->online || tmc->remote || tmc->cpuevt.ignore || in tmigr_handle_remote_cpu()
916 now < tmc->cpuevt.nextevt.expires) { in tmigr_handle_remote_cpu()
917 raw_spin_unlock_irq(&tmc->lock); in tmigr_handle_remote_cpu()
921 trace_tmigr_handle_remote_cpu(tmc); in tmigr_handle_remote_cpu()
923 tmc->remote = true; in tmigr_handle_remote_cpu()
924 WRITE_ONCE(tmc->wakeup, KTIME_MAX); in tmigr_handle_remote_cpu()
927 raw_spin_unlock_irq(&tmc->lock); in tmigr_handle_remote_cpu()
935 * the top). During fetching the next timer interrupt, also tmc->lock in tmigr_handle_remote_cpu()
949 raw_spin_lock(&tmc->lock); in tmigr_handle_remote_cpu()
962 if (!tmc->online || !tmc->idle) { in tmigr_handle_remote_cpu()
973 data.evt = &tmc->cpuevt; in tmigr_handle_remote_cpu()
981 walk_groups(&tmigr_new_timer_up, &data, tmc); in tmigr_handle_remote_cpu()
984 tmc->remote = false; in tmigr_handle_remote_cpu()
985 raw_spin_unlock_irq(&tmc->lock); in tmigr_handle_remote_cpu()
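tmigr_handle_remote_cpu() carries the central locking pattern of remote expiry: bail out under tmc->lock when nothing is due (offline, already handled remotely, event ignored, or not yet expired), mark the CPU with tmc->remote = true, drop the lock while the timers are actually expired, then retake it to requeue the next event. The matches leave large gaps (904-913, 929-948, 950-980), so the skeleton below only names the steps; the signature, the goto label and the actual expiry call are assumptions, not shown above. Note the asymmetry visible in the fragments: the re-lock at 949 is a plain raw_spin_lock(), so interrupts must have been disabled again somewhere in the elided span, matching the raw_spin_unlock_irq() at 985.

/* Assumed signature */
static void tmigr_handle_remote_cpu(unsigned int cpu, u64 now,
				    unsigned long jif)
{
	struct tmigr_walk data;
	struct tmigr_cpu *tmc;

	tmc = per_cpu_ptr(&tmigr_cpu, cpu);

	raw_spin_lock_irq(&tmc->lock);

	/* Offline, already handled, no queued event, or not due yet */
	if (!tmc->online || tmc->remote || tmc->cpuevt.ignore ||
	    now < tmc->cpuevt.nextevt.expires) {
		raw_spin_unlock_irq(&tmc->lock);
		return;
	}

	trace_tmigr_handle_remote_cpu(tmc);

	tmc->remote = true;
	WRITE_ONCE(tmc->wakeup, KTIME_MAX);

	/* Drop the lock while expiring the remote CPU's timers */
	raw_spin_unlock_irq(&tmc->lock);

	/* gap 929-948: expire the timers, disable interrupts again */

	raw_spin_lock(&tmc->lock);

	/*
	 * The CPU came back online or left idle meanwhile: it handles
	 * its own timers again, so skip the requeue.
	 */
	if (!tmc->online || !tmc->idle)
		goto unlock;		/* assumed label */

	/* gap 950-980: remaining tmigr_walk fields set up here */
	data.evt = &tmc->cpuevt;

	walk_groups(&tmigr_new_timer_up, &data, tmc);

unlock:
	tmc->remote = false;
	raw_spin_unlock_irq(&tmc->lock);
}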
1046 struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu); in tmigr_handle_remote() local
1049 if (tmigr_is_not_available(tmc)) in tmigr_handle_remote()
1052 data.childmask = tmc->groupmask; in tmigr_handle_remote()
1060 if (!tmigr_check_migrator(tmc->tmgroup, tmc->groupmask)) { in tmigr_handle_remote()
1066 if (READ_ONCE(tmc->wakeup) == KTIME_MAX) in tmigr_handle_remote()
1073 * Update @tmc->wakeup only at the end and do not reset @tmc->wakeup to in tmigr_handle_remote()
1074 * KTIME_MAX. Even if tmc->lock is not held during the whole remote in tmigr_handle_remote()
1075 * handling, tmc->wakeup is fine to be stale as it is called in in tmigr_handle_remote()
1080 __walk_groups(&tmigr_handle_remote_up, &data, tmc); in tmigr_handle_remote()
1082 raw_spin_lock_irq(&tmc->lock); in tmigr_handle_remote()
1083 WRITE_ONCE(tmc->wakeup, data.firstexp); in tmigr_handle_remote()
1084 raw_spin_unlock_irq(&tmc->lock); in tmigr_handle_remote()
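tmigr_handle_remote() is the per-tick entry point: if this CPU is the migrator for its group (tmigr_check_migrator() at 1060), it walks the hierarchy and expires the timers of idle CPUs, then publishes the earliest remaining remote expiry in tmc->wakeup under the lock. The comment fragment at 1073-1075 explains why a stale tmc->wakeup is harmless during the unlocked walk. A reconstruction with assumed glue; the struct tmigr_walk locals and the early-return branch around the READ_ONCE() match at 1066 are inferred:

void tmigr_handle_remote(void)
{
	struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
	struct tmigr_walk data;

	if (tmigr_is_not_available(tmc))
		return;

	data.childmask = tmc->groupmask;
	data.firstexp = KTIME_MAX;	/* assumed field */

	/* Not the migrator? Only proceed if a wakeup hint is pending. */
	if (!tmigr_check_migrator(tmc->tmgroup, tmc->groupmask)) {
		if (READ_ONCE(tmc->wakeup) == KTIME_MAX)
			return;
	}

	/*
	 * Update @tmc->wakeup only at the end and do not reset it to
	 * KTIME_MAX in between (see the comment fragment at 1073-1075).
	 */
	__walk_groups(&tmigr_handle_remote_up, &data, tmc);

	raw_spin_lock_irq(&tmc->lock);
	WRITE_ONCE(tmc->wakeup, data.firstexp);
	raw_spin_unlock_irq(&tmc->lock);
}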
1144 struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu); in tmigr_requires_handle_remote() local
1149 if (tmigr_is_not_available(tmc)) in tmigr_requires_handle_remote()
1153 data.childmask = tmc->groupmask; in tmigr_requires_handle_remote()
1155 data.tmc_active = !tmc->idle; in tmigr_requires_handle_remote()
1162 * Check is done lockless as interrupts are disabled and @tmc->idle is in tmigr_requires_handle_remote()
1165 if (!tmc->idle) { in tmigr_requires_handle_remote()
1166 __walk_groups(&tmigr_requires_handle_remote_up, &data, tmc); in tmigr_requires_handle_remote()
1172 * When the CPU is idle, compare @tmc->wakeup with @data.now. The lock in tmigr_requires_handle_remote()
1178 if (data.now >= READ_ONCE(tmc->wakeup)) in tmigr_requires_handle_remote()
1181 raw_spin_lock(&tmc->lock); in tmigr_requires_handle_remote()
1182 if (data.now >= tmc->wakeup) in tmigr_requires_handle_remote()
1184 raw_spin_unlock(&tmc->lock); in tmigr_requires_handle_remote()
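tmigr_requires_handle_remote() decides on each tick whether remote handling is due. An active CPU walks the hierarchy locklessly (interrupts are off and tmc->idle is written only by the local CPU, per the comment at 1162); an idle CPU merely compares data.now against tmc->wakeup, where the locked variant at 1181-1184 is only needed when a plain 64-bit load can tear (32-bit architectures). Sketch with assumptions: the data fields (now, tmc_active, check), the get_jiffies_update() helper and the CONFIG_64BIT split are inferred, not shown in the matches:

bool tmigr_requires_handle_remote(void)
{
	struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
	struct tmigr_walk data;
	unsigned long jif;
	bool ret = false;

	if (tmigr_is_not_available(tmc))
		return ret;

	data.now = get_jiffies_update(&jif);	/* assumed local helper */
	data.childmask = tmc->groupmask;
	data.tmc_active = !tmc->idle;		/* assumed field */

	/* Active: lockless walk, @tmc->idle only changes locally */
	if (!tmc->idle) {
		__walk_groups(&tmigr_requires_handle_remote_up, &data, tmc);
		return data.check;		/* assumed result field */
	}

	/* Idle: compare the wakeup hint; lock only where loads may tear */
	if (IS_ENABLED(CONFIG_64BIT)) {
		if (data.now >= READ_ONCE(tmc->wakeup))
			ret = true;
	} else {
		raw_spin_lock(&tmc->lock);
		if (data.now >= tmc->wakeup)
			ret = true;
		raw_spin_unlock(&tmc->lock);
	}

	return ret;
}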
1191 * tmigr_cpu_new_timer() - enqueue next global timer into hierarchy (idle tmc)
1196 * and thereby the timer idle path is executed once more. @tmc->wakeup
1205 struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu); in tmigr_cpu_new_timer() local
1208 if (tmigr_is_not_available(tmc)) in tmigr_cpu_new_timer()
1211 raw_spin_lock(&tmc->lock); in tmigr_cpu_new_timer()
1213 ret = READ_ONCE(tmc->wakeup); in tmigr_cpu_new_timer()
1215 if (nextexp != tmc->cpuevt.nextevt.expires || in tmigr_cpu_new_timer()
1216 tmc->cpuevt.ignore) { in tmigr_cpu_new_timer()
1217 ret = tmigr_new_timer(tmc, nextexp); in tmigr_cpu_new_timer()
1222 WRITE_ONCE(tmc->wakeup, ret); in tmigr_cpu_new_timer()
1225 trace_tmigr_cpu_new_timer_idle(tmc, nextexp); in tmigr_cpu_new_timer()
1226 raw_spin_unlock(&tmc->lock); in tmigr_cpu_new_timer()
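tmigr_cpu_new_timer() is called from the idle path with a possibly updated next global expiry. Under tmc->lock it starts from the previous wakeup value and only requeues via tmigr_new_timer() when the expiry actually changed or the event was previously ignored; the result is published with WRITE_ONCE() so the lockless reader at 1178 sees it. Reconstruction; the nextexp != KTIME_MAX guard is assumed to sit in the gap at 1214:

u64 tmigr_cpu_new_timer(u64 nextexp)
{
	struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
	u64 ret;

	if (tmigr_is_not_available(tmc))
		return nextexp;

	raw_spin_lock(&tmc->lock);

	ret = READ_ONCE(tmc->wakeup);

	if (nextexp != KTIME_MAX) {	/* assumed guard */
		if (nextexp != tmc->cpuevt.nextevt.expires ||
		    tmc->cpuevt.ignore) {
			ret = tmigr_new_timer(tmc, nextexp);
			/* Make sure the idle-path reevaluation sees it */
			WRITE_ONCE(tmc->wakeup, ret);
		}
	}

	trace_tmigr_cpu_new_timer_idle(tmc, nextexp);
	raw_spin_unlock(&tmc->lock);
	return ret;
}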
1307 static u64 __tmigr_cpu_deactivate(struct tmigr_cpu *tmc, u64 nextexp) in __tmigr_cpu_deactivate() argument
1311 .evt = &tmc->cpuevt, in __tmigr_cpu_deactivate()
1312 .childmask = tmc->groupmask }; in __tmigr_cpu_deactivate()
1320 tmc->cpuevt.ignore = false; in __tmigr_cpu_deactivate()
1322 walk_groups(&tmigr_inactive_up, &data, tmc); in __tmigr_cpu_deactivate()
1338 struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu); in tmigr_cpu_deactivate() local
1341 if (tmigr_is_not_available(tmc)) in tmigr_cpu_deactivate()
1344 raw_spin_lock(&tmc->lock); in tmigr_cpu_deactivate()
1346 ret = __tmigr_cpu_deactivate(tmc, nextexp); in tmigr_cpu_deactivate()
1348 tmc->idle = true; in tmigr_cpu_deactivate()
1354 WRITE_ONCE(tmc->wakeup, ret); in tmigr_cpu_deactivate()
1356 trace_tmigr_cpu_idle(tmc, nextexp); in tmigr_cpu_deactivate()
1357 raw_spin_unlock(&tmc->lock); in tmigr_cpu_deactivate()
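Deactivation mirrors activation: __tmigr_cpu_deactivate() hands the CPU's first global expiry to the hierarchy (clearing cpuevt.ignore so the migrator considers the event) and walks tmigr_inactive_up; tmigr_cpu_deactivate() wraps that under tmc->lock, sets tmc->idle and stores the returned wakeup for the idle path. Reconstruction with assumed initializer fields and return value:

static u64 __tmigr_cpu_deactivate(struct tmigr_cpu *tmc, u64 nextexp)
{
	struct tmigr_walk data = { .nextexp = nextexp,		/* assumed */
				   .firstexp = KTIME_MAX,	/* assumed */
				   .evt = &tmc->cpuevt,
				   .childmask = tmc->groupmask };

	/* The event has to be considered by the migrator from now on */
	tmc->cpuevt.ignore = false;

	walk_groups(&tmigr_inactive_up, &data, tmc);

	/* Assumed: first global expiry along the deactivated path */
	return data.firstexp;
}

u64 tmigr_cpu_deactivate(u64 nextexp)
{
	struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
	u64 ret;

	if (tmigr_is_not_available(tmc))
		return nextexp;

	raw_spin_lock(&tmc->lock);

	ret = __tmigr_cpu_deactivate(tmc, nextexp);

	tmc->idle = true;

	/* Make sure the idle-path reevaluation of timers sees it */
	WRITE_ONCE(tmc->wakeup, ret);

	trace_tmigr_cpu_idle(tmc, nextexp);
	raw_spin_unlock(&tmc->lock);
	return ret;
}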
1381 struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu); in tmigr_quick_check() local
1382 struct tmigr_group *group = tmc->tmgroup; in tmigr_quick_check()
1384 if (tmigr_is_not_available(tmc)) in tmigr_quick_check()
1387 if (WARN_ON_ONCE(tmc->idle)) in tmigr_quick_check()
1390 if (!tmigr_check_migrator_and_lonely(tmc->tmgroup, tmc->groupmask)) in tmigr_quick_check()
1422 struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu); in tmigr_trigger_active() local
1424 WARN_ON_ONCE(!tmc->online || tmc->idle); in tmigr_trigger_active()
1431 struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu); in tmigr_cpu_offline() local
1435 raw_spin_lock_irq(&tmc->lock); in tmigr_cpu_offline()
1436 tmc->online = false; in tmigr_cpu_offline()
1437 WRITE_ONCE(tmc->wakeup, KTIME_MAX); in tmigr_cpu_offline()
1443 firstexp = __tmigr_cpu_deactivate(tmc, KTIME_MAX); in tmigr_cpu_offline()
1444 trace_tmigr_cpu_offline(tmc); in tmigr_cpu_offline()
1445 raw_spin_unlock_irq(&tmc->lock); in tmigr_cpu_offline()
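Going offline combines both patterns: the CPU is taken out with online = false, the wakeup hint is invalidated, and __tmigr_cpu_deactivate() is called with KTIME_MAX because an offlining CPU must expire its remaining local events itself. If the walk reports a pending first expiry, another online CPU has to take over; the matches at 1422-1424 show only the sanity check of tmigr_trigger_active() (WARN_ON_ONCE(!tmc->online || tmc->idle)), so the work_on_cpu() dispatch below is an assumption:

static int tmigr_cpu_offline(unsigned int cpu)
{
	struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
	u64 firstexp;

	raw_spin_lock_irq(&tmc->lock);
	tmc->online = false;
	WRITE_ONCE(tmc->wakeup, KTIME_MAX);

	/*
	 * An offlining CPU handles its local events itself, therefore
	 * the next event is pushed up with KTIME_MAX.
	 */
	firstexp = __tmigr_cpu_deactivate(tmc, KTIME_MAX);
	trace_tmigr_cpu_offline(tmc);
	raw_spin_unlock_irq(&tmc->lock);

	/* Assumed: hand migrator duty to some other online CPU */
	if (firstexp != KTIME_MAX)
		work_on_cpu(cpumask_any_but(cpu_online_mask, cpu),
			    tmigr_trigger_active, NULL);

	return 0;
}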
1457 struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu); in tmigr_cpu_online() local
1460 if (WARN_ON_ONCE(!tmc->tmgroup)) in tmigr_cpu_online()
1463 raw_spin_lock_irq(&tmc->lock); in tmigr_cpu_online()
1464 trace_tmigr_cpu_online(tmc); in tmigr_cpu_online()
1465 tmc->idle = timer_base_is_idle(); in tmigr_cpu_online()
1466 if (!tmc->idle) in tmigr_cpu_online()
1467 __tmigr_cpu_activate(tmc); in tmigr_cpu_online()
1468 tmc->online = true; in tmigr_cpu_online()
1469 raw_spin_unlock_irq(&tmc->lock); in tmigr_cpu_online()
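Coming online is the inverse: after checking that preparation ran (tmc->tmgroup must be set), the CPU takes its idle state from the timer base and only activates itself in the hierarchy when it is not idle; online = true is set last, under the lock. Reconstruction; the return values are assumed hotplug-callback conventions:

static int tmigr_cpu_online(unsigned int cpu)
{
	struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);

	/* Check whether the CPU data was successfully initialized */
	if (WARN_ON_ONCE(!tmc->tmgroup))
		return -EINVAL;

	raw_spin_lock_irq(&tmc->lock);
	trace_tmigr_cpu_online(tmc);
	tmc->idle = timer_base_is_idle();
	if (!tmc->idle)
		__tmigr_cpu_activate(tmc);
	tmc->online = true;
	raw_spin_unlock_irq(&tmc->lock);
	return 0;
}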
1646 * Update tmc -> group / child -> group connection in tmigr_setup_groups()
1649 struct tmigr_cpu *tmc = per_cpu_ptr(&tmigr_cpu, cpu); in tmigr_setup_groups() local
1653 tmc->tmgroup = group; in tmigr_setup_groups()
1654 tmc->groupmask = BIT(group->num_children++); in tmigr_setup_groups()
1658 trace_tmigr_connect_cpu_parent(tmc); in tmigr_setup_groups()
1714 struct tmigr_cpu *tmc = per_cpu_ptr(&tmigr_cpu, cpu); in tmigr_cpu_prepare() local
1718 if (tmc->tmgroup) in tmigr_cpu_prepare()
1721 raw_spin_lock_init(&tmc->lock); in tmigr_cpu_prepare()
1722 timerqueue_init(&tmc->cpuevt.nextevt); in tmigr_cpu_prepare()
1723 tmc->cpuevt.nextevt.expires = KTIME_MAX; in tmigr_cpu_prepare()
1724 tmc->cpuevt.ignore = true; in tmigr_cpu_prepare()
1725 tmc->cpuevt.cpu = cpu; in tmigr_cpu_prepare()
1726 tmc->remote = false; in tmigr_cpu_prepare()
1727 WRITE_ONCE(tmc->wakeup, KTIME_MAX); in tmigr_cpu_prepare()
1733 if (tmc->groupmask == 0) in tmigr_cpu_prepare()
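tmigr_cpu_prepare() initializes the per-CPU state exactly once: a set tmc->tmgroup means the hierarchy connection already exists from an earlier online attempt. The connection itself is visible in the tmigr_setup_groups() matches at 1653-1654, where the CPU is linked to its leaf group and receives a groupmask bit from the group's child counter; the check at 1733 then guards against a CPU that ended up without a valid slot. Reconstruction; the tmigr_setup_groups() call and the return codes are assumed from the surrounding matches:

static int tmigr_cpu_prepare(unsigned int cpu)
{
	struct tmigr_cpu *tmc = per_cpu_ptr(&tmigr_cpu, cpu);
	int ret = 0;

	/* Not the first online attempt? Hierarchy is already set up. */
	if (tmc->tmgroup)
		return ret;

	raw_spin_lock_init(&tmc->lock);
	timerqueue_init(&tmc->cpuevt.nextevt);
	tmc->cpuevt.nextevt.expires = KTIME_MAX;
	tmc->cpuevt.ignore = true;
	tmc->cpuevt.cpu = cpu;
	tmc->remote = false;
	WRITE_ONCE(tmc->wakeup, KTIME_MAX);

	ret = tmigr_setup_groups(cpu, cpu_to_node(cpu));  /* assumed call */
	if (ret < 0)
		return ret;

	if (tmc->groupmask == 0)
		return -EINVAL;

	return ret;
}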