Lines matching full:direct (full-text search hits for the word "direct"; each line shows the source line number, the matching line, and the enclosing function)

28  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
97 "Average number of direct callouts examined per callout_process call. "
101 &avg_lockcalls_dir, 0, "Average number of lock direct callouts made per "
105 0, "Average number of MP direct callouts made per callout_process call. "
212 int direct);
237 cc_cce_cleanup(struct callout_cpu *cc, int direct) in cc_cce_cleanup() argument
240 cc_exec_curr(cc, direct) = NULL; in cc_cce_cleanup()
241 cc_exec_cancel(cc, direct) = false; in cc_cce_cleanup()
242 cc_exec_waiting(cc, direct) = false; in cc_cce_cleanup()
244 cc_migration_cpu(cc, direct) = CPUBLOCK; in cc_cce_cleanup()
245 cc_migration_time(cc, direct) = 0; in cc_cce_cleanup()
246 cc_migration_prec(cc, direct) = 0; in cc_cce_cleanup()
247 cc_migration_func(cc, direct) = NULL; in cc_cce_cleanup()
248 cc_migration_arg(cc, direct) = NULL; in cc_cce_cleanup()
256 cc_cce_migrating(struct callout_cpu *cc, int direct) in cc_cce_migrating() argument
260 return (cc_migration_cpu(cc, direct) != CPUBLOCK); in cc_cce_migrating()
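
The cc_cce_cleanup() and cc_cce_migrating() hits suggest the underlying layout: each CPU keeps two sets of execution state, one for the softclock thread and one for direct (interrupt-context) execution, selected by the direct argument, plus per-set migration fields that are parked at CPUBLOCK when no migration is pending. A minimal userspace sketch of that shape follows; the struct and field names are invented, and only the direct index and the CPUBLOCK idea are taken from the listing:

#include <stdbool.h>
#include <stddef.h>

#define CPUBLOCK        (-1)    /* "no migration target"; real value is an assumption */

struct exec_slot {
        void    *curr;                  /* callout currently executing, if any */
        bool     cancel;                /* cancellation requested by a stopper */
        bool     waiting;               /* a drainer sleeps until curr completes */
        int      mig_cpu;               /* migration target CPU, CPUBLOCK if none */
        void    (*mig_func)(void *);    /* handler to use after migration */
        void    *mig_arg;
};

struct callout_cpu_model {
        struct exec_slot slots[2];      /* [0] softclock thread, [1] direct */
};

static void
slot_cleanup(struct callout_cpu_model *cc, int direct)
{
        struct exec_slot *s = &cc->slots[direct];

        s->curr = NULL;
        s->cancel = false;
        s->waiting = false;
        s->mig_cpu = CPUBLOCK;
        s->mig_func = NULL;
        s->mig_arg = NULL;
}

static bool
slot_migrating(struct callout_cpu_model *cc, int direct)
{

        /* A recorded target other than CPUBLOCK means a migration is pending. */
        return (cc->slots[direct].mig_cpu != CPUBLOCK);
}

int
main(void)
{
        struct callout_cpu_model cc;

        slot_cleanup(&cc, 0);
        slot_cleanup(&cc, 1);
        return (slot_migrating(&cc, 1) ? 1 : 0);        /* exits 0: nothing pending */
}
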
629 int direct) in softclock_call_cc() argument
671 cc_exec_curr(cc, direct) = c; in softclock_call_cc()
672 cc_exec_last_func(cc, direct) = c_func; in softclock_call_cc()
673 cc_exec_last_arg(cc, direct) = c_arg; in softclock_call_cc()
674 cc_exec_cancel(cc, direct) = false; in softclock_call_cc()
679 cc_exec_curr(cc, direct) = NULL; in softclock_call_cc()
683 (direct) ? C_DIRECT_EXEC : 0); in softclock_call_cc()
694 if (cc_exec_cancel(cc, direct)) { in softclock_call_cc()
700 cc_exec_cancel(cc, direct) = true; in softclock_call_cc()
723 "func:%p", c_func, "arg:%p", c_arg, "direct:%d", direct); in softclock_call_cc()
752 KASSERT(cc_exec_curr(cc, direct) == c, ("mishandled cc_curr")); in softclock_call_cc()
753 cc_exec_curr(cc, direct) = NULL; in softclock_call_cc()
754 if (cc_exec_waiting(cc, direct)) { in softclock_call_cc()
761 if (cc_cce_migrating(cc, direct)) { in softclock_call_cc()
762 cc_cce_cleanup(cc, direct); in softclock_call_cc()
770 cc_exec_waiting(cc, direct) = false; in softclock_call_cc()
772 wakeup(&cc_exec_waiting(cc, direct)); in softclock_call_cc()
774 } else if (cc_cce_migrating(cc, direct)) { in softclock_call_cc()
780 new_cpu = cc_migration_cpu(cc, direct); in softclock_call_cc()
781 new_time = cc_migration_time(cc, direct); in softclock_call_cc()
782 new_prec = cc_migration_prec(cc, direct); in softclock_call_cc()
783 new_func = cc_migration_func(cc, direct); in softclock_call_cc()
784 new_arg = cc_migration_arg(cc, direct); in softclock_call_cc()
785 cc_cce_cleanup(cc, direct); in softclock_call_cc()
802 flags = (direct) ? C_DIRECT_EXEC : 0; in softclock_call_cc()
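
Taken together, the softclock_call_cc() hits outline the execute path: record the running callout (and the last func/arg for debugging) in the slot selected by direct, clear the cancel flag, invoke the handler, then clear cc_exec_curr and either wake a thread waiting for the callout to finish or complete a deferred migration, re-arming with C_DIRECT_EXEC when direct is set. A much simplified, lock-free sketch of that ordering, with hypothetical names:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct slot {
        void    (*curr)(void *);        /* handler currently running, NULL if idle */
        void    (*last_func)(void *);   /* kept for post-mortem debugging */
        void    *last_arg;
        bool     cancel;                /* a concurrent stop would set this */
        bool     waiting;               /* a drainer sleeps on this slot */
};

/* Two slots per CPU: [0] softclock thread, [1] direct/interrupt context. */
static struct slot slots[2];

static void
call_one(int direct, void (*func)(void *), void *arg)
{
        struct slot *s = &slots[direct];

        /* Publish the handler before calling it so a racing stop can see it. */
        s->curr = func;
        s->last_func = func;
        s->last_arg = arg;
        s->cancel = false;

        func(arg);

        /* Handler done: clear the slot and wake anyone draining it. */
        s->curr = NULL;
        if (s->waiting) {
                s->waiting = false;
                printf("wakeup(drainer)\n");
        }
}

static void
hello(void *arg)
{

        printf("callout fired: %s\n", (const char *)arg);
}

int
main(void)
{

        call_one(1, hello, "direct");
        call_one(0, hello, "softclock");
        return (0);
}
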
944 int cancelled, direct; in callout_reset_sbt_on() local
952 * wrong direct flag if we don't do it before we add. in callout_reset_sbt_on()
955 direct = 1; in callout_reset_sbt_on()
957 direct = 0; in callout_reset_sbt_on()
959 KASSERT(!direct || c->c_lock == NULL || in callout_reset_sbt_on()
961 ("%s: direct callout %p has non-spin lock", __func__, c)); in callout_reset_sbt_on()
969 if (cc_exec_curr(cc, direct) == c) { in callout_reset_sbt_on()
975 if (c->c_lock != NULL && !cc_exec_cancel(cc, direct)) in callout_reset_sbt_on()
976 cancelled = cc_exec_cancel(cc, direct) = true; in callout_reset_sbt_on()
977 if (cc_exec_waiting(cc, direct)) { in callout_reset_sbt_on()
997 cc_migration_cpu(cc, direct) = cpu; in callout_reset_sbt_on()
998 cc_migration_time(cc, direct) = to_sbt; in callout_reset_sbt_on()
999 cc_migration_prec(cc, direct) = precision; in callout_reset_sbt_on()
1000 cc_migration_func(cc, direct) = ftn; in callout_reset_sbt_on()
1001 cc_migration_arg(cc, direct) = arg; in callout_reset_sbt_on()
1028 if (cc_exec_curr(cc, direct) == c) { in callout_reset_sbt_on()
1045 cc_migration_cpu(cc, direct) = cpu; in callout_reset_sbt_on()
1046 cc_migration_time(cc, direct) = to_sbt; in callout_reset_sbt_on()
1047 cc_migration_prec(cc, direct) = precision; in callout_reset_sbt_on()
1048 cc_migration_func(cc, direct) = ftn; in callout_reset_sbt_on()
1049 cc_migration_arg(cc, direct) = arg; in callout_reset_sbt_on()
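
In the callout_reset_sbt_on() hits, direct is derived once from the caller's flags (the comment about the "wrong direct flag" refers to computing it before the callout is queued) and is then used both to index the per-CPU state and to assert that a direct callout, which runs from interrupt context, is protected by a spin lock or by no lock at all. A hedged sketch of that check; C_DIRECT_EXEC is named in the listing, but its value and the lock classification below are stand-ins:

#include <assert.h>

#define C_DIRECT_EXEC   0x0001          /* name from the listing; value assumed */

enum lock_kind { LOCK_NONE, LOCK_SPIN, LOCK_SLEEP };

struct fake_callout {
        enum lock_kind  lock_kind;      /* stand-in for the callout's lock class */
};

/*
 * Decide whether the callout will run "directly" from interrupt context
 * and, if so, insist that it is protected by a spin lock or no lock,
 * mirroring the KASSERT quoted in the listing.
 */
static int
pick_direct(struct fake_callout *c, int flags)
{
        int direct = (flags & C_DIRECT_EXEC) ? 1 : 0;

        assert(!direct || c->lock_kind == LOCK_NONE ||
            c->lock_kind == LOCK_SPIN);
        return (direct);
}

int
main(void)
{
        struct fake_callout c = { .lock_kind = LOCK_SPIN };

        return (pick_direct(&c, C_DIRECT_EXEC) == 1 ? 0 : 1);
}
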
1092 int direct, sq_locked, use_lock; in _callout_stop_safe() local
1114 direct = 1; in _callout_stop_safe()
1116 direct = 0; in _callout_stop_safe()
1153 sleepq_release(&cc_exec_waiting(old_cc, direct)); in _callout_stop_safe()
1165 if (cc_exec_curr(cc, direct) == c) { in _callout_stop_safe()
1182 if (cc_exec_curr(cc, direct) == c) { in _callout_stop_safe()
1184 * Use direct calls to sleepqueue interface in _callout_stop_safe()
1203 &cc_exec_waiting(cc, direct)); in _callout_stop_safe()
1215 cc_exec_waiting(cc, direct) = true; in _callout_stop_safe()
1219 &cc_exec_waiting(cc, direct), in _callout_stop_safe()
1223 &cc_exec_waiting(cc, direct), in _callout_stop_safe()
1233 } else if (use_lock && !cc_exec_cancel(cc, direct)) { in _callout_stop_safe()
1243 cc_exec_cancel(cc, direct) = true; in _callout_stop_safe()
1246 KASSERT(!cc_cce_migrating(cc, direct), in _callout_stop_safe()
1251 cc_migration_cpu(cc, direct) = CPUBLOCK; in _callout_stop_safe()
1252 cc_migration_time(cc, direct) = 0; in _callout_stop_safe()
1253 cc_migration_prec(cc, direct) = 0; in _callout_stop_safe()
1254 cc_migration_func(cc, direct) = NULL; in _callout_stop_safe()
1255 cc_migration_arg(cc, direct) = NULL; in _callout_stop_safe()
1279 cc_migration_cpu(cc, direct) = CPUBLOCK; in _callout_stop_safe()
1280 cc_migration_time(cc, direct) = 0; in _callout_stop_safe()
1281 cc_migration_prec(cc, direct) = 0; in _callout_stop_safe()
1282 cc_migration_func(cc, direct) = NULL; in _callout_stop_safe()
1283 cc_migration_arg(cc, direct) = NULL; in _callout_stop_safe()
1299 sleepq_release(&cc_exec_waiting(cc, direct)); in _callout_stop_safe()
1308 if (cc_exec_curr(cc, direct) != c) in _callout_stop_safe()
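
The _callout_stop_safe() hits show the other side of the handshake: if the callout being stopped is the one currently executing (cc_exec_curr == c), a draining caller sets cc_exec_waiting and sleeps until softclock_call_cc() wakes it; a non-draining caller instead sets cc_exec_cancel and clears any pending migration state. The kernel uses sleepqueues for this; purely as an analogy, the same wait/wakeup pattern in userspace with pthreads:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t cc_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cc_done = PTHREAD_COND_INITIALIZER;
static bool running;            /* stands in for cc_exec_curr(cc, direct) == c */
static bool waiting;            /* stands in for cc_exec_waiting(cc, direct) */

static void *
handler_thread(void *arg)
{

        (void)arg;
        usleep(100 * 1000);             /* pretend the handler runs for a while */

        pthread_mutex_lock(&cc_lock);
        running = false;                /* handler finished */
        if (waiting) {                  /* a drainer is asleep: wake it */
                waiting = false;
                pthread_cond_broadcast(&cc_done);
        }
        pthread_mutex_unlock(&cc_lock);
        return (NULL);
}

/* Block until the in-flight handler has finished, as a draining stop would. */
static void
drain(void)
{

        pthread_mutex_lock(&cc_lock);
        while (running) {
                waiting = true;
                pthread_cond_wait(&cc_done, &cc_lock);
        }
        pthread_mutex_unlock(&cc_lock);
}

int
main(void)
{
        pthread_t t;

        running = true;                 /* the handler is considered in flight */
        pthread_create(&t, NULL, handler_thread, NULL);
        drain();                        /* returns only once the handler is done */
        printf("drained\n");
        pthread_join(&t, NULL);
        return (0);
}
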
1508 _show_last_callout(int cpu, int direct, const char *dirstr) in _show_last_callout() argument
1514 func = cc_exec_last_func(cc, direct); in _show_last_callout()
1515 arg = cc_exec_last_arg(cc, direct); in _show_last_callout()
1539 _show_last_callout(cpu, 1, " direct"); in DB_SHOW_COMMAND_FLAGS()
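
Finally, _show_last_callout() backs a ddb(4) debugging command that prints, for each CPU, the function and argument of the last callout executed in the softclock and direct contexts (cc_exec_last_func / cc_exec_last_arg above). A trivial illustration of such a dump, reusing the assumed two-slot-per-CPU layout; the CPU count and all names here are made up:

#include <stdio.h>

#define NCPU    4                       /* assumed CPU count for the sketch */

struct last_exec {
        void    *last_func;             /* address of the last handler run */
        void    *last_arg;              /* its argument */
};

/* [cpu][0] = softclock context, [cpu][1] = direct context. */
static struct last_exec last[NCPU][2];

static void
show_last_callout(int cpu, int direct, const char *dirstr)
{

        printf("cpu %d%s: func %p arg %p\n", cpu, dirstr,
            last[cpu][direct].last_func, last[cpu][direct].last_arg);
}

int
main(void)
{

        for (int cpu = 0; cpu < NCPU; cpu++) {
                show_last_callout(cpu, 0, "");
                show_last_callout(cpu, 1, " direct");
        }
        return (0);
}
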