Lines matching full:smt

34 	cpumask_var_t smt;  (struct member)
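
The matches at lines 90, 664, and 677 below reach this member through idle_cpumask(node)->smt, scx_idle_global_masks.smt, and scx_idle_node_masks[i]->smt, which suggests smt sits next to a per-CPU idle mask in a small per-node container. A hedged sketch of that layout (the cpu member's name is an assumption; only smt is confirmed by this listing):

    struct scx_idle_cpus {
            cpumask_var_t cpu;      /* CPUs that are idle (assumed member) */
            cpumask_var_t smt;      /* CPUs whose whole SMT core is idle (line 34) */
    };
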
84 * SMT mask should be cleared whether we can claim @cpu or not. The SMT in scx_idle_test_and_clear_cpu()
89 const struct cpumask *smt = cpu_smt_mask(cpu); in scx_idle_test_and_clear_cpu() (local)
90 struct cpumask *idle_smts = idle_cpumask(node)->smt; in scx_idle_test_and_clear_cpu()
95 * @cpu is never cleared from the idle SMT mask. Ensure that in scx_idle_test_and_clear_cpu()
102 if (cpumask_intersects(smt, idle_smts)) in scx_idle_test_and_clear_cpu()
103 cpumask_andnot(idle_smts, idle_smts, smt); in scx_idle_test_and_clear_cpu()
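
Lines 84-103 come from scx_idle_test_and_clear_cpu(): when a CPU is claimed (or found not claimable), its core is no longer wholly idle, so every SMT sibling is dropped from the smt mask; testing with cpumask_intersects() first skips the write when the bits are already clear. A hedged reconstruction around those fragments (the sched_smt_active() guard is an assumption):

    if (sched_smt_active()) {
            const struct cpumask *smt = cpu_smt_mask(cpu);
            struct cpumask *idle_smts = idle_cpumask(node)->smt;

            /*
             * The SMT mask should be cleared whether we can claim @cpu
             * or not: the core is not wholly idle either way.  Checking
             * for an intersection first avoids a redundant write.
             */
            if (cpumask_intersects(smt, idle_smts))
                    cpumask_andnot(idle_smts, idle_smts, smt);
    }
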
121 cpu = cpumask_any_and_distribute(idle_cpumask(node)->smt, cpus_allowed); in pick_idle_cpu_in_node()
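
Line 121 shows how pick_idle_cpu_in_node() looks for a fully idle core: cpumask_any_and_distribute() returns a CPU from the intersection of the smt mask and cpus_allowed, rotating its starting point across calls so picks spread out, and returns a value >= nr_cpu_ids when the intersection is empty. A minimal sketch of that pattern (the wrapper name and error value are hypothetical):

    static s32 pick_fully_idle_cpu(int node, const struct cpumask *cpus_allowed)
    {
            s32 cpu;

            /* Rotate the starting point across calls to spread picks. */
            cpu = cpumask_any_and_distribute(idle_cpumask(node)->smt,
                                             cpus_allowed);
            if (cpu >= nr_cpu_ids)
                    return -EBUSY;  /* no fully idle core available */
            return cpu;
    }
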
421 * idle) to avoid interference caused by SMT.
557 * If CPU has SMT, any wholly idle CPU is likely a better pick than in scx_select_cpu_dfl()
565 cpumask_test_cpu(prev_cpu, idle_cpumask(node)->smt) && in scx_select_cpu_dfl()
602 * Give up if we're strictly looking for a full-idle SMT in scx_select_cpu_dfl()
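
Lines 421 and 557-602 describe the SMT policy of scx_select_cpu_dfl(): a wakee keeps @prev_cpu only if the whole core is idle (avoiding sibling interference), otherwise any wholly idle core beats a partially idle @prev_cpu, and SCX_PICK_IDLE_CORE turns "no fully idle core" into a hard failure. A hedged sketch of that branch (control flow reconstructed; names taken from the fragments):

    if (sched_smt_active()) {
            /*
             * Keep @prev_cpu if it belongs to a fully idle core (all
             * siblings idle) and we can actually claim it.
             */
            if (cpumask_test_cpu(prev_cpu, idle_cpumask(node)->smt) &&
                scx_idle_test_and_clear_cpu(prev_cpu))
                    return prev_cpu;

            /* ... otherwise scan for any wholly idle core ... */

            /* Give up if the caller strictly wants a full-idle core. */
            if (flags & SCX_PICK_IDLE_CORE)
                    return -EBUSY;
    }
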
664 BUG_ON(!alloc_cpumask_var(&scx_idle_global_masks.smt, GFP_KERNEL)); in scx_idle_init_masks()
677 BUG_ON(!alloc_cpumask_var_node(&scx_idle_node_masks[i]->smt, GFP_KERNEL, i)); in scx_idle_init_masks()
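
Lines 664 and 677 are the allocation side: the global smt mask uses plain alloc_cpumask_var(), while each per-node mask is allocated with alloc_cpumask_var_node() so the bitmap lives on the node it tracks. A sketch of that pattern (the for_each_node() loop is an assumption):

    static void __init scx_idle_init_masks(void)
    {
            int i;

            /* Global mask, used when per-node idle tracking is off. */
            BUG_ON(!alloc_cpumask_var(&scx_idle_global_masks.smt, GFP_KERNEL));

            /* Per-node masks, backed by memory local to each node. */
            for_each_node(i)
                    BUG_ON(!alloc_cpumask_var_node(&scx_idle_node_masks[i]->smt,
                                                   GFP_KERNEL, i));
    }
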
700 const struct cpumask *smt = cpu_smt_mask(cpu); in update_builtin_idle() (local)
701 struct cpumask *idle_smts = idle_cpumask(node)->smt; in update_builtin_idle()
708 if (!cpumask_subset(smt, idle_cpus)) in update_builtin_idle()
710 cpumask_or(idle_smts, idle_smts, smt); in update_builtin_idle()
712 cpumask_andnot(idle_smts, idle_smts, smt); in update_builtin_idle()
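
Lines 700-712 are the update path: a core enters the smt mask only once cpumask_subset() confirms every sibling is already in the per-CPU idle mask, while a single busy sibling removes the whole core. A hedged sketch (the idle flag and the idle_cpus mask are inferred from line 708):

    const struct cpumask *smt = cpu_smt_mask(cpu);
    struct cpumask *idle_smts = idle_cpumask(node)->smt;

    if (idle) {
            /*
             * Mark the core fully idle only when all of its siblings
             * are in the per-CPU idle mask; partially idle cores stay
             * out.
             */
            if (cpumask_subset(smt, idle_cpus))
                    cpumask_or(idle_smts, idle_smts, smt);
    } else {
            /* One busy sibling unmarks the entire core. */
            cpumask_andnot(idle_smts, idle_smts, smt);
    }
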
785 cpumask_copy(idle_cpumask(NUMA_NO_NODE)->smt, cpu_online_mask); in reset_idle_masks()
793 cpumask_and(idle_cpumask(node)->smt, cpu_online_mask, node_mask); in reset_idle_masks()
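
Lines 785 and 793 show the reset policy: every online CPU is optimistically treated as part of a fully idle core, globally via cpumask_copy() and per node by intersecting cpu_online_mask with the node's cpumask. A sketch (the loop and the node_mask derivation are assumptions):

    int node;

    /* Global mask: assume all online CPUs sit on idle cores. */
    cpumask_copy(idle_cpumask(NUMA_NO_NODE)->smt, cpu_online_mask);

    /* Per-node masks: the same, restricted to each node's CPUs. */
    for_each_node(node) {
            const struct cpumask *node_mask = cpumask_of_node(node);

            cpumask_and(idle_cpumask(node)->smt, cpu_online_mask, node_mask);
    }
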
1112 return idle_cpumask(node)->smt; in scx_bpf_get_idle_smtmask_node()
1144 return idle_cpumask(NUMA_NO_NODE)->smt; in scx_bpf_get_idle_smtmask()
1151 * either the percpu, or SMT idle-tracking cpumask.
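
Lines 1112 and 1144 return the smt mask to BPF schedulers through the scx_bpf_get_idle_smtmask_node() and scx_bpf_get_idle_smtmask() kfuncs, and the comment at 1151 belongs to the matching release helper. A minimal BPF-side usage sketch, assuming the usual sched_ext kfunc declarations are in scope (the fallback policy is illustrative, not this file's code):

    s32 BPF_STRUCT_OPS(example_select_cpu, struct task_struct *p,
                       s32 prev_cpu, u64 wake_flags)
    {
            const struct cpumask *idle_smt = scx_bpf_get_idle_smtmask();
            s32 cpu = prev_cpu;

            /* Prefer @prev_cpu when its whole core is idle and claimable. */
            if (bpf_cpumask_test_cpu(prev_cpu, idle_smt) &&
                scx_bpf_test_and_clear_cpu_idle(prev_cpu))
                    goto out;

            /* Fall back to any idle CPU the task may run on. */
            cpu = scx_bpf_pick_idle_cpu(p->cpus_ptr, 0);
            if (cpu < 0)
                    cpu = prev_cpu;
    out:
            /* Acquired masks must be released (see line 1151). */
            scx_bpf_put_idle_cpumask(idle_smt);
            return cpu;
    }
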