| /linux/kernel/irq/ |
| H A D | migration.c | 29 if (!cpumask_intersects(desc->pending_mask, cpu_online_mask)) { in irq_fixup_move_pending() 87 if (cpumask_intersects(desc->pending_mask, cpu_online_mask)) { in irq_move_masked_irq()
|
| H A D | manage.c | 269 if (!cpumask_intersects(tmp_mask, cpu_online_mask)) in irq_do_set_affinity() 611 if (cpumask_intersects(desc->irq_common_data.affinity, in irq_setup_affinity() 626 if (cpumask_intersects(&mask, nodemask)) in irq_setup_affinity() 1446 if (cpumask_intersects(old->affinity, new->affinity) || in valid_percpu_irqaction() 2421 if (cpumask_intersects(desc->percpu_enabled, action->affinity)) { in __free_percpu_irq()
|
| H A D | msi.c | 1252 !cpumask_intersects(irq_data_get_affinity_mask(irqd), in msi_init_virq()
|
| /linux/kernel/cgroup/ |
| H A D | cpuset.c | 481 while (!cpumask_intersects(cs->effective_cpus, pmask)) in guarantee_active_cpus() 645 if (cpumask_intersects(xcpus1, xcpus2)) in cpusets_are_exclusive() 677 return cpumask_intersects(trial->exclusive_cpus, sibling->exclusive_cpus); in cpus_excl_conflict() 1175 (!cpumask_intersects(xcpus, cpu_active_mask) && in tasks_nocpu_error() 1213 if (!cpumask_intersects(xcpus, isolated_cpus)) in isolated_cpus_update() 1415 if (cpumask_intersects(excpus, sibling_xcpus)) { in rm_siblings_excl_cpus() 1510 WARN_ON_ONCE(cpumask_intersects(tmp->new_cpus, subpartitions_cpus)); in remote_partition_enable() 1511 if (!cpumask_intersects(tmp->new_cpus, cpu_active_mask) || in remote_partition_enable() 1612 WARN_ON_ONCE(cpumask_intersects(tmp->addmask, subpartitions_cpus)); in remote_cpus_update() 1615 else if (cpumask_intersects(tmp->addmask, subpartitions_cpus) || in remote_cpus_update() [all …]
|
| H A D | cpuset-internal.h | 259 return cpumask_intersects(a->effective_cpus, b->effective_cpus); in cpusets_overlap()
|
| H A D | cpuset-v1.c | 403 return cpumask_intersects(cs1->cpus_allowed, in cpuset1_cpus_excl_conflict() 687 cpumask_intersects(cp->cpus_allowed, in cpuset1_generate_sched_domains()
|
| /linux/arch/mips/kernel/ |
| H A D | mips-mt-fpaff.c | 122 cpumask_intersects(new_mask, &mt_fpu_cpumask)) { in mipsmt_sys_sched_setaffinity()
|
| H A D | traps.c | 945 if (cpumask_intersects(&current->cpus_mask, &mt_fpu_cpumask)) { in mt_ase_fp_affinity()
|
| /linux/drivers/sh/intc/ |
| H A D | chip.c | 72 if (!cpumask_intersects(cpumask, cpu_online_mask)) in intc_set_affinity()
|
| /linux/arch/mips/mm/ |
| H A D | context.c | 249 cpumask_intersects(&tlb_flush_pending, &cpu_sibling_map[cpu])) { in check_switch_mmu_context()
|
| /linux/arch/arm64/kernel/ |
| H A D | topology.c | 215 if (!cpumask_intersects(policy->related_cpus, in arch_freq_get_on_cpu()
|
| /linux/kernel/sched/ |
| H A D | isolation.c | 131 if (!cpumask_intersects(trial, cpu_online_mask)) { in housekeeping_update()
|
| H A D | ext_idle.c | 98 * NOTE: Use cpumask_intersects() and cpumask_test_cpu() to in scx_idle_test_and_clear_cpu() 102 if (cpumask_intersects(smt, idle_smts)) in scx_idle_test_and_clear_cpu()
|
| H A D | deadline.c | 3123 if (!cpumask_intersects(src_rd->span, ctx->new_mask)) { in set_cpus_allowed_dl() 3181 if (!cpumask_intersects(p->cpus_ptr, hk_msk)) { in dl_get_task_effective_cpus()
|
| H A D | fair.c | 7729 if (!cpumask_intersects(sched_domain_span(sd), p->cpus_ptr)) in sched_balance_find_dst_cpu() 8932 if (!cpumask_intersects(p->cpus_ptr, cpumask)) in set_task_max_allowed_capacity() 11119 if (!cpumask_intersects(sched_group_span(group), in sched_balance_find_dst_group()
|
| /linux/arch/arm/kernel/ |
| H A D | hw_breakpoint.c | 1108 if (cpumask_intersects(&debug_err_mask, cpumask_of(cpu))) { in reset_ctrl_regs() 1128 if (cpumask_intersects(&debug_err_mask, cpumask_of(cpu))) { in reset_ctrl_regs()
|
| /linux/lib/ |
| H A D | group_cpus.c | 92 if (cpumask_intersects(mask, node_to_cpumask[n])) { in get_nodes_in_cpumask()
|
| /linux/drivers/hwmon/ |
| H A D | coretemp.c | 719 if (!cpumask_intersects(&pdata->cpumask, topology_sibling_cpumask(cpu))) in coretemp_cpu_online()
|
| /linux/include/linux/ |
| H A D | cpumask.h | 820 bool cpumask_intersects(const struct cpumask *src1p, const struct cpumask *src2p) in cpumask_intersects() function
|
| /linux/arch/powerpc/platforms/cell/spufs/ |
| H A D | sched.c | 160 if (cpumask_intersects(mask, &ctx->cpus_allowed)) in __node_allowed()
|
| /linux/kernel/ |
| H A D | workqueue.c | 2745 if (cpumask_intersects(wq_unbound_cpumask, cpu_active_mask)) in unbind_worker() 5335 if (!cpumask_intersects(attrs->__pod_cpumask, wq_online_cpumask)) { in wq_calc_pod_cpumask() 5716 if (cpumask_intersects(wq_unbound_cpumask, cpu_active_mask)) in init_rescuer() 7907 if (!cpumask_intersects(wq_unbound_cpumask, mask)) { in init_cpu_worker_pool()
|
| /linux/kernel/trace/ |
| H A D | trace_events_filter.c | 661 return cpumask_intersects(mask, cmp); in do_filter_cpumask()
|
| /linux/drivers/vdpa/vdpa_user/ |
| H A D | vduse_dev.c | 1704 if (!cpumask_intersects(new_value, cpu_online_mask)) in irq_cb_affinity_store()
|
| /linux/drivers/scsi/hisi_sas/ |
| H A D | hisi_sas_main.c | 1205 if (mask && !cpumask_intersects(cpu_online_mask, mask)) in hisi_sas_internal_task_abort_dev()
|