Lines Matching +full:non +full:- +full:smp
1 // SPDX-License-Identifier: GPL-2.0-only
3 * Generic helpers for smp ipi calls
19 #include <linux/smp.h>
36 #include "sched/smp.h"
38 #define CSD_TYPE(_csd) ((_csd)->node.u_flags & CSD_FLAG_TYPE_MASK)
58 if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL, in smpcfd_prepare_cpu()
60 return -ENOMEM; in smpcfd_prepare_cpu()
61 if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL, in smpcfd_prepare_cpu()
63 free_cpumask_var(cfd->cpumask); in smpcfd_prepare_cpu()
64 return -ENOMEM; in smpcfd_prepare_cpu()
66 cfd->csd = alloc_percpu(call_single_data_t); in smpcfd_prepare_cpu()
67 if (!cfd->csd) { in smpcfd_prepare_cpu()
68 free_cpumask_var(cfd->cpumask); in smpcfd_prepare_cpu()
69 free_cpumask_var(cfd->cpumask_ipi); in smpcfd_prepare_cpu()
70 return -ENOMEM; in smpcfd_prepare_cpu()
80 free_cpumask_var(cfd->cpumask); in smpcfd_dead_cpu()
81 free_cpumask_var(cfd->cpumask_ipi); in smpcfd_dead_cpu()
82 free_percpu(cfd->csd); in smpcfd_dead_cpu()
89 * The IPIs for the smp-call-function callbacks queued by other CPUs in smpcfd_dying_cpu()
91 * CPU disabled interrupts (inside stop-machine) before the IPIs were in smpcfd_dying_cpu()
189 __this_cpu_write(cur_csd_func, csd->func); in __csd_lock_record()
190 __this_cpu_write(cur_csd_info, csd->info); in __csd_lock_record()
209 return csd->node.dst; /* Other CSD_TYPE_ values might not have ->dst. */ in csd_lock_wait_getcpu()
210 return -1; in csd_lock_wait_getcpu()
216 * csd_lock_is_stuck - Has a CSD-lock acquisition been stuck too long?
218 * Returns @true if a CSD-lock acquisition is stuck and has been stuck
219 * long enough for a "non-responsive CSD lock" message to be printed.
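The kerneldoc above describes a simple yes/no query used by the CSD-lock wait debugging code. A minimal sketch of how a diagnostic path might consult it; the no-argument bool form is assumed from the comment (and a kernel built with CSD-lock wait debugging), and report_stall() plus its message are invented for illustration.

#include <linux/smp.h>
#include <linux/printk.h>

/* Hypothetical stall reporter; csd_lock_is_stuck() assumed to be bool (void). */
static void report_stall(int cpu)
{
	if (csd_lock_is_stuck())
		pr_warn("CPU%d may be waiting behind a stuck CSD lock\n", cpu);
}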
233 int cpu = -1; in csd_lock_wait_toolong()
238 unsigned int flags = READ_ONCE(csd->node.u_flags); in csd_lock_wait_toolong()
253 ts_delta = ts2 - *ts1; in csd_lock_wait_toolong()
261 ts_delta = ts0 - ts2; in csd_lock_wait_toolong()
277 ts_delta = ts2 - ts0; in csd_lock_wait_toolong()
278 …pr_alert("csd: %s non-responsive CSD lock (#%d) on CPU#%d, waiting %lld ns for CPU#%02d %pS(%ps).\… in csd_lock_wait_toolong()
280 cpu, csd->func, csd->info); in csd_lock_wait_toolong()
302 …pr_alert("csd: Re-sending CSD lock (#%d) IPI from CPU#%02d to CPU#%02d\n", *bug_id, raw_smp_proces… in csd_lock_wait_toolong()
314 * csd_lock/csd_unlock used to serialize access to per-cpu csd resources
316 * For non-synchronous ipi calls the csd can still be in use by the
317  * previous function call. For multi-cpu calls it's even more interesting
342 smp_cond_load_acquire(&csd->node.u_flags, !(VAL & CSD_FLAG_LOCK)); in csd_lock_wait()
351 smp_cond_load_acquire(&csd->node.u_flags, !(VAL & CSD_FLAG_LOCK)); in csd_lock_wait()
358 csd->node.u_flags |= CSD_FLAG_LOCK; in csd_lock()
362 * to ->flags with any subsequent assignments to other in csd_lock()
370 WARN_ON(!(csd->node.u_flags & CSD_FLAG_LOCK)); in csd_unlock()
375 smp_store_release(&csd->node.u_flags, 0); in csd_unlock()
395 sched_ttwu_pending : csd->func; in __smp_call_single_queue()
419 * ->func, ->info, and ->flags set.
428 smp_call_func_t func = csd->func; in generic_exec_single()
429 void *info = csd->info; in generic_exec_single()
433 * We can unlock early even for the synchronous on-stack case, in generic_exec_single()
447 return -ENXIO; in generic_exec_single()
450 __smp_call_single_queue(cpu, &csd->node.llist); in generic_exec_single()
456 * generic_smp_call_function_single_interrupt - Execute SMP IPI callbacks
467 * __flush_smp_call_function_queue - Flush pending smp-call-function callbacks
472 * Flush any pending smp-call-function callbacks queued on this CPU. This is
514 csd->func); in __flush_smp_call_function_queue()
518 pr_warn("IPI task-wakeup sent to offline CPU\n"); in __flush_smp_call_function_queue()
536 smp_call_func_t func = csd->func; in __flush_smp_call_function_queue()
537 void *info = csd->info; in __flush_smp_call_function_queue()
540 prev->next = &csd_next->node.llist; in __flush_smp_call_function_queue()
542 entry = &csd_next->node.llist; in __flush_smp_call_function_queue()
550 prev = &csd->node.llist; in __flush_smp_call_function_queue()
566 prev->next = &csd_next->node.llist; in __flush_smp_call_function_queue()
568 entry = &csd_next->node.llist; in __flush_smp_call_function_queue()
572 smp_call_func_t func = csd->func; in __flush_smp_call_function_queue()
573 void *info = csd->info; in __flush_smp_call_function_queue()
584 prev = &csd->node.llist; in __flush_smp_call_function_queue()
599 * flush_smp_call_function_queue - Flush pending smp-call-function callbacks
605 * handle queued SMP function calls before scheduling.
629 * smp_call_function_single - Run a function on a specific CPU
630 * @func: The function to run. This must be fast and non-blocking.
657 * send smp call function interrupt to this cpu and as such deadlocks in smp_call_function_single()
677 csd->func = func; in smp_call_function_single()
678 csd->info = info; in smp_call_function_single()
680 csd->node.src = smp_processor_id(); in smp_call_function_single()
681 csd->node.dst = cpu; in smp_call_function_single()
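The fragments above show smp_call_function_single() filling in the csd (->func, ->info, ->node.src/dst) before queueing it. A minimal caller-side sketch, assuming a hypothetical per-CPU counter that a driver wants to sample from another CPU; all demo_* names are made up.

#include <linux/smp.h>
#include <linux/percpu-defs.h>
#include <linux/printk.h>

static DEFINE_PER_CPU(u64, demo_counter);	/* hypothetical per-CPU statistic */

/* Runs on the target CPU in IPI (hard-irq) context: fast and non-blocking. */
static void read_demo_counter(void *info)
{
	*(u64 *)info = this_cpu_read(demo_counter);
}

static u64 demo_read_on(int cpu)
{
	u64 val = 0;

	/*
	 * wait=1: return only after the callback has completed on @cpu.
	 * Must be called with interrupts enabled; returns -ENXIO if @cpu
	 * is offline (line 447 above).
	 */
	if (smp_call_function_single(cpu, read_demo_counter, &val, 1))
		pr_warn("CPU%d is offline, returning 0\n", cpu);
	return val;
}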
696 * smp_call_function_single_async() - Run an asynchronous function on a
699 * @csd: Pre-allocated and setup data structure
704  * The caller passes their own pre-allocated data structure
710 * function will return immediately with -EBUSY showing that the csd
724 if (csd->node.u_flags & CSD_FLAG_LOCK) { in smp_call_function_single_async()
725 err = -EBUSY; in smp_call_function_single_async()
729 csd->node.u_flags = CSD_FLAG_LOCK; in smp_call_function_single_async()
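The -EBUSY check above is what makes the async variant safe to call repeatedly with the same csd. A small sketch of that pattern, assuming INIT_CSD() from <linux/smp.h>; the demo_* names are illustrative only.

#include <linux/smp.h>
#include <linux/atomic.h>

static atomic_t demo_hits = ATOMIC_INIT(0);
static call_single_data_t demo_csd;

/* Executed on the remote CPU in IPI context; keep it short. */
static void demo_async_fn(void *info)
{
	atomic_inc((atomic_t *)info);
}

/* One-time setup before the first kick. */
static void demo_csd_setup(void)
{
	INIT_CSD(&demo_csd, demo_async_fn, &demo_hits);
}

static int demo_kick(int cpu)
{
	/*
	 * Returns -EBUSY (line 725 above) if the previous use of demo_csd
	 * has not been unlocked yet, i.e. its callback has not finished;
	 * callers must either tolerate the dropped kick or retry later.
	 */
	return smp_call_function_single_async(cpu, &demo_csd);
}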
742 * smp_call_function_any - Run a function on any of the given cpus
744 * @func: The function to run. This must be fast and non-blocking.
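A hedged sketch of how a caller might use smp_call_function_any(), taking the core-sibling mask from <linux/topology.h> as the candidate set; the demo_* names are invented for illustration.

#include <linux/smp.h>
#include <linux/topology.h>

/* IPI-context callback: records which CPU actually ran it. */
static void demo_probe(void *info)
{
	*(int *)info = smp_processor_id();
}

static int demo_probe_core_siblings(int cpu)
{
	int ran_on = -1;
	int ret;

	/*
	 * Runs demo_probe() on one online CPU from the mask, preferring
	 * the current CPU and then same-node CPUs, waiting for it to
	 * finish (wait=1).
	 */
	ret = smp_call_function_any(topology_core_cpumask(cpu),
				    demo_probe, &ran_on, 1);
	return ret ? ret : ran_on;
}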
796 * send smp call function interrupt to this cpu and as such deadlocks in smp_call_function_many_cond()
814 cpumask_and(cfd->cpumask, mask, cpu_online_mask); in smp_call_function_many_cond()
815 __cpumask_clear_cpu(this_cpu, cfd->cpumask); in smp_call_function_many_cond()
817 cpumask_clear(cfd->cpumask_ipi); in smp_call_function_many_cond()
818 for_each_cpu(cpu, cfd->cpumask) { in smp_call_function_many_cond()
819 call_single_data_t *csd = per_cpu_ptr(cfd->csd, cpu); in smp_call_function_many_cond()
822 __cpumask_clear_cpu(cpu, cfd->cpumask); in smp_call_function_many_cond()
831 csd->node.u_flags |= CSD_TYPE_SYNC; in smp_call_function_many_cond()
832 csd->func = func; in smp_call_function_many_cond()
833 csd->info = info; in smp_call_function_many_cond()
835 csd->node.src = smp_processor_id(); in smp_call_function_many_cond()
836 csd->node.dst = cpu; in smp_call_function_many_cond()
844 if (llist_add(&csd->node.llist, &per_cpu(call_single_queue, cpu))) { in smp_call_function_many_cond()
845 __cpumask_set_cpu(cpu, cfd->cpumask_ipi); in smp_call_function_many_cond()
859 send_call_function_ipi_mask(cfd->cpumask_ipi); in smp_call_function_many_cond()
873 for_each_cpu(cpu, cfd->cpumask) { in smp_call_function_many_cond()
876 csd = per_cpu_ptr(cfd->csd, cpu); in smp_call_function_many_cond()
885 * @func: The function to run. This must be fast and non-blocking.
906 * @func: The function to run. This must be fast and non-blocking.
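Both kerneldoc fragments describe the multi-CPU variants built on smp_call_function_many_cond() shown above. A short sketch using on_each_cpu_mask(), which also runs the callback on the local CPU; the per-CPU variable and demo_* names are hypothetical.

#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/percpu-defs.h>

static DEFINE_PER_CPU(unsigned long, demo_generation);

/* Fast, non-blocking: runs in IPI context on every targeted remote CPU. */
static void demo_bump(void *info)
{
	this_cpu_write(demo_generation, (unsigned long)info);
}

static void demo_bump_mask(const struct cpumask *mask, unsigned long gen)
{
	/*
	 * on_each_cpu_mask() also covers the local CPU (by direct call);
	 * the smp_call_function_many_cond() path above is what queues the
	 * csd entries and sends the IPIs to the remote CPUs in @mask.
	 */
	on_each_cpu_mask(mask, demo_bump, (void *)gen, true);
}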
933 * Setup routine for controlling SMP activation
935 * Command-line option of "nosmp" or "maxcpus=0" will disable SMP
938 * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
940 * SMP mode to <NUM>.
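For illustration, the options described above would appear on the kernel command line roughly as follows; the surrounding parameters and values are arbitrary examples.

# Illustrative kernel command lines (values are examples only):
#   ... ro root=/dev/sda1 nosmp          -> boot with a single CPU, no SMP activation
#   ... ro root=/dev/sda1 maxcpus=0      -> same effect as "nosmp"
#   ... ro root=/dev/sda1 maxcpus=4      -> bring up at most 4 CPUs at boot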
1023 * This must be fast and non-blocking.
1053 * kick_all_cpus_sync - Force all cpus out of idle
1072 * wake_up_all_idle_cpus - break all cpus out of idle
1074  * including idle polling cpus; for non-idle cpus, we will do nothing
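A small sketch contrasting the two helpers, under the assumption of a hypothetical global policy value that remote CPUs read; only the two kernel calls are real API.

#include <linux/smp.h>
#include <linux/compiler.h>

static int demo_policy;	/* hypothetical state consulted by other CPUs */

static void demo_set_policy(int val)
{
	WRITE_ONCE(demo_policy, val);

	/*
	 * kick_all_cpus_sync() waits until every online CPU has taken an
	 * IPI, so no CPU can still be in a code path entered before the
	 * update above. wake_up_all_idle_cpus(), by contrast, only pokes
	 * idle CPUs so they re-evaluate their state and does not wait.
	 */
	kick_all_cpus_sync();
}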
1091 * struct smp_call_on_cpu_struct - Call a function on a specific CPU
1097 * @cpu: target CPU (%-1 for any CPU)
1117 if (sscs->cpu >= 0) in smp_call_on_cpu_callback()
1118 hypervisor_pin_vcpu(sscs->cpu); in smp_call_on_cpu_callback()
1119 sscs->ret = sscs->func(sscs->data); in smp_call_on_cpu_callback()
1120 if (sscs->cpu >= 0) in smp_call_on_cpu_callback()
1121 hypervisor_pin_vcpu(-1); in smp_call_on_cpu_callback()
1123 complete(&sscs->done); in smp_call_on_cpu_callback()
1132 .cpu = phys ? cpu : -1, in smp_call_on_cpu()
1138 return -ENXIO; in smp_call_on_cpu()
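Unlike the IPI-based helpers earlier in the file, smp_call_on_cpu() runs the callback from a kernel worker on the target CPU, so it may sleep, and the callback's integer return value is passed back to the caller (or -ENXIO for an offline CPU, line 1138). A hedged sketch with made-up demo_* names.

#include <linux/smp.h>
#include <linux/delay.h>

/* Runs in process context on the chosen CPU, so it is allowed to sleep. */
static int demo_slow_query(void *arg)
{
	msleep(10);			/* stand-in for slow, CPU-local work */
	return *(int *)arg * 2;		/* propagated back to the caller */
}

static int demo_query_cpu(int cpu)
{
	int in = 21;

	/*
	 * phys=false: no hypervisor_pin_vcpu() pinning (lines 1117-1121).
	 * Returns -ENXIO for an offline CPU, otherwise demo_slow_query()'s
	 * own return value.
	 */
	return smp_call_on_cpu(cpu, demo_slow_query, &in, false);
}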