Lines Matching +full:function +full:- +full:mask
1 // SPDX-License-Identifier: GPL-2.0-only
38 #define CSD_TYPE(_csd) ((_csd)->node.u_flags & CSD_FLAG_TYPE_MASK)
58 if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL, in smpcfd_prepare_cpu()
60 return -ENOMEM; in smpcfd_prepare_cpu()
61 if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL, in smpcfd_prepare_cpu()
63 free_cpumask_var(cfd->cpumask); in smpcfd_prepare_cpu()
64 return -ENOMEM; in smpcfd_prepare_cpu()
66 cfd->csd = alloc_percpu(call_single_data_t); in smpcfd_prepare_cpu()
67 if (!cfd->csd) { in smpcfd_prepare_cpu()
68 free_cpumask_var(cfd->cpumask); in smpcfd_prepare_cpu()
69 free_cpumask_var(cfd->cpumask_ipi); in smpcfd_prepare_cpu()
70 return -ENOMEM; in smpcfd_prepare_cpu()
80 free_cpumask_var(cfd->cpumask); in smpcfd_dead_cpu()
81 free_cpumask_var(cfd->cpumask_ipi); in smpcfd_dead_cpu()
82 free_percpu(cfd->csd); in smpcfd_dead_cpu()
89 * The IPIs for the smp-call-function callbacks queued by other in smpcfd_dying_cpu()
91 * because this CPU disabled interrupts (inside stop-machine) in smpcfd_dying_cpu()
123 send_call_function_ipi_mask(struct cpumask *mask) in send_call_function_ipi_mask() argument
125 trace_ipi_send_cpumask(mask, _RET_IP_, in send_call_function_ipi_mask()
127 arch_send_call_function_ipi_mask(mask); in send_call_function_ipi_mask()
187 __this_cpu_write(cur_csd_func, csd->func); in __csd_lock_record()
188 __this_cpu_write(cur_csd_info, csd->info); in __csd_lock_record()
191 smp_mb(); /* Update cur_csd before function call. */ in __csd_lock_record()
207 return csd->node.dst; /* Other CSD_TYPE_ values might not have ->dst. */ in csd_lock_wait_getcpu()
208 return -1; in csd_lock_wait_getcpu()
214 * csd_lock_is_stuck - Has a CSD-lock acquisition been stuck too long?
216 * Returns @true if a CSD-lock acquisition is stuck and has been stuck
217 * long enough for a "non-responsive CSD lock" message to be printed.
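/*
 * Hedged usage sketch, not from kernel/smp.c: a diagnostic path that wants to
 * avoid stacking a second warning on top of the CSD-lock report.  It assumes
 * csd_lock_is_stuck() is visible to the caller; maybe_report_stall() is a
 * made-up name used only for illustration.
 */
static void maybe_report_stall(void)
{
	if (csd_lock_is_stuck())
		return;	/* the CSD-lock diagnostics already complained */
	pr_warn("CPU appears unresponsive for some other reason\n");
}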
231 int cpu = -1; in csd_lock_wait_toolong()
236 unsigned int flags = READ_ONCE(csd->node.u_flags); in csd_lock_wait_toolong()
251 ts_delta = ts2 - *ts1; in csd_lock_wait_toolong()
259 ts_delta = ts0 - ts2; in csd_lock_wait_toolong()
275 ts_delta = ts2 - ts0; in csd_lock_wait_toolong()
276 …pr_alert("csd: %s non-responsive CSD lock (#%d) on CPU#%d, waiting %lld ns for CPU#%02d %pS(%ps).\… in csd_lock_wait_toolong()
278 cpu, csd->func, csd->info); in csd_lock_wait_toolong()
300 …pr_alert("csd: Re-sending CSD lock (#%d) IPI from CPU#%02d to CPU#%02d\n", *bug_id, raw_smp_proces… in csd_lock_wait_toolong()
312 * csd_lock/csd_unlock used to serialize access to per-cpu csd resources
314 * For non-synchronous ipi calls the csd can still be in use by the
315 * previous function call. For multi-cpu calls it's even more interesting
340 smp_cond_load_acquire(&csd->node.u_flags, !(VAL & CSD_FLAG_LOCK)); in csd_lock_wait()
349 smp_cond_load_acquire(&csd->node.u_flags, !(VAL & CSD_FLAG_LOCK)); in csd_lock_wait()
356 csd->node.u_flags |= CSD_FLAG_LOCK; in csd_lock()
360 * to ->flags with any subsequent assignments to other in csd_lock()
368 WARN_ON(!(csd->node.u_flags & CSD_FLAG_LOCK)); in csd_unlock()
373 smp_store_release(&csd->node.u_flags, 0); in csd_unlock()
393 sched_ttwu_pending : csd->func; in __smp_call_single_queue()
417 * ->func, ->info, and ->flags set.
422 smp_call_func_t func = csd->func; in generic_exec_single()
423 void *info = csd->info; in generic_exec_single()
427 * We can unlock early even for the synchronous on-stack case, in generic_exec_single()
441 return -ENXIO; in generic_exec_single()
444 __smp_call_single_queue(cpu, &csd->node.llist); in generic_exec_single()
450 * generic_smp_call_function_single_interrupt - Execute SMP IPI callbacks
452 * Invoked by arch to handle an IPI for call function single.
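/*
 * Sketch of the arch-side hook, for illustration only: an architecture's IPI
 * demultiplexer calls generic_smp_call_function_single_interrupt() when the
 * call-function IPI arrives.  IPI_CALL_FUNC stands in for an arch-specific
 * IPI number and is not defined in kernel/smp.c.
 */
static void handle_arch_ipi(int ipinr)
{
	switch (ipinr) {
	case IPI_CALL_FUNC:
		/* run the csd callbacks queued for this CPU */
		generic_smp_call_function_single_interrupt();
		break;
	default:
		break;
	}
}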
461 * __flush_smp_call_function_queue - Flush pending smp-call-function callbacks
466 * Flush any pending smp-call-function callbacks queued on this CPU. This is
508 csd->func); in __flush_smp_call_function_queue()
512 pr_warn("IPI task-wakeup sent to offline CPU\n"); in __flush_smp_call_function_queue()
530 smp_call_func_t func = csd->func; in __flush_smp_call_function_queue()
531 void *info = csd->info; in __flush_smp_call_function_queue()
534 prev->next = &csd_next->node.llist; in __flush_smp_call_function_queue()
536 entry = &csd_next->node.llist; in __flush_smp_call_function_queue()
544 prev = &csd->node.llist; in __flush_smp_call_function_queue()
560 prev->next = &csd_next->node.llist; in __flush_smp_call_function_queue()
562 entry = &csd_next->node.llist; in __flush_smp_call_function_queue()
566 smp_call_func_t func = csd->func; in __flush_smp_call_function_queue()
567 void *info = csd->info; in __flush_smp_call_function_queue()
578 prev = &csd->node.llist; in __flush_smp_call_function_queue()
593 * flush_smp_call_function_queue - Flush pending smp-call-function callbacks
599 * handle queued SMP function calls before scheduling.
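/*
 * Sketch only, a simplified and assumed shape of the idle path rather than
 * the real do_idle(): the idle task drains pending smp-call-function work
 * before it schedules, as the comment above requires.
 */
static void idle_loop_tail(void)
{
	/* run callbacks queued while this CPU sat in idle ... */
	flush_smp_call_function_queue();
	/* ... then pick the next task to run */
	schedule_idle();
}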
623 * smp_call_function_single - Run a function on a specific CPU
624 * @func: The function to run. This must be fast and non-blocking.
625 * @info: An arbitrary pointer to pass to the function.
626 * @wait: If true, wait until function has completed on other CPUs.
649 * send smp call function interrupt to this cpu and as such deadlocks in smp_call_function_single()
669 csd->func = func; in smp_call_function_single()
670 csd->info = info; in smp_call_function_single()
672 csd->node.src = smp_processor_id(); in smp_call_function_single()
673 csd->node.dst = cpu; in smp_call_function_single()
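/*
 * Minimal usage sketch (hypothetical caller, not kernel/smp.c code): run a
 * fast, non-blocking callback on CPU 1 and wait for it to finish.
 */
#include <linux/printk.h>
#include <linux/smp.h>

static void remote_hello(void *info)
{
	/* executes in IPI context on the target CPU */
	pr_info("hello from CPU%d, info=%p\n", smp_processor_id(), info);
}

static int run_on_cpu1(void)
{
	/* wait=1: return only after remote_hello() has run on CPU 1 */
	return smp_call_function_single(1, remote_hello, NULL, 1);
}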
688 * smp_call_function_single_async() - Run an asynchronous function on a
691 * @csd: Pre-allocated and setup data structure
696 * The caller passes its own pre-allocated data structure
700 * If the function is called with one csd which has not yet been
702 * function will return immediately with -EBUSY showing that the csd
716 if (csd->node.u_flags & CSD_FLAG_LOCK) { in smp_call_function_single_async()
717 err = -EBUSY; in smp_call_function_single_async()
721 csd->node.u_flags = CSD_FLAG_LOCK; in smp_call_function_single_async()
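/*
 * Async usage sketch (hypothetical driver code, assuming the current
 * call_single_data_t/INIT_CSD API): the csd must stay valid until the
 * callback has run on the target CPU; -EBUSY means the previous call using
 * this csd is still in flight.
 */
#include <linux/smp.h>

static void my_async_cb(void *info)
{
	/* runs from the IPI handler on the target CPU */
}

static call_single_data_t my_csd;

static void my_setup(void)
{
	INIT_CSD(&my_csd, my_async_cb, NULL);	/* fill in func/info once */
}

static int my_kick(int cpu)
{
	/* returns -EBUSY if my_csd is still locked by a previous call */
	return smp_call_function_single_async(cpu, &my_csd);
}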
734 * smp_call_function_any - Run a function on any of the given cpus
735 * @mask: The mask of cpus it can run on.
736 * @func: The function to run. This must be fast and non-blocking.
737 * @info: An arbitrary pointer to pass to the function.
738 * @wait: If true, wait until function has completed.
743 * 1) current cpu if in @mask
744 * 2) any cpu of current node if in @mask
745 * 3) any other online cpu in @mask
747 int smp_call_function_any(const struct cpumask *mask, in smp_call_function_any() argument
756 if (cpumask_test_cpu(cpu, mask)) in smp_call_function_any()
761 for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids; in smp_call_function_any()
762 cpu = cpumask_next_and(cpu, nodemask, mask)) { in smp_call_function_any()
768 cpu = cpumask_any_and(mask, cpu_online_mask); in smp_call_function_any()
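/*
 * Usage sketch (made-up caller): run read_counter() on whichever CPU in
 * @mask is cheapest to reach, preferring the current CPU and then a CPU on
 * the local node, as described above.
 */
#include <linux/smp.h>

static void read_counter(void *info)
{
	*(unsigned long *)info = smp_processor_id();	/* placeholder payload */
}

static int sample_any(const struct cpumask *mask)
{
	unsigned long val;

	/* wait=1 so that val is stable when this returns */
	return smp_call_function_any(mask, read_counter, &val, 1);
}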
779 * %SCF_WAIT: Wait until function execution is completed
785 static void smp_call_function_many_cond(const struct cpumask *mask, in smp_call_function_many_cond() argument
802 * send smp call function interrupt to this cpu and as such deadlocks in smp_call_function_many_cond()
818 if ((scf_flags & SCF_RUN_LOCAL) && cpumask_test_cpu(this_cpu, mask)) in smp_call_function_many_cond()
822 cpu = cpumask_first_and(mask, cpu_online_mask); in smp_call_function_many_cond()
824 cpu = cpumask_next_and(cpu, mask, cpu_online_mask); in smp_call_function_many_cond()
830 cpumask_and(cfd->cpumask, mask, cpu_online_mask); in smp_call_function_many_cond()
831 __cpumask_clear_cpu(this_cpu, cfd->cpumask); in smp_call_function_many_cond()
833 cpumask_clear(cfd->cpumask_ipi); in smp_call_function_many_cond()
834 for_each_cpu(cpu, cfd->cpumask) { in smp_call_function_many_cond()
835 call_single_data_t *csd = per_cpu_ptr(cfd->csd, cpu); in smp_call_function_many_cond()
838 __cpumask_clear_cpu(cpu, cfd->cpumask); in smp_call_function_many_cond()
844 csd->node.u_flags |= CSD_TYPE_SYNC; in smp_call_function_many_cond()
845 csd->func = func; in smp_call_function_many_cond()
846 csd->info = info; in smp_call_function_many_cond()
848 csd->node.src = smp_processor_id(); in smp_call_function_many_cond()
849 csd->node.dst = cpu; in smp_call_function_many_cond()
853 if (llist_add(&csd->node.llist, &per_cpu(call_single_queue, cpu))) { in smp_call_function_many_cond()
854 __cpumask_set_cpu(cpu, cfd->cpumask_ipi); in smp_call_function_many_cond()
863 * provided mask. in smp_call_function_many_cond()
868 send_call_function_ipi_mask(cfd->cpumask_ipi); in smp_call_function_many_cond()
880 for_each_cpu(cpu, cfd->cpumask) { in smp_call_function_many_cond()
883 csd = per_cpu_ptr(cfd->csd, cpu); in smp_call_function_many_cond()
890 * smp_call_function_many(): Run a function on a set of CPUs.
891 * @mask: The set of cpus to run on (only runs on online subset).
892 * @func: The function to run. This must be fast and non-blocking.
893 * @info: An arbitrary pointer to pass to the function.
895 * (atomically) until function has completed on other CPUs. If
896 * %SCF_RUN_LOCAL is set, the function will also be run locally
901 * You must not call this function with disabled interrupts or from a
903 * must be disabled when calling this function.
905 void smp_call_function_many(const struct cpumask *mask, in smp_call_function_many() argument
908 smp_call_function_many_cond(mask, func, info, wait * SCF_WAIT, NULL); in smp_call_function_many()
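/*
 * Usage sketch (hypothetical caller): run my_flush() on every online CPU in
 * @mask except the current one.  Preemption must be disabled and interrupts
 * enabled, per the restrictions quoted above.
 */
#include <linux/smp.h>

static void my_flush(void *info)
{
	/* fast and non-blocking, runs in IPI context */
}

static void flush_others(const struct cpumask *mask)
{
	preempt_disable();
	smp_call_function_many(mask, my_flush, NULL, true);	/* wait for completion */
	preempt_enable();
}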
913 * smp_call_function(): Run a function on all other CPUs.
914 * @func: The function to run. This must be fast and non-blocking.
915 * @info: An arbitrary pointer to pass to the function.
916 * @wait: If true, wait (atomically) until function has completed
924 * You must not call this function with disabled interrupts or from a
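/*
 * Usage sketch: run my_sync() on all other online CPUs and wait.  The
 * calling CPU is skipped; smp_call_function() disables preemption around the
 * cross-call itself, so a plain process-context caller with interrupts
 * enabled is fine.
 */
#include <linux/smp.h>

static void my_sync(void *info)
{
	/* fast and non-blocking */
}

static void sync_all_other_cpus(void)
{
	smp_call_function(my_sync, NULL, 1);	/* wait=1 */
}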
943 * Command-line option of "nosmp" or "maxcpus=0" will disable SMP
946 * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
1021 * on_each_cpu_cond(): Call a function on each processor for which
1022 * the supplied function cond_func returns true, optionally waiting
1025 * @cond_func: A callback function that is passed a cpu id and
1026 * the info parameter. The function is called
1027 * with preemption disabled. The function should
1030 * @func: The function to run on all applicable CPUs.
1031 * This must be fast and non-blocking.
1033 * @wait: If true, wait (atomically) until function has
1039 * You must not call this function with disabled interrupts or
1043 void *info, bool wait, const struct cpumask *mask) in on_each_cpu_cond_mask() argument
1051 smp_call_function_many_cond(mask, func, info, scf_flags, cond_func); in on_each_cpu_cond_mask()
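/*
 * Usage sketch (made-up condition): only CPUs for which should_run() returns
 * true receive the IPI and run do_work().  should_run() is called with
 * preemption disabled, as documented above.
 */
#include <linux/smp.h>

static bool should_run(int cpu, void *info)
{
	return (cpu % 2) == 0;	/* arbitrary example condition */
}

static void do_work(void *info)
{
	/* fast and non-blocking */
}

static void run_on_even_cpus(void)
{
	on_each_cpu_cond_mask(should_run, do_work, NULL, true, cpu_online_mask);
}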
1061 * kick_all_cpus_sync - Force all cpus out of idle
1063 * Used to synchronize the update of the pm_idle function pointer. It's
1065 * callback function has been executed on all cpus. The execution of
1066 * the function can only happen on the remote cpus after they have
1067 * left the idle function which had been called via pm_idle function
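/*
 * Usage sketch, using a hypothetical my_idle_fn pointer (the real user is
 * the pm_idle update described above): after publishing the new pointer,
 * kick_all_cpus_sync() guarantees every CPU has left the old idle routine at
 * least once before the caller proceeds.
 */
#include <linux/smp.h>

static void (*my_idle_fn)(void);

static void switch_idle_fn(void (*new_fn)(void))
{
	WRITE_ONCE(my_idle_fn, new_fn);
	kick_all_cpus_sync();	/* no CPU can still be inside the old routine */
}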
1080 * wake_up_all_idle_cpus - break all cpus out of idle
1082 * including idle polling cpus; for non-idle cpus, we will do nothing
1099 * struct smp_call_on_cpu_struct - Call a function on a specific CPU
1102 * @func: function to call
1103 * @data: function's data argument
1105 * @cpu: target CPU (%-1 for any CPU)
1107 * Used to call a function on a specific cpu and wait for it to return.
1125 if (sscs->cpu >= 0) in smp_call_on_cpu_callback()
1126 hypervisor_pin_vcpu(sscs->cpu); in smp_call_on_cpu_callback()
1127 sscs->ret = sscs->func(sscs->data); in smp_call_on_cpu_callback()
1128 if (sscs->cpu >= 0) in smp_call_on_cpu_callback()
1129 hypervisor_pin_vcpu(-1); in smp_call_on_cpu_callback()
1131 complete(&sscs->done); in smp_call_on_cpu_callback()
1140 .cpu = phys ? cpu : -1, in smp_call_on_cpu()
1146 return -ENXIO; in smp_call_on_cpu()
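/*
 * Usage sketch (hypothetical caller): run read_something() on CPU 2 in
 * process context (via a workqueue on that CPU) and collect its return
 * value.  phys=false means no hypervisor vCPU pinning is requested.
 */
#include <linux/smp.h>

static int read_something(void *arg)
{
	/* runs on the target CPU and may sleep */
	return 0;
}

static int read_on_cpu2(void)
{
	return smp_call_on_cpu(2, read_something, NULL, false);
}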