Lines matching "acquisition-time-ns" in kernel/smp.c
1 // SPDX-License-Identifier: GPL-2.0-only
38 #define CSD_TYPE(_csd) ((_csd)->node.u_flags & CSD_FLAG_TYPE_MASK)
58 if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL, in smpcfd_prepare_cpu()
60 return -ENOMEM; in smpcfd_prepare_cpu()
61 if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL, in smpcfd_prepare_cpu()
63 free_cpumask_var(cfd->cpumask); in smpcfd_prepare_cpu()
64 return -ENOMEM; in smpcfd_prepare_cpu()
66 cfd->csd = alloc_percpu(call_single_data_t); in smpcfd_prepare_cpu()
67 if (!cfd->csd) { in smpcfd_prepare_cpu()
68 free_cpumask_var(cfd->cpumask); in smpcfd_prepare_cpu()
69 free_cpumask_var(cfd->cpumask_ipi); in smpcfd_prepare_cpu()
70 return -ENOMEM; in smpcfd_prepare_cpu()
80 free_cpumask_var(cfd->cpumask); in smpcfd_dead_cpu()
81 free_cpumask_var(cfd->cpumask_ipi); in smpcfd_dead_cpu()
82 free_percpu(cfd->csd); in smpcfd_dead_cpu()
89 * The IPIs for the smp-call-function callbacks queued by other in smpcfd_dying_cpu()
91 * because this CPU disabled interrupts (inside stop-machine) in smpcfd_dying_cpu()
187 __this_cpu_write(cur_csd_func, csd->func); in __csd_lock_record()
188 __this_cpu_write(cur_csd_info, csd->info); in __csd_lock_record()
207 return csd->node.dst; /* Other CSD_TYPE_ values might not have ->dst. */ in csd_lock_wait_getcpu()
208 return -1; in csd_lock_wait_getcpu()
214 * csd_lock_is_stuck - Has a CSD-lock acquisition been stuck too long?
216 * Returns @true if a CSD-lock acquisition is stuck and has been stuck
217 * long enough for a "non-responsive CSD lock" message to be printed.
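A minimal usage sketch for csd_lock_is_stuck() as documented above: a diagnostic path can consult it before dumping further state. The caller and message are hypothetical, and the sketch assumes csd_lock_is_stuck() is visible to the caller (the CSD-lock debug code in this file is built in); only the csd_lock_is_stuck() call itself is the real API.

#include <linux/printk.h>
#include <linux/smp.h>

static void example_note_csd_stall(void)
{
	/* Hypothetical diagnostic hook; only csd_lock_is_stuck() is real. */
	if (csd_lock_is_stuck())
		pr_info("example: a CSD-lock acquisition has already been reported as stuck\n");
}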
225 * Complain if too much time is spent waiting. Note that only
231 int cpu = -1; in csd_lock_wait_toolong()
236 unsigned int flags = READ_ONCE(csd->node.u_flags); in csd_lock_wait_toolong()
251 ts_delta = ts2 - *ts1; in csd_lock_wait_toolong()
259 ts_delta = ts0 - ts2; in csd_lock_wait_toolong()
260 pr_alert("sched_clock on CPU %d went backward by %llu ns\n", raw_smp_processor_id(), ts_delta); in csd_lock_wait_toolong()
275 ts_delta = ts2 - ts0; in csd_lock_wait_toolong()
276 …pr_alert("csd: %s non-responsive CSD lock (#%d) on CPU#%d, waiting %lld ns for CPU#%02d %pS(%ps).\… in csd_lock_wait_toolong()
278 cpu, csd->func, csd->info); in csd_lock_wait_toolong()
300 …pr_alert("csd: Re-sending CSD lock (#%d) IPI from CPU#%02d to CPU#%02d\n", *bug_id, raw_smp_proces… in csd_lock_wait_toolong()
312 * csd_lock/csd_unlock used to serialize access to per-cpu csd resources
314 * For non-synchronous ipi calls the csd can still be in use by the
315 * previous function call. For multi-cpu calls it's even more interesting
340 smp_cond_load_acquire(&csd->node.u_flags, !(VAL & CSD_FLAG_LOCK)); in csd_lock_wait()
349 smp_cond_load_acquire(&csd->node.u_flags, !(VAL & CSD_FLAG_LOCK)); in csd_lock_wait()
356 csd->node.u_flags |= CSD_FLAG_LOCK; in csd_lock()
360 * to ->flags with any subsequent assignments to other in csd_lock()
368 WARN_ON(!(csd->node.u_flags & CSD_FLAG_LOCK)); in csd_unlock()
373 smp_store_release(&csd->node.u_flags, 0); in csd_unlock()
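A minimal sketch of the ownership protocol described in the comment above, modelled on how generic_exec_single() and __flush_smp_call_function_queue() use these static helpers. The two example_* functions are illustrative only and are not part of kernel/smp.c.

static void example_send_async(call_single_data_t *csd, int cpu)
{
	/* Sender takes ownership; waits here if a previous async use is still pending. */
	csd_lock(csd);
	__smp_call_single_queue(cpu, &csd->node.llist);
}

static void example_handle_async(call_single_data_t *csd)
{
	smp_call_func_t func = csd->func;
	void *info = csd->info;

	/* For asynchronous calls the csd is released before the callback runs. */
	csd_unlock(csd);
	func(info);
}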
393 sched_ttwu_pending : csd->func; in __smp_call_single_queue()
417 * ->func, ->info, and ->flags set.
422 smp_call_func_t func = csd->func; in generic_exec_single()
423 void *info = csd->info; in generic_exec_single()
427 * We can unlock early even for the synchronous on-stack case, in generic_exec_single()
441 return -ENXIO; in generic_exec_single()
444 __smp_call_single_queue(cpu, &csd->node.llist); in generic_exec_single()
450 * generic_smp_call_function_single_interrupt - Execute SMP IPI callbacks
461 * __flush_smp_call_function_queue - Flush pending smp-call-function callbacks
466 * Flush any pending smp-call-function callbacks queued on this CPU. This is
508 csd->func); in __flush_smp_call_function_queue()
512 pr_warn("IPI task-wakeup sent to offline CPU\n"); in __flush_smp_call_function_queue()
530 smp_call_func_t func = csd->func; in __flush_smp_call_function_queue()
531 void *info = csd->info; in __flush_smp_call_function_queue()
534 prev->next = &csd_next->node.llist; in __flush_smp_call_function_queue()
536 entry = &csd_next->node.llist; in __flush_smp_call_function_queue()
544 prev = &csd->node.llist; in __flush_smp_call_function_queue()
560 prev->next = &csd_next->node.llist; in __flush_smp_call_function_queue()
562 entry = &csd_next->node.llist; in __flush_smp_call_function_queue()
566 smp_call_func_t func = csd->func; in __flush_smp_call_function_queue()
567 void *info = csd->info; in __flush_smp_call_function_queue()
578 prev = &csd->node.llist; in __flush_smp_call_function_queue()
593 * flush_smp_call_function_queue - Flush pending smp-call-function callbacks
623 * smp_call_function_single - Run a function on a specific CPU
624 * @func: The function to run. This must be fast and non-blocking.
669 csd->func = func; in smp_call_function_single()
670 csd->info = info; in smp_call_function_single()
672 csd->node.src = smp_processor_id(); in smp_call_function_single()
673 csd->node.dst = cpu; in smp_call_function_single()
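A usage sketch for smp_call_function_single() as documented above: read a timestamp on a remote CPU and wait for the result. The callback, the wrapper struct, and the helper are hypothetical; only smp_call_function_single() is the real API.

#include <linux/smp.h>
#include <linux/timex.h>

struct remote_read {
	u64 value;
};

static void read_remote_cycles(void *info)
{
	struct remote_read *rr = info;

	/* Runs on the target CPU in IPI context; must be fast and non-blocking. */
	rr->value = get_cycles();
}

static int example_read_cycles_on(int cpu, u64 *out)
{
	struct remote_read rr = { 0 };
	int ret;

	/* wait=1: do not return until the callback has completed on @cpu. */
	ret = smp_call_function_single(cpu, read_remote_cycles, &rr, 1);
	if (!ret)
		*out = rr.value;
	return ret;
}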
688 * smp_call_function_single_async() - Run an asynchronous function on a
691 * @csd: Pre-allocated and setup data structure
696 * The caller passes their own pre-allocated data structure
702 * function will return immediately with -EBUSY showing that the csd
716 if (csd->node.u_flags & CSD_FLAG_LOCK) { in smp_call_function_single_async()
717 err = -EBUSY; in smp_call_function_single_async()
721 csd->node.u_flags = CSD_FLAG_LOCK; in smp_call_function_single_async()
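A usage sketch for smp_call_function_single_async() as documented above, using a preallocated, long-lived csd initialized with INIT_CSD(); the example_dev structure and functions are hypothetical.

#include <linux/printk.h>
#include <linux/smp.h>

struct example_dev {
	call_single_data_t csd;
	int target_cpu;
};

static void example_remote_kick(void *info)
{
	/* Runs on the target CPU; must not block. */
}

static void example_dev_init(struct example_dev *edev, int cpu)
{
	INIT_CSD(&edev->csd, example_remote_kick, edev);
	edev->target_cpu = cpu;
}

static void example_dev_poke(struct example_dev *edev)
{
	/* Returns -EBUSY while a previous async call on this csd is still in flight. */
	if (smp_call_function_single_async(edev->target_cpu, &edev->csd))
		pr_debug("example: previous cross-call not finished yet\n");
}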
734 * smp_call_function_any - Run a function on any of the given cpus
736 * @func: The function to run. This must be fast and non-blocking.
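A usage sketch for smp_call_function_any() as documented above: run a fast callback on whichever CPU in a mask is cheapest to reach (the current CPU, if it is in the mask). The callback and the mask parameter are hypothetical.

#include <linux/cpumask.h>
#include <linux/smp.h>

static void example_drain_queue(void *info)
{
	/* Must be fast and non-blocking; may run in IPI context. */
}

static int example_drain_near(const struct cpumask *candidates)
{
	/* wait=1: return only after the callback has run on some CPU in @candidates. */
	return smp_call_function_any(candidates, example_drain_queue, NULL, 1);
}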
830 cpumask_and(cfd->cpumask, mask, cpu_online_mask); in smp_call_function_many_cond()
831 __cpumask_clear_cpu(this_cpu, cfd->cpumask); in smp_call_function_many_cond()
833 cpumask_clear(cfd->cpumask_ipi); in smp_call_function_many_cond()
834 for_each_cpu(cpu, cfd->cpumask) { in smp_call_function_many_cond()
835 call_single_data_t *csd = per_cpu_ptr(cfd->csd, cpu); in smp_call_function_many_cond()
838 __cpumask_clear_cpu(cpu, cfd->cpumask); in smp_call_function_many_cond()
844 csd->node.u_flags |= CSD_TYPE_SYNC; in smp_call_function_many_cond()
845 csd->func = func; in smp_call_function_many_cond()
846 csd->info = info; in smp_call_function_many_cond()
848 csd->node.src = smp_processor_id(); in smp_call_function_many_cond()
849 csd->node.dst = cpu; in smp_call_function_many_cond()
853 if (llist_add(&csd->node.llist, &per_cpu(call_single_queue, cpu))) { in smp_call_function_many_cond()
854 __cpumask_set_cpu(cpu, cfd->cpumask_ipi); in smp_call_function_many_cond()
868 send_call_function_ipi_mask(cfd->cpumask_ipi); in smp_call_function_many_cond()
880 for_each_cpu(cpu, cfd->cpumask) { in smp_call_function_many_cond()
883 csd = per_cpu_ptr(cfd->csd, cpu); in smp_call_function_many_cond()
892 * @func: The function to run. This must be fast and non-blocking.
914 * @func: The function to run. This must be fast and non-blocking.
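A usage sketch for the many-CPU variants documented above: flush a hypothetical per-CPU cache everywhere. on_each_cpu() runs the callback on every online CPU, including the caller; the callback itself is hypothetical.

#include <linux/smp.h>

static void example_flush_local_cache(void *info)
{
	/* Runs on each CPU with interrupts disabled; must be fast and non-blocking. */
}

static void example_flush_all_cpus(void)
{
	/* wait=1: do not return until every CPU has executed the callback. */
	on_each_cpu(example_flush_local_cache, NULL, 1);
}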
943 * Command-line option of "nosmp" or "maxcpus=0" will disable SMP
946 * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
1031 * This must be fast and non-blocking.
1061 * kick_all_cpus_sync - Force all cpus out of idle
1080 * wake_up_all_idle_cpus - break all cpus out of idle
1082 * including idle polling cpus; for non-idle cpus, we will do nothing
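A minimal sketch for the two idle-breaking helpers documented above; the surrounding policy-update scenario is hypothetical.

#include <linux/smp.h>

static void example_policy_changed(void)
{
	/* ... update some globally visible state ... */

	/* Force every CPU to take an IPI and leave any stale idle path. */
	kick_all_cpus_sync();

	/* Or merely break idle CPUs (including pollers) out of idle, without waiting: */
	wake_up_all_idle_cpus();
}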
1099 * struct smp_call_on_cpu_struct - Call a function on a specific CPU
1105 * @cpu: target CPU (%-1 for any CPU)
1125 if (sscs->cpu >= 0) in smp_call_on_cpu_callback()
1126 hypervisor_pin_vcpu(sscs->cpu); in smp_call_on_cpu_callback()
1127 sscs->ret = sscs->func(sscs->data); in smp_call_on_cpu_callback()
1128 if (sscs->cpu >= 0) in smp_call_on_cpu_callback()
1129 hypervisor_pin_vcpu(-1); in smp_call_on_cpu_callback()
1131 complete(&sscs->done); in smp_call_on_cpu_callback()
1140 .cpu = phys ? cpu : -1, in smp_call_on_cpu()
1146 return -ENXIO; in smp_call_on_cpu()
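A usage sketch for smp_call_on_cpu() as documented above: run a function that may sleep on a chosen CPU, without requesting hypervisor vCPU pinning. The callback and wrapper are hypothetical.

#include <linux/smp.h>

static int example_slow_calibrate(void *data)
{
	/* Runs in process context on the target CPU and may sleep. */
	return 0;
}

static int example_calibrate_on(unsigned int cpu)
{
	/* phys=false: no physical-CPU (hypervisor vCPU) pinning requested. */
	return smp_call_on_cpu(cpu, example_slow_calibrate, NULL, false);
}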