Lines Matching +full:ipi +full:- +full:id
1 // SPDX-License-Identifier: GPL-2.0-or-later
10 * PowerPC-64 Support added by Dave Engebretsen, Peter Bergner, and
66 #include <trace/events/ipi.h>
117 * On big-core systems, thread_group_l1_cache_map for each CPU corresponds to
118 * the set of its siblings that share the L1-cache.
123 * On some big-core systems, thread_group_l2_cache_map for each CPU
125 * L2-cache.
150 /* Special case - we inhibit secondary thread startup in smp_generic_cpu_bootable()
169 return -EINVAL; in smp_generic_kick_cpu()
173 * cpu_start field to become non-zero. After we set cpu_start, in smp_generic_kick_cpu()
176 if (!paca_ptrs[nr]->cpu_start) { in smp_generic_kick_cpu()
177 paca_ptrs[nr]->cpu_start = 1; in smp_generic_kick_cpu()
184 * Ok it's not there, so it might be soft-unplugged, let's in smp_generic_kick_cpu()
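The fragments above describe a simple release/acquire handshake: the secondary CPU sits in a hold loop polling the paca cpu_start field, and the boot CPU publishes everything the secondary needs before flipping that flag. A standalone userspace model of the same pattern (C11 atomics standing in for the kernel's barriers; nothing here is kernel code):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int cpu_start;          /* stands in for paca_ptrs[nr]->cpu_start */
static int boot_data;                 /* state the "boot CPU" prepares first    */

static void *secondary(void *arg)
{
	(void)arg;
	/* Hold loop: spin until the flag becomes non-zero, then continue. */
	while (!atomic_load_explicit(&cpu_start, memory_order_acquire))
		;
	printf("secondary released, boot_data = %d\n", boot_data);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, secondary, NULL);
	boot_data = 42;                               /* work done before the kick */
	atomic_store_explicit(&cpu_start, 1,
			      memory_order_release);  /* the "kick": set cpu_start */
	pthread_join(t, NULL);
	return 0;
}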
236 * The NMI IPI is a fallback and not truly non-maskable. It is simpler
241 [PPC_MSG_CALL_FUNCTION] = "ipi call function",
242 [PPC_MSG_RESCHEDULE] = "ipi reschedule",
244 [PPC_MSG_TICK_BROADCAST] = "ipi tick-broadcast",
247 [PPC_MSG_NMI_IPI] = "nmi ipi",
251 /* optional function to request ipi, for controllers with >= 4 ipis */
257 return -EINVAL; in smp_request_message_ipi()
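For an interrupt controller that can supply one hardware IPI per message, the helper named in the comment is called once per PPC_MSG_* index. The sketch below is hypothetical: the driver function and the virq numbering are invented for illustration, and error handling is reduced to a single warning.

/* Hypothetical controller-init sketch; only the pairing of a virq with a
 * PPC_MSG_* index is the point. smp_request_message_ipi() returns 0 on
 * success or a negative errno (e.g. the -EINVAL seen above). */
static void __init demo_request_message_ipis(unsigned int virq_base)
{
	if (smp_request_message_ipi(virq_base + 0, PPC_MSG_CALL_FUNCTION) ||
	    smp_request_message_ipi(virq_base + 1, PPC_MSG_RESCHEDULE) ||
	    smp_request_message_ipi(virq_base + 2, PPC_MSG_TICK_BROADCAST) ||
	    smp_request_message_ipi(virq_base + 3, PPC_MSG_NMI_IPI))
		pr_warn("demo: failed to request one or more message IPIs\n");
}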
281 char *message = (char *)&info->messages; in smp_muxed_ipi_set_message()
284 * Order previous accesses before accesses in the IPI handler. in smp_muxed_ipi_set_message()
296 * before doing whatever causes the IPI. in smp_muxed_ipi_message_pass()
298 smp_ops->cause_ipi(cpu); in smp_muxed_ipi_message_pass()
302 #define IPI_MESSAGE(A) (1uL << ((BITS_PER_LONG - 8) - 8 * (A)))
314 /* sync-free variant. Callers should ensure synchronization */
322 all = xchg(&info->messages, 0); in smp_ipi_demux_relaxed()
346 } while (READ_ONCE(info->messages)); in smp_ipi_demux_relaxed()
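The IPI_MESSAGE() macro at line 302 gives every message number its own byte of info->messages, counted from the most-significant end. That is what lets smp_muxed_ipi_set_message() mark a message with a single byte store through the char pointer, and lets smp_ipi_demux_relaxed() collect and clear all pending messages with one xchg(). A quick standalone check of the resulting masks (plain userspace C, assuming a system where the shown big-endian variant of the macro applies):

#include <stdio.h>

#define BITS_PER_LONG	(8 * (int)sizeof(unsigned long))
#define IPI_MESSAGE(A)	(1uL << ((BITS_PER_LONG - 8) - 8 * (A)))

int main(void)
{
	/* Message indices assumed to be 0..3 in the order of the name
	 * table above: call function, reschedule, tick broadcast, nmi ipi. */
	for (int msg = 0; msg < 4; msg++)
		printf("msg %d -> mask 0x%016lx (byte %d from the MSB)\n",
		       msg, IPI_MESSAGE(msg), msg);
	return 0;
}

Because each pending message lives in a distinct byte, setting one never disturbs another, and clearing all of them is a single atomic exchange of the whole long.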
354 if (smp_ops->message_pass) in do_message_pass()
355 smp_ops->message_pass(cpu, msg); in do_message_pass()
385 * "NMI IPI" system.
390 * The IPI call waits with interrupts disabled until all targets enter the
393 * concurrency or re-entrancy.
397 * The IPI call may time out without all targets entering the NMI handler.
472 if (!safe && smp_ops->cause_nmi_ipi && smp_ops->cause_nmi_ipi(cpu)) in do_smp_send_nmi_ipi()
489 * - cpu is the target CPU (must not be this CPU), or NMI_IPI_ALL_OTHERS.
490 * - fn is the target callback function.
491 * - delay_us > 0 is the delay before giving up waiting for targets to
539 delay_us--; in __smp_send_nmi_ipi()
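The public entry point built on this machinery is smp_send_nmi_ipi(). The call pattern below is a hedged sketch: the callback prototype (taking a struct pt_regs *) and the convention that a zero return means not every target entered the handler come from the surrounding file rather than from the matched lines, so treat both as assumptions.

/* Sketch only, not a verbatim in-tree caller. */
static void demo_nmi_cb(struct pt_regs *regs)
{
	/* Runs on each target in NMI context: keep it short and take no
	 * locks the interrupted code might already hold. */
}

static void demo_nmi_broadcast(void)
{
	/* Target every CPU but this one; give them up to 1000us to respond. */
	if (!smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, demo_nmi_cb, 1000))
		pr_warn("some CPUs did not enter the NMI handler in time\n");
}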
623 * on ibm,os-term rtas call. Skip IPI callbacks to other CPUs before in crash_smp_send_stop()
703 static void smp_store_cpu_info(int id) in smp_store_cpu_info() argument
705 per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR); in smp_store_cpu_info()
707 per_cpu(next_tlbcam_idx, id) in smp_store_cpu_info()
708 = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1; in smp_store_cpu_info()
713 * Relationships between CPUs are maintained in a set of per-cpu cpumasks so
755 * parse_thread_groups: Parses the "ibm,thread-groups" device tree
762 * output of "ibm,thread-groups" is stored.
764 * ibm,thread-groups[0..N-1] array defines which group of threads in
765 * the CPU-device node can be grouped together based on the property.
769 * ibm,thread-groups[i + 0] tells us the property based on which the
775 * ibm,thread-groups[i+1] tells us how many such thread groups exist for the
776 * property ibm,thread-groups[i]
778 * ibm,thread-groups[i+2] tells us the number of threads in each such
780 * Suppose k = (ibm,thread-groups[i+1] * ibm,thread-groups[i+2]), then,
782 * ibm,thread-groups[i+3..i+k+2] is the list of threads identified by
783 * "ibm,ppc-interrupt-server#s" arranged as per their membership in
787 * If "ibm,thread-groups" = [1,2,4,8,10,12,14,9,11,13,15,2,2,4,8,10,12,14,9,11,13,15]
795 * each with "4" threads each. The "ibm,ppc-interrupt-server#s" of
797 * "ibm,ppc-interrupt-server#s" of the second group is
803 * each group with "4" threads. The "ibm,ppc-interrupt-server#s" of
805 * "ibm,ppc-interrupt-server#s" of the second group is
807 * group share the L2-cache.
809 * Returns 0 on success, -EINVAL if the property does not exist,
810 * -ENODATA if the property does not have a value, and -EOVERFLOW if the
823 count = of_property_count_u32_elems(dn, "ibm,thread-groups"); in parse_thread_groups()
825 ret = of_property_read_u32_array(dn, "ibm,thread-groups", in parse_thread_groups()
832 struct thread_groups *tg = &tglp->property_tgs[property_idx++]; in parse_thread_groups()
834 tg->property = thread_group_array[i]; in parse_thread_groups()
835 tg->nr_groups = thread_group_array[i + 1]; in parse_thread_groups()
836 tg->threads_per_group = thread_group_array[i + 2]; in parse_thread_groups()
837 total_threads = tg->nr_groups * tg->threads_per_group; in parse_thread_groups()
842 tg->thread_list[j] = thread_list[j]; in parse_thread_groups()
846 tglp->nr_properties = property_idx; in parse_thread_groups()
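To make the indexing rules above concrete, the standalone program below (plain userspace C, independent of the kernel) walks the example "ibm,thread-groups" array from the comment and prints the properties, group counts, and per-group thread lists that parse_thread_groups() would extract.

#include <stdio.h>

/* The example value quoted in the comment above. */
static const unsigned int tg[] = {
	1, 2, 4, 8, 10, 12, 14, 9, 11, 13, 15,	/* first property block  */
	2, 2, 4, 8, 10, 12, 14, 9, 11, 13, 15,	/* second property block */
};

int main(void)
{
	size_t i = 0, n = sizeof(tg) / sizeof(tg[0]);

	while (i + 2 < n) {
		unsigned int property  = tg[i];		/* ibm,thread-groups[i]   */
		unsigned int nr_groups = tg[i + 1];	/* ibm,thread-groups[i+1] */
		unsigned int per_group = tg[i + 2];	/* ibm,thread-groups[i+2] */
		unsigned int k = nr_groups * per_group;

		printf("property %u: %u groups of %u threads\n",
		       property, nr_groups, per_group);
		for (unsigned int g = 0; g < nr_groups; g++) {
			printf("  group %u:", g);
			for (unsigned int t = 0; t < per_group; t++)
				printf(" %u", tg[i + 3 + g * per_group + t]);
			printf("\n");
		}
		i += 3 + k;	/* the next property block starts after the list */
	}
	return 0;
}

Running it reproduces the grouping described above: for each of the two properties, threads {8,10,12,14} form the first group and {9,11,13,15} the second.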
854 * get_cpu_thread_group_start : Searches the thread group in tg->thread_list
858 * @tg : The thread-group structure of the CPU node which @cpu belongs
861 * Returns the index to tg->thread_list that points to the start
864 * Returns -1 if cpu doesn't belong to any of the groups pointed to by
865 * tg->thread_list.
872 for (i = 0; i < tg->nr_groups; i++) { in get_cpu_thread_group_start()
873 int group_start = i * tg->threads_per_group; in get_cpu_thread_group_start()
875 for (j = 0; j < tg->threads_per_group; j++) { in get_cpu_thread_group_start()
878 if (tg->thread_list[idx] == hw_cpu_id) in get_cpu_thread_group_start()
883 return -1; in get_cpu_thread_group_start()
897 *err = -ENODATA; in get_thread_groups()
901 if (!cpu_tgl->nr_properties) { in get_thread_groups()
907 for (i = 0; i < cpu_tgl->nr_properties; i++) { in get_thread_groups()
908 if (cpu_tgl->property_tgs[i].property == group_property) { in get_thread_groups()
909 tg = &cpu_tgl->property_tgs[i]; in get_thread_groups()
915 *err = -EINVAL; in get_thread_groups()
932 if (unlikely(i_group_start == -1)) { in update_mask_from_threadgroup()
934 return -ENODATA; in update_mask_from_threadgroup()
947 int cpu_group_start = -1, err = 0; in init_thread_group_cache_map()
953 return -EINVAL; in init_thread_group_cache_map()
962 if (unlikely(cpu_group_start == -1)) { in init_thread_group_cache_map()
964 return -ENODATA; in init_thread_group_cache_map()
1029 * returns a non-const pointer and the compiler barfs on that.
1146 if (cpu_to_chip_id(boot_cpuid) != -1) { in smp_prepare_cpus()
1152 * Assumption: if boot_cpuid doesn't have a chip-id, then no in smp_prepare_cpus()
1153 * other CPU will have a chip-id either. in smp_prepare_cpus()
1157 memset(chip_id_lookup_table, -1, sizeof(int) * idx); in smp_prepare_cpus()
1160 if (smp_ops && smp_ops->probe) in smp_prepare_cpus()
1161 smp_ops->probe(); in smp_prepare_cpus()
1174 paca_ptrs[boot_cpuid]->__current = current; in smp_prepare_boot_cpu()
1187 return -EBUSY; in generic_cpu_disable()
1191 systemcfg->processorCount--; in generic_cpu_disable()
1263 paca_ptrs[cpu]->__current = idle; in cpu_idle_thread_init()
1264 paca_ptrs[cpu]->kstack = (unsigned long)task_stack_page(idle) + in cpu_idle_thread_init()
1265 THREAD_SIZE - STACK_FRAME_MIN_SIZE; in cpu_idle_thread_init()
1267 task_thread_info(idle)->cpu = cpu; in cpu_idle_thread_init()
1285 return -EBUSY; in __cpu_up()
1288 (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu))) in __cpu_up()
1289 return -EINVAL; in __cpu_up()
1297 if (smp_ops->prepare_cpu) { in __cpu_up()
1298 rc = smp_ops->prepare_cpu(cpu); in __cpu_up()
1303 /* Make sure callin-map entry is 0 (can be left over from a CPU in __cpu_up()
1316 rc = smp_ops->kick_cpu(cpu); in __cpu_up()
1343 return -ENOENT; in __cpu_up()
1348 if (smp_ops->give_timebase) in __cpu_up()
1349 smp_ops->give_timebase(); in __cpu_up()
1363 int id = -1; in cpu_to_core_id() local
1369 id = of_get_cpu_hwid(np, 0); in cpu_to_core_id()
1372 return id; in cpu_to_core_id()
1421 * If the threads in a thread-group share L2 cache, then the in update_mask_by_l2()
1422 * L2-mask can be obtained from thread_group_l2_cache_map. in update_mask_by_l2()
1432 /* Verify that L1-cache siblings are a subset of L2 cache-siblings */ in update_mask_by_l2()
1453 /* Update l2-cache mask with all the CPUs that are part of submask */ in update_mask_by_l2()
1456 /* Skip all CPUs already part of current CPU l2-cache mask */ in update_mask_by_l2()
1466 /* Skip all CPUs already part of current CPU l2-cache */ in update_mask_by_l2()
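Condensed, the logic these comments describe looks roughly like the sketch below. The generic cpumask helpers (cpumask_set_cpu(), for_each_cpu(), cpumask_subset()) are real kernel API; the accessor names cpu_l2_cache_mask() and cpu_smallcore_mask() follow this file's conventions but are quoted from memory, so treat the exact spellings as assumptions, and note that the real code also records the reverse direction of each sibling relation.

/* Build the L2 sibling mask of 'cpu' from the device-tree derived
 * thread_group_l2_cache_map, then sanity-check that every L1 sibling
 * (the small-core mask on big-core systems) is also an L2 sibling. */
cpumask_set_cpu(cpu, cpu_l2_cache_mask(cpu));
for_each_cpu(i, per_cpu(thread_group_l2_cache_map, cpu)) {
	if (cpu_online(i))
		cpumask_set_cpu(i, cpu_l2_cache_mask(cpu));
}

if (!cpumask_subset(cpu_smallcore_mask(cpu), cpu_l2_cache_mask(cpu)))
	pr_warn_once("CPU %d: L1 siblings are not a subset of L2 siblings\n", cpu);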
1565 int chip_id = -1; in add_cpu_to_masks()
1583 /* In CPU-hotplug path, hence use GFP_ATOMIC */ in add_cpu_to_masks()
1602 /* If chip_id is -1; limit the cpu_core_mask to within PKG */ in add_cpu_to_masks()
1603 if (chip_id == -1) in add_cpu_to_masks()
1629 current->active_mm = &init_mm; in start_secondary()
1639 if (smp_ops->setup_cpu) in start_secondary()
1640 smp_ops->setup_cpu(cpu); in start_secondary()
1641 if (smp_ops->take_timebase) in start_secondary()
1642 smp_ops->take_timebase(); in start_secondary()
1648 systemcfg->processorCount++; in start_secondary()
1662 * per-core basis because one core in the pair might be disabled. in start_secondary()
1727 BUG_ON(i >= ARRAY_SIZE(powerpc_topology) - 1); in build_sched_topology()
1737 if (smp_ops && smp_ops->setup_cpu) in smp_cpus_done()
1738 smp_ops->setup_cpu(boot_cpuid); in smp_cpus_done()
1740 if (smp_ops && smp_ops->bringup_done) in smp_cpus_done()
1741 smp_ops->bringup_done(); in smp_cpus_done()
1755 return -cpu / threads_per_core; in arch_asym_cpu_priority()
1757 return -cpu; in arch_asym_cpu_priority()
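The two return statements differ only by an integer division, but the effect on scheduling priorities is easier to see with numbers. The standalone program below (userspace C; threads_per_core fixed at 8 purely for illustration) prints both schemes.

#include <stdio.h>

int main(void)
{
	const int threads_per_core = 8;		/* illustrative value only */

	for (int cpu = 0; cpu <= 16; cpu += 4)
		printf("cpu %2d: packed priority %3d, default priority %3d\n",
		       cpu, -cpu / threads_per_core, -cpu);
	return 0;
}

With the first scheme every hardware thread of a core gets the same priority and lower-numbered cores rank higher, so load is packed onto the earliest cores; the default form simply prefers lower-numbered CPUs individually.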
1766 if (!smp_ops->cpu_disable) in __cpu_disable()
1767 return -ENOSYS; in __cpu_disable()
1771 err = smp_ops->cpu_disable(); in __cpu_disable()
1791 if (smp_ops->cpu_die) in __cpu_die()
1792 smp_ops->cpu_die(cpu); in __cpu_die()
1798 * Disable on the down path. This will be re-enabled by in arch_cpu_idle_dead()
1803 if (smp_ops->cpu_offline_self) in arch_cpu_idle_dead()
1804 smp_ops->cpu_offline_self(); in arch_cpu_idle_dead()
1806 /* If we return, we re-enter start_secondary */ in arch_cpu_idle_dead()