Lines Matching +full:ipi +full:- +full:id

1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
85 "Max CPU ID.");
106 int mp_ncores = -1; /* how many physical cores running */
143 KASSERT(mp_maxid >= mp_ncpus - 1, in mp_setmaxid()
186 int id; in forward_signal() local
197 CTR1(KTR_SMP, "forward_signal(%p)", td->td_proc); in forward_signal()
202 /* No need to IPI ourself. */ in forward_signal()
206 id = td->td_oncpu; in forward_signal()
207 if (id == NOCPU) in forward_signal()
209 ipi_cpu(id, IPI_AST); in forward_signal()
213 * When called the executing CPU will send an IPI to all other CPUs
218 * - Signals all CPUs in map to stop.
219 * - Waits for each to stop.
222 * -1: error
261 * IPI delivery. in generic_stop_cpus()
276 /* send the stop IPI to all CPUs in map */ in generic_stop_cpus()
337 * - Signals all CPUs in map to restart.
338 * - Waits for each to restart.
341 * -1: error
380 u_int id; in generic_restart_cpus() local
382 CPU_FOREACH(id) { in generic_restart_cpus()
383 if (!CPU_ISSET(id, &map)) in generic_restart_cpus()
386 mb = &pcpu_find(id)->pc_monitorbuf; in generic_restart_cpus()
387 atomic_store_int(&mb->stop_state, in generic_restart_cpus()
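The generic_stop_cpus()/generic_restart_cpus() pair shown in the fragments above implements FreeBSD's stop/restart protocol; callers normally reach it through the stop_cpus() and restart_cpus() wrappers declared in <sys/smp.h>. A minimal, hedged sketch of the usual pattern follows; the function name and the work in the middle are hypothetical:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/cpuset.h>
#include <sys/pcpu.h>
#include <sys/smp.h>

static void
example_stop_others_briefly(void)
{
        cpuset_t map;

        /* Every CPU except the one we are running on. */
        map = all_cpus;
        CPU_CLR(PCPU_GET(cpuid), &map);

        stop_cpus(map);         /* send stop IPIs and wait for each CPU to stop */
        /* ... work that must not race with any other CPU ... */
        restart_cpus(map);      /* let the stopped CPUs continue */
}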
436 * All-CPU rendezvous. CPUs are signalled, all execute the setup function
456 /* Ensure we have up-to-date values. */ in smp_rendezvous_action()
470 * Specifically, if a rendezvous handler is invoked via an IPI in smp_rendezvous_action()
481 * problematic (the preemption must not occur before the IPI in smp_rendezvous_action()
490 td->td_critnest++; in smp_rendezvous_action()
492 owepreempt = td->td_owepreempt; in smp_rendezvous_action()
527 * This means that no member of smp_rv_* pseudo-structure will be in smp_rendezvous_action()
538 td->td_critnest--; in smp_rendezvous_action()
539 KASSERT(owepreempt == td->td_owepreempt, in smp_rendezvous_action()
567 * livelock if smp_ipi_mtx is owned by a thread which sent us an IPI. in smp_rendezvous_cpus()
569 MPASS(curthread->td_md.md_spinlock_count == 0); in smp_rendezvous_cpus()
576 panic("ncpus is 0 with non-zero map"); in smp_rendezvous_cpus()
592 * Signal other processors, which will enter the IPI with in smp_rendezvous_cpus()
606 * pseudo-structure and the arg are guaranteed to not in smp_rendezvous_cpus()
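smp_rendezvous_action() above is the per-CPU half of the smp_rendezvous()/smp_rendezvous_cpus() KPI. A hedged sketch of driving it, with a made-up action that simply counts the CPUs that checked in (example_* names are hypothetical; same headers as the previous sketch):

static u_int example_cpus_seen;

static void
example_rendezvous_action(void *arg)
{
        /* Runs once on every participating CPU, roughly in parallel. */
        atomic_add_int((u_int *)arg, 1);
}

static void
example_run_on_all_cpus(void)
{
        example_cpus_seen = 0;
        /* NULL setup and teardown: only the action phase is needed. */
        smp_rendezvous(NULL, example_rendezvous_action, NULL,
            &example_cpus_seen);
        /* Every CPU, including this one, has now run the action once. */
}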
634 for (c = 0; c < cg->cg_children; c++) in smp_topo_fill()
635 smp_topo_fill(&cg->cg_child[c]); in smp_topo_fill()
636 cg->cg_first = CPU_FFS(&cg->cg_mask) - 1; in smp_topo_fill()
637 cg->cg_last = CPU_FLS(&cg->cg_mask) - 1; in smp_topo_fill()
648 * during the kernel boot while we are still single-threaded. in smp_topo()
678 /* Single-core 2xHTT */ in smp_topo()
694 if (top->cg_count != mp_ncpus) in smp_topo()
696 top, top->cg_count, mp_ncpus); in smp_topo()
697 if (CPU_CMP(&top->cg_mask, &all_cpus)) in smp_topo()
699 top, cpusetobj_strprint(cpusetbuf, &top->cg_mask), in smp_topo()
706 while (top->cg_children == 1) { in smp_topo()
707 top = &top->cg_child[0]; in smp_topo()
708 top->cg_parent = NULL; in smp_topo()
736 top->cg_parent = NULL; in smp_topo_none()
737 top->cg_child = NULL; in smp_topo_none()
738 top->cg_mask = all_cpus; in smp_topo_none()
739 top->cg_count = mp_ncpus; in smp_topo_none()
740 top->cg_children = 0; in smp_topo_none()
741 top->cg_level = CG_SHARE_NONE; in smp_topo_none()
742 top->cg_flags = 0; in smp_topo_none()
758 child->cg_parent = parent; in smp_topo_addleaf()
759 child->cg_child = NULL; in smp_topo_addleaf()
760 child->cg_children = 0; in smp_topo_addleaf()
761 child->cg_level = share; in smp_topo_addleaf()
762 child->cg_count = count; in smp_topo_addleaf()
763 child->cg_flags = flags; in smp_topo_addleaf()
764 child->cg_mask = mask; in smp_topo_addleaf()
765 parent->cg_children++; in smp_topo_addleaf()
766 for (; parent != NULL; parent = parent->cg_parent) { in smp_topo_addleaf()
767 if (CPU_OVERLAP(&parent->cg_mask, &child->cg_mask)) in smp_topo_addleaf()
770 cpusetobj_strprint(cpusetbuf, &parent->cg_mask), in smp_topo_addleaf()
771 cpusetobj_strprint(cpusetbuf2, &child->cg_mask)); in smp_topo_addleaf()
772 CPU_OR(&parent->cg_mask, &parent->cg_mask, &child->cg_mask); in smp_topo_addleaf()
773 parent->cg_count += child->cg_count; in smp_topo_addleaf()
791 top->cg_child = child = top + 1; in smp_topo_1level()
792 top->cg_level = CG_SHARE_NONE; in smp_topo_1level()
813 top->cg_child = l2g; in smp_topo_2level()
814 top->cg_level = CG_SHARE_NONE; in smp_topo_2level()
815 top->cg_children = mp_ncpus / (l2count * l1count); in smp_topo_2level()
816 l1g = l2g + top->cg_children; in smp_topo_2level()
817 for (i = 0; i < top->cg_children; i++, l2g++) { in smp_topo_2level()
818 l2g->cg_parent = top; in smp_topo_2level()
819 l2g->cg_child = l1g; in smp_topo_2level()
820 l2g->cg_level = l2share; in smp_topo_2level()
839 if (!CPU_OVERLAP(&cg->cg_mask, &mask)) in smp_topo_find()
841 if (cg->cg_children == 0) in smp_topo_find()
843 children = cg->cg_children; in smp_topo_find()
844 for (i = 0, cg = cg->cg_child; i < children; cg++, i++) in smp_topo_find()
845 if (CPU_OVERLAP(&cg->cg_mask, &mask)) in smp_topo_find()
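The smp_topo_none()/smp_topo_addleaf()/smp_topo_1level()/smp_topo_2level() helpers above exist so that machine-dependent cpu_topo() implementations can describe simple sharing hierarchies. A hedged, purely illustrative cpu_topo() for a hypothetical part whose cores come in pairs sharing an L2 cache (CG_SHARE_L2 is from <sys/smp.h>; the pairing itself is an assumption, not taken from this file):

struct cpu_group *
cpu_topo(void)
{

        /* Fall back to a flat topology if the pairing assumption fails. */
        if (mp_ncpus % 2 != 0)
                return (smp_topo_none());
        /* One level: groups of two CPUs that share an L2 cache. */
        return (smp_topo_1level(CG_SHARE_L2, 2, 0));
}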
895 KASSERT(PCPU_GET(cpuid) == 0, ("UP must have a CPU ID of zero")); in mp_setvariables_for_up()
919 CPU_COPY(&map, &arg->cpus); in smp_rendezvous_cpus_retry()
942 arg->cpus, in smp_rendezvous_cpus_retry()
948 if (CPU_EMPTY(&arg->cpus)) in smp_rendezvous_cpus_retry()
952 if (!CPU_ISSET(cpu, &arg->cpus)) in smp_rendezvous_cpus_retry()
963 CPU_CLR_ATOMIC(curcpu, &arg->cpus); in smp_rendezvous_cpus_done()
991 gen[cpu] = pcpu->pc_idlethread->td_generation; in quiesce_cpus()
1003 while (gen[cpu] == pcpu->pc_idlethread->td_generation) { in quiesce_cpus()
1040 MPASS(curthread->td_critnest == 0); in quiesce_all_critical()
1044 td = pcpu->pc_curthread; in quiesce_all_critical()
1046 if (td->td_critnest == 0) in quiesce_all_critical()
1050 atomic_load_acq_ptr((void *)pcpu->pc_curthread); in quiesce_all_critical()
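quiesce_cpus()/quiesce_all_cpus() above sleep until every CPU's idle thread has run at least once, and quiesce_all_critical() spins until each CPU has been observed outside a critical section; both can serve as inexpensive grace periods. A hedged example of the quiesce_all_cpus() idiom; the retired object, the wait-message "exqsce", and the use of M_TEMP are hypothetical:

#include <sys/malloc.h>         /* for free() and M_TEMP */

static void
example_retire(void *old)
{
        /* Sleep until each CPU's idle thread has run at least once. */
        quiesce_all_cpus("exqsce", 0);
        /* No CPU can still be in a pre-existing section that used "old". */
        free(old, M_TEMP);
}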
1065 * Send an IPI forcing a sequentially consistent fence.
1104 TAILQ_INIT(&node->children); in topo_init_node()
1112 root->type = TOPO_TYPE_SYSTEM; in topo_init_root()
1116 * Add a child node with the given ID under the given parent.
1117 * Do nothing if there is already a child with that ID.
1125 TAILQ_FOREACH_REVERSE(node, &parent->children, in topo_add_node_by_hwid()
1127 if (node->hwid == hwid in topo_add_node_by_hwid()
1128 && node->type == type && node->subtype == subtype) { in topo_add_node_by_hwid()
1135 node->parent = parent; in topo_add_node_by_hwid()
1136 node->hwid = hwid; in topo_add_node_by_hwid()
1137 node->type = type; in topo_add_node_by_hwid()
1138 node->subtype = subtype; in topo_add_node_by_hwid()
1139 TAILQ_INSERT_TAIL(&parent->children, node, siblings); in topo_add_node_by_hwid()
1140 parent->nchildren++; in topo_add_node_by_hwid()
1146 * Find a child node with the given ID under the given parent.
1155 TAILQ_FOREACH(node, &parent->children, siblings) { in topo_find_node_by_hwid()
1156 if (node->hwid == hwid in topo_find_node_by_hwid()
1157 && node->type == type && node->subtype == subtype) { in topo_find_node_by_hwid()
1178 parent = child->parent; in topo_promote_child()
1180 TAILQ_REMOVE(&parent->children, child, siblings); in topo_promote_child()
1181 TAILQ_INSERT_HEAD(&parent->children, child, siblings); in topo_promote_child()
1186 TAILQ_REMOVE(&parent->children, node, siblings); in topo_promote_child()
1187 TAILQ_INSERT_AFTER(&parent->children, child, node, siblings); in topo_promote_child()
1193 * Iterate to the next node in the depth-first search (traversal) of
1201 if ((next = TAILQ_FIRST(&node->children)) != NULL) in topo_next_node()
1207 while (node != top && (node = node->parent) != top) in topo_next_node()
1215 * Iterate to the next node in the depth-first search of the topology tree,
1226 while (node != top && (node = node->parent) != top) in topo_next_nonchild_node()
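topo_next_node() and topo_next_nonchild_node() above implement a depth-first walk of the topology tree built via topo_init_root()/topo_add_node_by_hwid(). A hedged sketch that counts the PU (logical CPU) leaves under a given root node; the function name is hypothetical:

static int
example_count_pus(struct topo_node *root)
{
        struct topo_node *node;
        int pus;

        pus = 0;
        /* Depth-first traversal; topo_next_node() returns NULL at the end. */
        for (node = root; node != NULL; node = topo_next_node(root, node))
                if (node->type == TOPO_TYPE_PU)
                        pus++;
        return (pus);
}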
1234 * Assign the given ID to the given topology node that represents a logical
1238 topo_set_pu_id(struct topo_node *node, cpuid_t id) in topo_set_pu_id() argument
1241 KASSERT(node->type == TOPO_TYPE_PU, in topo_set_pu_id()
1242 ("topo_set_pu_id: wrong node type: %u", node->type)); in topo_set_pu_id()
1243 KASSERT(CPU_EMPTY(&node->cpuset) && node->cpu_count == 0, in topo_set_pu_id()
1245 node->id = id; in topo_set_pu_id()
1246 CPU_SET(id, &node->cpuset); in topo_set_pu_id()
1247 node->cpu_count = 1; in topo_set_pu_id()
1248 node->subtype = 1; in topo_set_pu_id()
1250 while ((node = node->parent) != NULL) { in topo_set_pu_id()
1251 KASSERT(!CPU_ISSET(id, &node->cpuset), in topo_set_pu_id()
1252 ("logical ID %u is already set in node %p", id, node)); in topo_set_pu_id()
1253 CPU_SET(id, &node->cpuset); in topo_set_pu_id()
1254 node->cpu_count++; in topo_set_pu_id()
1290 if (node->type != spec->type || in topo_analyze_table()
1291 (spec->match_subtype && node->subtype != spec->subtype)) { in topo_analyze_table()
1295 if (!all && CPU_EMPTY(&node->cpuset)) { in topo_analyze_table()
1316 if (results->entities[level] == -1) in topo_analyze_table()
1317 results->entities[level] = count; in topo_analyze_table()
1318 else if (results->entities[level] != count) in topo_analyze_table()
1337 results->entities[TOPO_LEVEL_PKG] = -1; in topo_analyze()
1338 results->entities[TOPO_LEVEL_CORE] = -1; in topo_analyze()
1339 results->entities[TOPO_LEVEL_THREAD] = -1; in topo_analyze()
1340 results->entities[TOPO_LEVEL_GROUP] = -1; in topo_analyze()
1341 results->entities[TOPO_LEVEL_CACHEGROUP] = -1; in topo_analyze()
1346 KASSERT(results->entities[TOPO_LEVEL_PKG] > 0, in topo_analyze()
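topo_analyze() condenses the topology tree into per-level entity counts, provided the tree is uniform. A hedged sketch of consuming the result; the root argument, the function name, and the report format are illustrative:

static void
example_print_topology(struct topo_node *root)
{
        struct topo_analysis topo;

        /* Second argument 1: also count nodes with no enabled CPUs. */
        if (topo_analyze(root, 1, &topo) == 0) {
                printf("CPU topology is not uniform\n");
                return;
        }
        printf("%d package(s) x %d core(s) x %d hardware thread(s)\n",
            topo.entities[TOPO_LEVEL_PKG],
            topo.entities[TOPO_LEVEL_CORE],
            topo.entities[TOPO_LEVEL_THREAD]);
}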