Lines Matching +full:cpu +full:- +full:map
1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
47 #include <machine/cpu.h>
77 /* Array of CPU contexts saved during a panic. */
85 "Max CPU ID.");
98 int smp_cpus = 1; /* how many CPUs are running */
106 int mp_ncores = -1; /* how many physical cores are running */
140 KASSERT(mp_ncpus >= 1, ("%s: CPU count < 1", __func__)); in mp_setmaxid()
142 ("%s: one CPU but mp_maxid is not zero", __func__)); in mp_setmaxid()
143 KASSERT(mp_maxid >= mp_ncpus - 1, in mp_setmaxid()
197 CTR1(KTR_SMP, "forward_signal(%p)", td->td_proc); in forward_signal()
206 id = td->td_oncpu; in forward_signal()
213 * When called, the executing CPU will send an IPI to all other CPUs
218 * - Signals all CPUs in map to stop.
219 * - Waits for each to stop.
222 * -1: error
233 generic_stop_cpus(cpuset_t map, u_int type) in generic_stop_cpus() argument
253 cpusetobj_strprint(cpusetbuf, &map), type); in generic_stop_cpus()
276 /* send the stop IPI to all CPUs in map */ in generic_stop_cpus()
277 ipi_selected(map, type); in generic_stop_cpus()
290 while (!CPU_SUBSET(cpus, &map)) { in generic_stop_cpus()
310 stop_cpus(cpuset_t map) in stop_cpus() argument
313 return (generic_stop_cpus(map, IPI_STOP)); in stop_cpus()
317 stop_cpus_hard(cpuset_t map) in stop_cpus_hard() argument
320 return (generic_stop_cpus(map, IPI_STOP_HARD)); in stop_cpus_hard()
325 suspend_cpus(cpuset_t map) in suspend_cpus() argument
328 return (generic_stop_cpus(map, IPI_SUSPEND)); in suspend_cpus()
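
The three wrappers above differ only in the IPI type they hand to generic_stop_cpus(). A minimal caller-side sketch, assuming the <sys/smp.h> KPI; the function name is hypothetical, and restart_cpus() appears further down in this file:

    static void
    run_while_others_stopped(void)      /* hypothetical */
    {
            cpuset_t map;

            map = all_cpus;
            CPU_CLR(curcpu, &map);      /* keep the calling CPU out of the map */
            stop_cpus(map);             /* returns once every CPU in map has stopped */
            /* ... machine-wide work while the other CPUs spin ... */
            restart_cpus(map);          /* release them again */
    }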
333 * Called by a CPU to restart stopped CPUs.
337 * - Signals all CPUs in map to restart.
338 * - Waits for each to restart.
341 * -1: error
346 generic_restart_cpus(cpuset_t map, u_int type) in generic_restart_cpus() argument
360 CTR1(KTR_SMP, "restart_cpus(%s)", cpusetobj_strprint(cpusetbuf, &map)); in generic_restart_cpus()
369 CPU_COPY_STORE_REL(&map, &toresume_cpus); in generic_restart_cpus()
371 CPU_COPY_STORE_REL(&map, &started_cpus); in generic_restart_cpus()
383 if (!CPU_ISSET(id, &map)) in generic_restart_cpus()
386 mb = &pcpu_find(id)->pc_monitorbuf; in generic_restart_cpus()
387 atomic_store_int(&mb->stop_state, in generic_restart_cpus()
394 while (CPU_OVERLAP(cpus, &map)) in generic_restart_cpus()
404 CTR1(KTR_SMP, "restart_cpus(%s)", cpusetobj_strprint(cpusetbuf, &map)); in generic_restart_cpus()
409 CPU_COPY_STORE_REL(&map, &started_cpus); in generic_restart_cpus()
412 while (CPU_OVERLAP(cpus, &map)) in generic_restart_cpus()
419 restart_cpus(cpuset_t map) in restart_cpus() argument
422 return (generic_restart_cpus(map, IPI_STOP)); in restart_cpus()
427 resume_cpus(cpuset_t map) in resume_cpus() argument
430 return (generic_restart_cpus(map, IPI_SUSPEND)); in resume_cpus()
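
suspend_cpus()/resume_cpus() follow the same pattern with IPI_SUSPEND, as in a platform sleep transition. A hedged sketch:

    cpuset_t map;

    map = all_cpus;
    CPU_CLR(curcpu, &map);
    suspend_cpus(map);          /* park all other CPUs */
    /* ... enter and leave the platform sleep state ... */
    resume_cpus(map);           /* bring them back up */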
436 * All-CPU rendezvous. CPUs are signalled, all execute the setup function
456 /* Ensure we have up-to-date values. */ in smp_rendezvous_action()
490 td->td_critnest++; in smp_rendezvous_action()
492 owepreempt = td->td_owepreempt; in smp_rendezvous_action()
526 * Signal that the rendezvous is fully completed by this CPU. in smp_rendezvous_action()
527 * This means that no member of the smp_rv_* pseudo-structure will be in smp_rendezvous_action()
528 * accessed by this target CPU after this point; in particular, in smp_rendezvous_action()
532 * the current CPU are visible when smp_rendezvous_cpus() in smp_rendezvous_action()
538 td->td_critnest--; in smp_rendezvous_action()
539 KASSERT(owepreempt == td->td_owepreempt, in smp_rendezvous_action()
544 smp_rendezvous_cpus(cpuset_t map, in smp_rendezvous_cpus() argument
569 MPASS(curthread->td_md.md_spinlock_count == 0); in smp_rendezvous_cpus()
572 if (CPU_ISSET(i, &map)) in smp_rendezvous_cpus()
576 panic("ncpus is 0 with non-zero map"); in smp_rendezvous_cpus()
595 curcpumap = CPU_ISSET(curcpu, &map); in smp_rendezvous_cpus()
596 CPU_CLR(curcpu, &map); in smp_rendezvous_cpus()
597 ipi_selected(map, IPI_RENDEZVOUS); in smp_rendezvous_cpus()
599 /* Check if the current CPU is in the map */ in smp_rendezvous_cpus()
604 * Ensure that the master CPU waits for all the other in smp_rendezvous_cpus()
606 * pseudo-structure and the arg are guaranteed to not in smp_rendezvous_cpus()
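
A hedged usage sketch for the rendezvous KPI above: run one action on every CPU in a map, with no setup or teardown step. smp_no_rendezvous_barrier is the stock no-op placeholder from this file; the counter and helper names are hypothetical:

    static void
    count_action(void *arg)
    {
            atomic_add_int((u_int *)arg, 1);    /* runs once on each CPU */
    }

    static void
    count_cpus_in_map(cpuset_t map)             /* hypothetical */
    {
            u_int n = 0;

            smp_rendezvous_cpus(map, smp_no_rendezvous_barrier,
                count_action, smp_no_rendezvous_barrier, &n);
            printf("%u CPUs ran the action\n", n);
    }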
634 for (c = 0; c < cg->cg_children; c++) in smp_topo_fill()
635 smp_topo_fill(&cg->cg_child[c]); in smp_topo_fill()
636 cg->cg_first = CPU_FFS(&cg->cg_mask) - 1; in smp_topo_fill()
637 cg->cg_last = CPU_FLS(&cg->cg_mask) - 1; in smp_topo_fill()
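
CPU_FFS() and CPU_FLS() are 1-based (0 means the set is empty), hence the "- 1" above. An illustrative sketch with made-up CPU numbers:

    cpuset_t mask;

    CPU_ZERO(&mask);
    CPU_SET(2, &mask);
    CPU_SET(5, &mask);
    /* CPU_FFS(&mask) == 3 and CPU_FLS(&mask) == 6, so smp_topo_fill() */
    /* would set cg_first == 2 and cg_last == 5 for this group.        */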
648 * during the kernel boot while we are still single-threaded. in smp_topo()
678 /* Single-core 2xHTT */ in smp_topo()
694 if (top->cg_count != mp_ncpus) in smp_topo()
695 panic("Built bad topology at %p. CPU count %d != %d", in smp_topo()
696 top, top->cg_count, mp_ncpus); in smp_topo()
697 if (CPU_CMP(&top->cg_mask, &all_cpus)) in smp_topo()
698 panic("Built bad topology at %p. CPU mask (%s) != (%s)", in smp_topo()
699 top, cpusetobj_strprint(cpusetbuf, &top->cg_mask), in smp_topo()
706 while (top->cg_children == 1) { in smp_topo()
707 top = &top->cg_child[0]; in smp_topo()
708 top->cg_parent = NULL; in smp_topo()
736 top->cg_parent = NULL; in smp_topo_none()
737 top->cg_child = NULL; in smp_topo_none()
738 top->cg_mask = all_cpus; in smp_topo_none()
739 top->cg_count = mp_ncpus; in smp_topo_none()
740 top->cg_children = 0; in smp_topo_none()
741 top->cg_level = CG_SHARE_NONE; in smp_topo_none()
742 top->cg_flags = 0; in smp_topo_none()
758 child->cg_parent = parent; in smp_topo_addleaf()
759 child->cg_child = NULL; in smp_topo_addleaf()
760 child->cg_children = 0; in smp_topo_addleaf()
761 child->cg_level = share; in smp_topo_addleaf()
762 child->cg_count = count; in smp_topo_addleaf()
763 child->cg_flags = flags; in smp_topo_addleaf()
764 child->cg_mask = mask; in smp_topo_addleaf()
765 parent->cg_children++; in smp_topo_addleaf()
766 for (; parent != NULL; parent = parent->cg_parent) { in smp_topo_addleaf()
767 if (CPU_OVERLAP(&parent->cg_mask, &child->cg_mask)) in smp_topo_addleaf()
770 cpusetobj_strprint(cpusetbuf, &parent->cg_mask), in smp_topo_addleaf()
771 cpusetobj_strprint(cpusetbuf2, &child->cg_mask)); in smp_topo_addleaf()
772 CPU_OR(&parent->cg_mask, &parent->cg_mask, &child->cg_mask); in smp_topo_addleaf()
773 parent->cg_count += child->cg_count; in smp_topo_addleaf()
785 int cpu; in smp_topo_1level() local
788 cpu = 0; in smp_topo_1level()
791 top->cg_child = child = top + 1; in smp_topo_1level()
792 top->cg_level = CG_SHARE_NONE; in smp_topo_1level()
794 cpu = smp_topo_addleaf(top, child, share, count, flags, cpu); in smp_topo_1level()
805 int cpu; in smp_topo_2level() local
809 cpu = 0; in smp_topo_2level()
813 top->cg_child = l2g; in smp_topo_2level()
814 top->cg_level = CG_SHARE_NONE; in smp_topo_2level()
815 top->cg_children = mp_ncpus / (l2count * l1count); in smp_topo_2level()
816 l1g = l2g + top->cg_children; in smp_topo_2level()
817 for (i = 0; i < top->cg_children; i++, l2g++) { in smp_topo_2level()
818 l2g->cg_parent = top; in smp_topo_2level()
819 l2g->cg_child = l1g; in smp_topo_2level()
820 l2g->cg_level = l2share; in smp_topo_2level()
822 cpu = smp_topo_addleaf(l2g, l1g, l1share, l1count, in smp_topo_2level()
823 l1flags, cpu); in smp_topo_2level()
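
smp_topo_1level() and smp_topo_2level() are what a platform's cpu_topo() hook usually returns; a platform with no topology information falls back to smp_topo_none(). A hedged sketch for a hypothetical part with two SMT threads per core (shared L1) and four cores per package (shared L2):

    struct cpu_group *
    cpu_topo(void)
    {
            return (smp_topo_2level(CG_SHARE_L2, 4,     /* 4 cores per L2 */
                CG_SHARE_L1, 2, CG_FLAG_SMT));          /* 2 threads per L1 */
    }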
829 smp_topo_find(struct cpu_group *top, int cpu) in smp_topo_find() argument
836 CPU_SETOF(cpu, &mask); in smp_topo_find()
839 if (!CPU_OVERLAP(&cg->cg_mask, &mask)) in smp_topo_find()
841 if (cg->cg_children == 0) in smp_topo_find()
843 children = cg->cg_children; in smp_topo_find()
844 for (i = 0, cg = cg->cg_child; i < children; cg++, i++) in smp_topo_find()
845 if (CPU_OVERLAP(&cg->cg_mask, &mask)) in smp_topo_find()
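
A short sketch of the lookup above, assuming the global cpu_top root established from smp_topo(); the CPU number is arbitrary:

    struct cpu_group *cg;

    cg = smp_topo_find(cpu_top, 2);     /* smallest group whose mask contains CPU 2 */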
853 smp_rendezvous_cpus(cpuset_t map, in smp_rendezvous_cpus() argument
895 KASSERT(PCPU_GET(cpuid) == 0, ("UP must have a CPU ID of zero")); in mp_setvariables_for_up()
910 smp_rendezvous_cpus_retry(cpuset_t map, in smp_rendezvous_cpus_retry() argument
917 int cpu; in smp_rendezvous_cpus_retry() local
919 CPU_COPY(&map, &arg->cpus); in smp_rendezvous_cpus_retry()
922 * Only one CPU to execute on. in smp_rendezvous_cpus_retry()
942 arg->cpus, in smp_rendezvous_cpus_retry()
948 if (CPU_EMPTY(&arg->cpus)) in smp_rendezvous_cpus_retry()
951 CPU_FOREACH(cpu) { in smp_rendezvous_cpus_retry()
952 if (!CPU_ISSET(cpu, &arg->cpus)) in smp_rendezvous_cpus_retry()
954 wait_func(arg, cpu); in smp_rendezvous_cpus_retry()
963 CPU_CLR_ATOMIC(curcpu, &arg->cpus); in smp_rendezvous_cpus_done()
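
A hedged sketch of the retry contract visible above: the action acknowledges completion through smp_rendezvous_cpus_done(), and the caller-supplied wait_func() is invoked for every CPU still pending; all names other than the KPI itself are hypothetical:

    static void
    example_action(void *arg)               /* hypothetical */
    {
            /* ... per-CPU work ... */
            smp_rendezvous_cpus_done(arg);  /* clears curcpu from arg->cpus */
    }

    static void
    example_wait(struct smp_rendezvous_cpus_retry_arg *arg, int cpu)
    {
            cpu_spinwait();                 /* poll until 'cpu' acknowledges */
    }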
976 quiesce_cpus(cpuset_t map, const char *wmesg, int prio) in quiesce_cpus() argument
981 int cpu; in quiesce_cpus() local
987 for (cpu = 0; cpu <= mp_maxid; cpu++) { in quiesce_cpus()
988 if (!CPU_ISSET(cpu, &map) || CPU_ABSENT(cpu)) in quiesce_cpus()
990 pcpu = pcpu_find(cpu); in quiesce_cpus()
991 gen[cpu] = pcpu->pc_idlethread->td_generation; in quiesce_cpus()
994 for (cpu = 0; cpu <= mp_maxid; cpu++) { in quiesce_cpus()
995 if (!CPU_ISSET(cpu, &map) || CPU_ABSENT(cpu)) in quiesce_cpus()
997 pcpu = pcpu_find(cpu); in quiesce_cpus()
999 sched_bind(curthread, cpu); in quiesce_cpus()
1003 while (gen[cpu] == pcpu->pc_idlethread->td_generation) { in quiesce_cpus()
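
quiesce_cpus() binds to each CPU in the map in turn and sleeps until that CPU's idle thread has been scheduled again. The common case goes through the quiesce_all_cpus() wrapper declared alongside this KPI; a sketch, where passing PCATCH in prio would make the wait signal-interruptible:

    int error;

    error = quiesce_all_cpus("quiesce", 0);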
1038 int cpu; in quiesce_all_critical() local
1040 MPASS(curthread->td_critnest == 0); in quiesce_all_critical()
1042 CPU_FOREACH(cpu) { in quiesce_all_critical()
1043 pcpu = cpuid_to_pcpu[cpu]; in quiesce_all_critical()
1044 td = pcpu->pc_curthread; in quiesce_all_critical()
1046 if (td->td_critnest == 0) in quiesce_all_critical()
1050 atomic_load_acq_ptr((void *)pcpu->pc_curthread); in quiesce_all_critical()
1104 TAILQ_INIT(&node->children); in topo_init_node()
1112 root->type = TOPO_TYPE_SYSTEM; in topo_init_root()
1125 TAILQ_FOREACH_REVERSE(node, &parent->children, in topo_add_node_by_hwid()
1127 if (node->hwid == hwid in topo_add_node_by_hwid()
1128 && node->type == type && node->subtype == subtype) { in topo_add_node_by_hwid()
1135 node->parent = parent; in topo_add_node_by_hwid()
1136 node->hwid = hwid; in topo_add_node_by_hwid()
1137 node->type = type; in topo_add_node_by_hwid()
1138 node->subtype = subtype; in topo_add_node_by_hwid()
1139 TAILQ_INSERT_TAIL(&parent->children, node, siblings); in topo_add_node_by_hwid()
1140 parent->nchildren++; in topo_add_node_by_hwid()
1155 TAILQ_FOREACH(node, &parent->children, siblings) { in topo_find_node_by_hwid()
1156 if (node->hwid == hwid in topo_find_node_by_hwid()
1157 && node->type == type && node->subtype == subtype) { in topo_find_node_by_hwid()
1178 parent = child->parent; in topo_promote_child()
1180 TAILQ_REMOVE(&parent->children, child, siblings); in topo_promote_child()
1181 TAILQ_INSERT_HEAD(&parent->children, child, siblings); in topo_promote_child()
1186 TAILQ_REMOVE(&parent->children, node, siblings); in topo_promote_child()
1187 TAILQ_INSERT_AFTER(&parent->children, child, node, siblings); in topo_promote_child()
1193 * Iterate to the next node in the depth-first search (traversal) of
1201 if ((next = TAILQ_FIRST(&node->children)) != NULL) in topo_next_node()
1207 while (node != top && (node = node->parent) != top) in topo_next_node()
1215 * Iterate to the next node in the depth-first search of the topology tree,
1226 while (node != top && (node = node->parent) != top) in topo_next_nonchild_node()
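
These iterators underlie the TOPO_FOREACH() convenience macro in <sys/smp.h>. A sketch that visits every PU in a tree; the root variable is caller-owned and hypothetical:

    struct topo_node *node;

    TOPO_FOREACH(node, &topo_root_node) {   /* depth-first walk */
            if (node->type == TOPO_TYPE_PU)
                    printf("PU: cpu%d\n", node->id);
    }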
1241 KASSERT(node->type == TOPO_TYPE_PU, in topo_set_pu_id()
1242 ("topo_set_pu_id: wrong node type: %u", node->type)); in topo_set_pu_id()
1243 KASSERT(CPU_EMPTY(&node->cpuset) && node->cpu_count == 0, in topo_set_pu_id()
1245 node->id = id; in topo_set_pu_id()
1246 CPU_SET(id, &node->cpuset); in topo_set_pu_id()
1247 node->cpu_count = 1; in topo_set_pu_id()
1248 node->subtype = 1; in topo_set_pu_id()
1250 while ((node = node->parent) != NULL) { in topo_set_pu_id()
1251 KASSERT(!CPU_ISSET(id, &node->cpuset), in topo_set_pu_id()
1253 CPU_SET(id, &node->cpuset); in topo_set_pu_id()
1254 node->cpu_count++; in topo_set_pu_id()
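
A hedged sketch of how enumeration code (for example, x86 CPU discovery) strings these helpers together; every hwid and CPU id below is made up:

    static struct topo_node topo_root_node;     /* hypothetical, caller-owned */

    static void
    enumerate_one_cpu(void)
    {
            struct topo_node *pkg, *core, *pu;

            topo_init_root(&topo_root_node);
            pkg = topo_add_node_by_hwid(&topo_root_node, 0, TOPO_TYPE_PKG, 0);
            core = topo_add_node_by_hwid(pkg, 0, TOPO_TYPE_CORE, 0);
            pu = topo_add_node_by_hwid(core, 0, TOPO_TYPE_PU, 0);
            topo_set_pu_id(pu, 0);      /* logical CPU 0; updates all ancestors */
    }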
1290 if (node->type != spec->type || in topo_analyze_table()
1291 (spec->match_subtype && node->subtype != spec->subtype)) { in topo_analyze_table()
1295 if (!all && CPU_EMPTY(&node->cpuset)) { in topo_analyze_table()
1316 if (results->entities[level] == -1) in topo_analyze_table()
1317 results->entities[level] = count; in topo_analyze_table()
1318 else if (results->entities[level] != count) in topo_analyze_table()
1337 results->entities[TOPO_LEVEL_PKG] = -1; in topo_analyze()
1338 results->entities[TOPO_LEVEL_CORE] = -1; in topo_analyze()
1339 results->entities[TOPO_LEVEL_THREAD] = -1; in topo_analyze()
1340 results->entities[TOPO_LEVEL_GROUP] = -1; in topo_analyze()
1341 results->entities[TOPO_LEVEL_CACHEGROUP] = -1; in topo_analyze()
1346 KASSERT(results->entities[TOPO_LEVEL_PKG] > 0, in topo_analyze()
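
A sketch of consuming the analysis: topo_analyze() succeeds only when the tree is uniform (every package has the same number of cores, and so on down the levels), filling one count per level; the root variable matches the hypothetical one above:

    struct topo_analysis ta;

    if (topo_analyze(&topo_root_node, 1, &ta))  /* 1: include empty nodes */
            printf("%d pkg x %d core x %d thread\n",
                ta.entities[TOPO_LEVEL_PKG],
                ta.entities[TOPO_LEVEL_CORE],
                ta.entities[TOPO_LEVEL_THREAD]);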