Lines Matching full:cpu

61  *  arbitrarily high resolution, per-CPU interval timers (to avoid colliding
64 * optionally bound to a CPU or a CPU partition. A cyclic's CPU or CPU
66 * to a CPU which satisfies the new binding. Alternatively, a cyclic may
113 * cyclic_bind() <-- Change a cyclic's CPU or partition binding
115 * cyclic_move_here() <-- Shuffle cyclic to current CPU
119 * cyclic_juggle() <-- Juggles cyclics away from a CPU
120 * cyclic_offline() <-- Offlines cyclic operation on a CPU
121 * cyclic_online() <-- Reenables operation on an offlined CPU
122 * cyclic_move_in() <-- Notifies subsystem of change in CPU partition
123 * cyclic_move_out() <-- Notifies subsystem of change in CPU partition
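
The consumer-facing routines listed above combine in a fairly fixed pattern: a subsystem creates a cyclic with cyclic_add() while holding cpu_lock, optionally adjusts its binding with cyclic_bind(), and eventually destroys it with cyclic_remove(). Below is a minimal consumer sketch under those assumptions; the names my_tick/my_cyclic/my_start/my_stop and the one-second interval are illustrative, not taken from cyclic.c.

    #include <sys/cyclic.h>
    #include <sys/cpuvar.h>
    #include <sys/systm.h>
    #include <sys/time.h>

    /* Hypothetical consumer: fire my_tick() once per second at CY_LOW_LEVEL. */
    static cyclic_id_t my_cyclic = CYCLIC_NONE;

    static void
    my_tick(void *arg)
    {
            /* Runs from the cyclic subsystem's soft interrupt at CY_LOW_LEVEL. */
    }

    static void
    my_start(void)
    {
            cyc_handler_t hdlr;
            cyc_time_t when;

            hdlr.cyh_func = my_tick;
            hdlr.cyh_arg = NULL;
            hdlr.cyh_level = CY_LOW_LEVEL;

            when.cyt_when = 0;              /* let the subsystem pick the first expiration */
            when.cyt_interval = NANOSEC;    /* fire once per second thereafter */

            mutex_enter(&cpu_lock);         /* cyclic_add() requires cpu_lock */
            my_cyclic = cyclic_add(&hdlr, &when);
            mutex_exit(&cpu_lock);
    }

    static void
    my_stop(void)
    {
            mutex_enter(&cpu_lock);         /* so does cyclic_remove() */
            cyclic_remove(my_cyclic);
            mutex_exit(&cpu_lock);
            my_cyclic = CYCLIC_NONE;
    }
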
142 * hang off of a per-CPU structure, cyc_cpu.
172 * element containing an index into the CPU's cyp_cyclics array.
312 * potential disconnect: if the CPU is at an interrupt level less than
380 * Traditionally, access to per-CPU data structures shared between
441 * We resize our data structures lazily, and only on a per-CPU basis.
445 * on the CPU being resized, but should not affect cyclic operation on other
454 * 2. cyclic_expand() cross calls cyclic_expand_xcall() on the CPU
517 * 1. cyclic_remove() calls cyclic_remove_xcall() on the CPU undergoing
551 * CPU, recording its expiration time in the remove cross call (step (3)
552 * in "Removing", above). We then add the cyclic to the new CPU, explicitly
568 * applied to the omni-cyclic's component on the current CPU.
585 * 1. cyclic_reprogram() calls cyclic_reprogram_xcall() on the CPU
696 #define CYC_TRACE(cpu, level, why, arg0, arg1) \ argument
697 CYC_TRACE_IMPL(&cpu->cyp_trace[level], level, why, arg0, arg1)
720 #define CYC_TRACE(cpu, level, why, arg0, arg1) argument
725 #define CYC_TRACE0(cpu, level, why) CYC_TRACE(cpu, level, why, 0, 0) argument
726 #define CYC_TRACE1(cpu, level, why, arg0) CYC_TRACE(cpu, level, why, arg0, 0) argument
742 cyclic_upheap(cyc_cpu_t *cpu, cyc_index_t ndx) in cyclic_upheap() argument
752 heap = cpu->cyp_heap; in cyclic_upheap()
753 cyclics = cpu->cyp_cyclics; in cyclic_upheap()
785 cyclic_downheap(cyc_cpu_t *cpu, cyc_index_t ndx) in cyclic_downheap() argument
787 cyclic_t *cyclics = cpu->cyp_cyclics; in cyclic_downheap()
788 cyc_index_t *heap = cpu->cyp_heap; in cyclic_downheap()
792 cyc_index_t nelems = cpu->cyp_nelems; in cyclic_downheap()
860 cyclic_expire(cyc_cpu_t *cpu, cyc_index_t ndx, cyclic_t *cyclic) in cyclic_expire() argument
862 cyc_backend_t *be = cpu->cyp_backend; in cyclic_expire()
873 CYC_TRACE(cpu, CY_HIGH_LEVEL, "handler-in", handler, arg); in cyclic_expire()
879 CYC_TRACE(cpu, CY_HIGH_LEVEL, "handler-out", handler, arg); in cyclic_expire()
890 cyc_softbuf_t *softbuf = &cpu->cyp_softbuf[level]; in cyclic_expire()
896 CYC_TRACE(cpu, CY_HIGH_LEVEL, "expire-enq", cyclic, in cyclic_expire()
909 CYC_TRACE1(cpu, CY_HIGH_LEVEL, "expire-wrap", cyclic); in cyclic_expire()
913 CYC_TRACE(cpu, CY_HIGH_LEVEL, "expire-bump", cyclic, 0); in cyclic_expire()
929 * The only argument is the CPU on which the interrupt is executing;
930 * backends must call into cyclic_fire() on the specified CPU.
951 cyc_cpu_t *cpu = c->cpu_cyclic; in cyclic_fire() local
952 cyc_backend_t *be = cpu->cyp_backend; in cyclic_fire()
953 cyc_index_t *heap = cpu->cyp_heap; in cyclic_fire()
954 cyclic_t *cyclic, *cyclics = cpu->cyp_cyclics; in cyclic_fire()
959 CYC_TRACE(cpu, CY_HIGH_LEVEL, "fire", now, 0); in cyclic_fire()
961 if (cpu->cyp_nelems == 0) { in cyclic_fire()
966 CYC_TRACE0(cpu, CY_HIGH_LEVEL, "fire-spurious"); in cyclic_fire()
977 CYC_TRACE(cpu, CY_HIGH_LEVEL, "fire-check", cyclic, in cyclic_fire()
983 cyclic_expire(cpu, ndx, cyclic); in cyclic_fire()
1029 CYC_TRACE(cpu, CY_HIGH_LEVEL, exp == interval ? in cyclic_fire()
1036 cyclic_downheap(cpu, 0); in cyclic_fire()
1047 cyclic_remove_pend(cyc_cpu_t *cpu, cyc_level_t level, cyclic_t *cyclic) in cyclic_remove_pend() argument
1051 uint32_t i, rpend = cpu->cyp_rpend - 1; in cyclic_remove_pend()
1055 ASSERT(cpu->cyp_state == CYS_REMOVING); in cyclic_remove_pend()
1056 ASSERT(cpu->cyp_rpend > 0); in cyclic_remove_pend()
1058 CYC_TRACE(cpu, level, "remove-rpend", cyclic, cpu->cyp_rpend); in cyclic_remove_pend()
1065 CYC_TRACE(cpu, level, "rpend-in", handler, arg); in cyclic_remove_pend()
1071 CYC_TRACE(cpu, level, "rpend-out", handler, arg); in cyclic_remove_pend()
1077 sema_v(&cpu->cyp_modify_wait); in cyclic_remove_pend()
1081 * cyclic_softint(cpu_t *cpu, cyc_level_t level)
1090 * The first argument to cyclic_softint() is the CPU on which the interrupt
1092 * CPU. The second argument is the level of the soft interrupt; it must
1102 * cyclic_add() nor a cyclic_remove() is pending on the specified CPU, is
1131 cyc_cpu_t *cpu = c->cpu_cyclic; in cyclic_softint() local
1135 cyclic_t *cyclics = cpu->cyp_cyclics; in cyclic_softint()
1138 CYC_TRACE(cpu, level, "softint", cyclics, 0); in cyclic_softint()
1142 softbuf = &cpu->cyp_softbuf[level]; in cyclic_softint()
1152 CYC_TRACE(cpu, level, "softint-top", cyclics, pc); in cyclic_softint()
1161 ASSERT(buf[consmasked] < cpu->cyp_size); in cyclic_softint()
1162 CYC_TRACE(cpu, level, "consuming", consndx, cyclic); in cyclic_softint()
1186 CYC_TRACE(cpu, level, "handler-in", handler, arg); in cyclic_softint()
1192 CYC_TRACE(cpu, level, "handler-out", handler, arg); in cyclic_softint()
1198 if (cpu->cyp_state == CYS_REMOVING) { in cyclic_softint()
1205 * this CPU, and there must be a remove in cyclic_softint()
1210 cyclic_remove_pend(cpu, level, cyclic); in cyclic_softint()
1217 CYC_TRACE(cpu, level, "resize-int", cyclics, 0); in cyclic_softint()
1218 ASSERT(cpu->cyp_state == CYS_EXPANDING); in cyclic_softint()
1219 ASSERT(cyclics != cpu->cyp_cyclics); in cyclic_softint()
1223 cyclics = cpu->cyp_cyclics; in cyclic_softint()
1257 CYC_TRACE(cpu, level, "cas-fail", opend, pend); in cyclic_softint()
1259 ((cyclics != cpu->cyp_cyclics && in cyclic_softint()
1260 cpu->cyp_state == CYS_EXPANDING) || in cyclic_softint()
1261 (cpu->cyp_state == CYS_REMOVING && in cyclic_softint()
1286 CYC_TRACE(cpu, level, "buffer-grow", 0, 0); in cyclic_softint()
1287 ASSERT(cpu->cyp_state == CYS_EXPANDING); in cyclic_softint()
1298 if (cpu->cyp_cyclics != cyclics) { in cyclic_softint()
1299 CYC_TRACE1(cpu, level, "resize-int-int", consndx); in cyclic_softint()
1300 cyclics = cpu->cyp_cyclics; in cyclic_softint()
1319 ASSERT(cpu->cyp_state == CYS_EXPANDING); in cyclic_softint()
1322 lev = cpu->cyp_modify_levels; in cyclic_softint()
1324 } while (atomic_cas_32(&cpu->cyp_modify_levels, lev, nlev) != in cyclic_softint()
1333 CYC_TRACE0(cpu, level, "resize-kick"); in cyclic_softint()
1334 sema_v(&cpu->cyp_modify_wait); in cyclic_softint()
1338 cyc_backend_t *be = cpu->cyp_backend; in cyclic_softint()
1340 CYC_TRACE0(cpu, level, "resize-post"); in cyclic_softint()
1350 cyc_cpu_t *cpu = arg->cyx_cpu; in cyclic_expand_xcall() local
1351 cyc_backend_t *be = cpu->cyp_backend; in cyclic_expand_xcall()
1354 cyc_index_t new_size = arg->cyx_size, size = cpu->cyp_size, i; in cyclic_expand_xcall()
1356 cyclic_t *cyclics = cpu->cyp_cyclics, *new_cyclics = arg->cyx_cyclics; in cyclic_expand_xcall()
1358 ASSERT(cpu->cyp_state == CYS_EXPANDING); in cyclic_expand_xcall()
1362 * to CY_HIGH_LEVEL. This CPU already has a new heap, cyclic array, in cyclic_expand_xcall()
1369 CYC_TRACE(cpu, CY_HIGH_LEVEL, "expand", new_size, 0); in cyclic_expand_xcall()
1376 ASSERT(cpu->cyp_heap != NULL && cpu->cyp_cyclics != NULL); in cyclic_expand_xcall()
1378 bcopy(cpu->cyp_heap, new_heap, sizeof (cyc_index_t) * size); in cyclic_expand_xcall()
1402 cpu->cyp_heap = new_heap; in cyclic_expand_xcall()
1403 cpu->cyp_cyclics = new_cyclics; in cyclic_expand_xcall()
1404 cpu->cyp_size = new_size; in cyclic_expand_xcall()
1411 cyc_softbuf_t *softbuf = &cpu->cyp_softbuf[i]; in cyclic_expand_xcall()
1442 * cyclic_expand() will cross call onto the CPU to perform the actual
1446 cyclic_expand(cyc_cpu_t *cpu) in cyclic_expand() argument
1452 cyc_backend_t *be = cpu->cyp_backend; in cyclic_expand()
1457 ASSERT(cpu->cyp_state == CYS_ONLINE); in cyclic_expand()
1459 cpu->cyp_state = CYS_EXPANDING; in cyclic_expand()
1461 old_heap = cpu->cyp_heap; in cyclic_expand()
1462 old_cyclics = cpu->cyp_cyclics; in cyclic_expand()
1464 if ((new_size = ((old_size = cpu->cyp_size) << 1)) == 0) { in cyclic_expand()
1481 old_hard = cpu->cyp_softbuf[0].cys_hard; in cyclic_expand()
1484 cyc_softbuf_t *softbuf = &cpu->cyp_softbuf[i]; in cyclic_expand()
1498 arg.cyx_cpu = cpu; in cyclic_expand()
1503 cpu->cyp_modify_levels = 0; in cyclic_expand()
1505 be->cyb_xcall(be->cyb_arg, cpu->cyp_cpu, in cyclic_expand()
1511 sema_p(&cpu->cyp_modify_wait); in cyclic_expand()
1512 ASSERT(cpu->cyp_modify_levels == CY_SOFT_LEVELS); in cyclic_expand()
1518 cyc_softbuf_t *softbuf = &cpu->cyp_softbuf[i]; in cyclic_expand()
1542 ASSERT(cpu->cyp_state == CYS_EXPANDING); in cyclic_expand()
1543 cpu->cyp_state = CYS_ONLINE; in cyclic_expand()
1547 * cyclic_pick_cpu will attempt to pick a CPU according to the constraints
1548 * specified by the partition, bound CPU, and flags. Additionally,
1549 * cyclic_pick_cpu() will not pick the avoid CPU; it will return NULL if
1550 * the avoid CPU is the only CPU which satisfies the constraints.
1552 * If CYF_CPU_BOUND is set in flags, the specified CPU must be non-NULL.
1554 * If both CYF_CPU_BOUND and CYF_PART_BOUND are set, the specified CPU must
1560 cpu_t *c, *start = (part != NULL) ? part->cp_cpulist : CPU; in cyclic_pick_cpu()
1564 CYC_PTRACE("pick-cpu", part, bound); in cyclic_pick_cpu()
1570 * If we're bound to our CPU, there isn't much choice involved. We in cyclic_pick_cpu()
1571 * need to check that the CPU passed as bound is in the cpupart, and in cyclic_pick_cpu()
1572 * that the CPU that we're binding to has been configured. in cyclic_pick_cpu()
1575 CYC_PTRACE("pick-cpu-bound", bound, avoid); in cyclic_pick_cpu()
1579 "CPU binding contradicts partition binding"); in cyclic_pick_cpu()
1586 "attempt to bind to non-configured CPU"); in cyclic_pick_cpu()
1619 * (a) We have a partition-bound cyclic, and there is no CPU in in cyclic_pick_cpu()
1621 * non-CYS_OFFLINE CPU in our partition, we'll go with it. in cyclic_pick_cpu()
1622 * If not, the avoid CPU must be the only non-CYS_OFFLINE in cyclic_pick_cpu()
1623 * CPU in the partition; we're forced to return NULL. in cyclic_pick_cpu()
1626 * must only be one CPU CPU_ENABLE'd, and it must be the one in cyclic_pick_cpu()
1630 * At any rate: we can't avoid the avoid CPU, so we return in cyclic_pick_cpu()
1648 CYC_PTRACE("pick-cpu-found", c, avoid); in cyclic_pick_cpu()
1658 cyc_cpu_t *cpu = arg->cyx_cpu; in cyclic_add_xcall() local
1661 cyc_backend_t *be = cpu->cyp_backend; in cyclic_add_xcall()
1667 ASSERT(cpu->cyp_nelems < cpu->cyp_size); in cyclic_add_xcall()
1671 CYC_TRACE(cpu, CY_HIGH_LEVEL, in cyclic_add_xcall()
1674 nelems = cpu->cyp_nelems++; in cyclic_add_xcall()
1679 * backend on this CPU. in cyclic_add_xcall()
1681 CYC_TRACE0(cpu, CY_HIGH_LEVEL, "enabled"); in cyclic_add_xcall()
1685 ndx = cpu->cyp_heap[nelems]; in cyclic_add_xcall()
1686 cyclic = &cpu->cyp_cyclics[ndx]; in cyclic_add_xcall()
1707 if (cyclic_upheap(cpu, nelems)) { in cyclic_add_xcall()
1710 CYC_TRACE(cpu, CY_HIGH_LEVEL, "add-reprog", cyclic, exp); in cyclic_add_xcall()
1724 cyclic_add_here(cyc_cpu_t *cpu, cyc_handler_t *hdlr, in cyclic_add_here() argument
1727 cyc_backend_t *be = cpu->cyp_backend; in cyclic_add_here()
1731 CYC_PTRACE("add-cpu", cpu, hdlr->cyh_func); in cyclic_add_here()
1733 ASSERT(cpu->cyp_state == CYS_ONLINE); in cyclic_add_here()
1734 ASSERT(!(cpu->cyp_cpu->cpu_flags & CPU_OFFLINE)); in cyclic_add_here()
1737 if (cpu->cyp_nelems == cpu->cyp_size) { in cyclic_add_here()
1740 * CPU to perform the expansion. in cyclic_add_here()
1742 cyclic_expand(cpu); in cyclic_add_here()
1743 ASSERT(cpu->cyp_nelems < cpu->cyp_size); in cyclic_add_here()
1748 * perform the add. Now cross call over to the CPU of interest to in cyclic_add_here()
1751 arg.cyx_cpu = cpu; in cyclic_add_here()
1756 be->cyb_xcall(bar, cpu->cyp_cpu, (cyc_func_t)cyclic_add_xcall, &arg); in cyclic_add_here()
1758 CYC_PTRACE("add-cpu-done", cpu, arg.cyx_ndx); in cyclic_add_here()
1766 cyc_cpu_t *cpu = arg->cyx_cpu; in cyclic_remove_xcall() local
1767 cyc_backend_t *be = cpu->cyp_backend; in cyclic_remove_xcall()
1777 ASSERT(cpu->cyp_state == CYS_REMOVING); in cyclic_remove_xcall()
1781 CYC_TRACE1(cpu, CY_HIGH_LEVEL, "remove-xcall", ndx); in cyclic_remove_xcall()
1783 heap = cpu->cyp_heap; in cyclic_remove_xcall()
1784 nelems = cpu->cyp_nelems; in cyclic_remove_xcall()
1786 cyclic = &cpu->cyp_cyclics[ndx]; in cyclic_remove_xcall()
1791 * will be used when the cyclic is added to the new CPU. in cyclic_remove_xcall()
1803 * we will stash the pend value in this CPU's rpend, and in cyclic_remove_xcall()
1815 CYC_TRACE1(cpu, CY_HIGH_LEVEL, "remove-pend", cyclic->cy_pend); in cyclic_remove_xcall()
1816 cpu->cyp_rpend = cyclic->cy_pend; in cyclic_remove_xcall()
1836 cpu->cyp_nelems = --nelems; in cyclic_remove_xcall()
1841 * disable the backend on this CPU. in cyclic_remove_xcall()
1843 CYC_TRACE0(cpu, CY_HIGH_LEVEL, "disabled"); in cyclic_remove_xcall()
1852 CYC_TRACE0(cpu, CY_HIGH_LEVEL, "remove-bottom"); in cyclic_remove_xcall()
1869 CYC_TRACE0(cpu, CY_HIGH_LEVEL, "remove-root"); in cyclic_remove_xcall()
1870 cyclic_downheap(cpu, 0); in cyclic_remove_xcall()
1872 if (cyclic_upheap(cpu, i) == 0) { in cyclic_remove_xcall()
1877 CYC_TRACE0(cpu, CY_HIGH_LEVEL, "remove-no-root"); in cyclic_remove_xcall()
1879 CYC_TRACE0(cpu, CY_HIGH_LEVEL, "remove-no-up"); in cyclic_remove_xcall()
1880 cyclic_downheap(cpu, i); in cyclic_remove_xcall()
1891 cyclic = &cpu->cyp_cyclics[heap[0]]; in cyclic_remove_xcall()
1893 CYC_TRACE0(cpu, CY_HIGH_LEVEL, "remove-reprog"); in cyclic_remove_xcall()
1902 cyclic_remove_here(cyc_cpu_t *cpu, cyc_index_t ndx, cyc_time_t *when, int wait) in cyclic_remove_here() argument
1904 cyc_backend_t *be = cpu->cyp_backend; in cyclic_remove_here()
1906 cyclic_t *cyclic = &cpu->cyp_cyclics[ndx]; in cyclic_remove_here()
1910 ASSERT(cpu->cyp_rpend == 0); in cyclic_remove_here()
1914 arg.cyx_cpu = cpu; in cyclic_remove_here()
1918 ASSERT(cpu->cyp_state == CYS_ONLINE); in cyclic_remove_here()
1919 cpu->cyp_state = CYS_REMOVING; in cyclic_remove_here()
1921 be->cyb_xcall(be->cyb_arg, cpu->cyp_cpu, in cyclic_remove_here()
1929 ASSERT(!(level == CY_HIGH_LEVEL && cpu->cyp_rpend != 0)); in cyclic_remove_here()
1930 ASSERT(!(wait == CY_NOWAIT && cpu->cyp_rpend != 0)); in cyclic_remove_here()
1931 ASSERT(!(arg.cyx_wait == CY_NOWAIT && cpu->cyp_rpend != 0)); in cyclic_remove_here()
1936 * remove this cyclic; put the CPU back in the CYS_ONLINE in cyclic_remove_here()
1940 ASSERT(cpu->cyp_state == CYS_REMOVING); in cyclic_remove_here()
1941 cpu->cyp_state = CYS_ONLINE; in cyclic_remove_here()
1946 if (cpu->cyp_rpend != 0) in cyclic_remove_here()
1947 sema_p(&cpu->cyp_modify_wait); in cyclic_remove_here()
1949 ASSERT(cpu->cyp_state == CYS_REMOVING); in cyclic_remove_here()
1951 cpu->cyp_rpend = 0; in cyclic_remove_here()
1952 cpu->cyp_state = CYS_ONLINE; in cyclic_remove_here()
1958 * If cyclic_reprogram() is called on the same CPU as the cyclic's CPU, then
1960 * an X-call to the cyclic's CPU.
1963 cyclic_reprogram_cyclic(cyc_cpu_t *cpu, cyc_index_t ndx, hrtime_t expire, in cyclic_reprogram_cyclic() argument
1966 cyc_backend_t *be = cpu->cyp_backend; in cyclic_reprogram_cyclic()
1977 CYC_TRACE1(cpu, CY_HIGH_LEVEL, "reprog-xcall", ndx); in cyclic_reprogram_cyclic()
1979 nelems = cpu->cyp_nelems; in cyclic_reprogram_cyclic()
1981 heap = cpu->cyp_heap; in cyclic_reprogram_cyclic()
1996 * cyclic_reprogram() is occurring on the CPU which the cyclic in cyclic_reprogram_cyclic()
1998 * was removed from that CPU. in cyclic_reprogram_cyclic()
2004 cpu->cyp_state == CYS_REMOVING && cpu->cyp_rpend > 0) { in cyclic_reprogram_cyclic()
2010 cyclic = &cpu->cyp_cyclics[ndx]; in cyclic_reprogram_cyclic()
2016 CYC_TRACE1(cpu, CY_HIGH_LEVEL, "reprog-down", i); in cyclic_reprogram_cyclic()
2017 cyclic_downheap(cpu, i); in cyclic_reprogram_cyclic()
2019 CYC_TRACE1(cpu, CY_HIGH_LEVEL, "reprog-up", i); in cyclic_reprogram_cyclic()
2020 reprog = cyclic_upheap(cpu, i); in cyclic_reprogram_cyclic()
2023 if (reprog && (cpu->cyp_state != CYS_SUSPENDED)) { in cyclic_reprogram_cyclic()
2027 CYC_TRACE0(cpu, CY_HIGH_LEVEL, "reprog-root"); in cyclic_reprogram_cyclic()
2028 cyclic = &cpu->cyp_cyclics[heap[0]]; in cyclic_reprogram_cyclic()
2048 cyclic_reprogram_here(cyc_cpu_t *cpu, cyc_index_t ndx, hrtime_t expiration) in cyclic_reprogram_here() argument
2050 cyc_backend_t *be = cpu->cyp_backend; in cyclic_reprogram_here()
2057 arg.cyx_cpu = cpu; in cyclic_reprogram_here()
2061 be->cyb_xcall(be->cyb_arg, cpu->cyp_cpu, in cyclic_reprogram_here()
2067 * can be juggled and the destination CPU is known to be able to accept
2097 * CPU requires an expansion. If it does, we'll perform the in cyclic_juggle_one_to()
2100 * cyclic) isn't on a CPU. in cyclic_juggle_one_to()
2111 * to the wrong CPU. in cyclic_juggle_one_to()
2157 * CPU before removing the cyclic from the source CPU. in cyclic_juggle_one_to()
2174 cyc_cpu_t *cpu = idp->cyi_cpu, *dest; in cyclic_juggle_one() local
2175 cyclic_t *cyclic = &cpu->cyp_cyclics[ndx]; in cyclic_juggle_one()
2176 cpu_t *c = cpu->cyp_cpu; in cyclic_juggle_one()
2179 CYC_PTRACE("juggle-one", idp, cpu); in cyclic_juggle_one()
2182 ASSERT(cpu->cyp_state == CYS_ONLINE); in cyclic_juggle_one()
2189 CYC_PTRACE("juggle-fail", idp, cpu) in cyclic_juggle_one()
2202 cyc_cpu_t *cpu = idp->cyi_cpu; in cyclic_unbind_cpu() local
2203 cpu_t *c = cpu->cyp_cpu; in cyclic_unbind_cpu()
2204 cyclic_t *cyclic = &cpu->cyp_cyclics[idp->cyi_ndx]; in cyclic_unbind_cpu()
2206 CYC_PTRACE("unbind-cpu", id, cpu); in cyclic_unbind_cpu()
2208 ASSERT(cpu->cyp_state == CYS_ONLINE); in cyclic_unbind_cpu()
2215 * If we were bound to CPU which has interrupts disabled, we need in cyclic_unbind_cpu()
2217 * processor set, and if every CPU in the processor set has in cyclic_unbind_cpu()
2223 ASSERT((res && idp->cyi_cpu != cpu) || in cyclic_unbind_cpu()
2232 cyc_cpu_t *dest = d->cpu_cyclic, *cpu = idp->cyi_cpu; in cyclic_bind_cpu() local
2233 cpu_t *c = cpu->cyp_cpu; in cyclic_bind_cpu()
2234 cyclic_t *cyclic = &cpu->cyp_cyclics[idp->cyi_ndx]; in cyclic_bind_cpu()
2237 CYC_PTRACE("bind-cpu", id, dest); in cyclic_bind_cpu()
2241 ASSERT(cpu->cyp_state == CYS_ONLINE); in cyclic_bind_cpu()
2249 if (dest != cpu) { in cyclic_bind_cpu()
2261 cyc_cpu_t *cpu = idp->cyi_cpu; in cyclic_unbind_cpupart() local
2262 cpu_t *c = cpu->cyp_cpu; in cyclic_unbind_cpupart()
2263 cyclic_t *cyc = &cpu->cyp_cyclics[idp->cyi_ndx]; in cyclic_unbind_cpupart()
2267 ASSERT(cpu->cyp_state == CYS_ONLINE); in cyclic_unbind_cpupart()
2274 * If we're on a CPU which has interrupts disabled (and if this cyclic in cyclic_unbind_cpupart()
2275 * isn't bound to the CPU), we need to juggle away. in cyclic_unbind_cpupart()
2280 ASSERT(res && idp->cyi_cpu != cpu); in cyclic_unbind_cpupart()
2288 cyc_cpu_t *cpu = idp->cyi_cpu, *dest; in cyclic_bind_cpupart() local
2289 cpu_t *c = cpu->cyp_cpu; in cyclic_bind_cpupart()
2290 cyclic_t *cyc = &cpu->cyp_cyclics[idp->cyi_ndx]; in cyclic_bind_cpupart()
2295 ASSERT(cpu->cyp_state == CYS_ONLINE); in cyclic_bind_cpupart()
2302 if (dest != cpu) { in cyclic_bind_cpupart()
2313 cyc_cpu_t *cpu = kmem_zalloc(sizeof (cyc_cpu_t), KM_SLEEP); in cyclic_configure() local
2317 CYC_PTRACE1("configure", cpu); in cyclic_configure()
2324 cpu->cyp_cpu = c; in cyclic_configure()
2326 sema_init(&cpu->cyp_modify_wait, 0, NULL, SEMA_DEFAULT, NULL); in cyclic_configure()
2328 cpu->cyp_size = 1; in cyclic_configure()
2329 cpu->cyp_heap = kmem_zalloc(sizeof (cyc_index_t), KM_SLEEP); in cyclic_configure()
2330 cpu->cyp_cyclics = kmem_zalloc(sizeof (cyclic_t), KM_SLEEP); in cyclic_configure()
2331 cpu->cyp_cyclics->cy_flags = CYF_FREE; in cyclic_configure()
2338 cpu->cyp_softbuf[i].cys_buf[0].cypc_buf = in cyclic_configure()
2342 cpu->cyp_state = CYS_OFFLINE; in cyclic_configure()
2345 * Setup the backend for this CPU. in cyclic_configure()
2349 cpu->cyp_backend = nbe; in cyclic_configure()
2353 * the CPU's cpu_cyclic pointer serves as an indicator that the in cyclic_configure()
2354 * cyclic subsystem for this CPU is prepared to field interrupts. in cyclic_configure()
2358 c->cpu_cyclic = cpu; in cyclic_configure()
2364 cyc_cpu_t *cpu = c->cpu_cyclic; in cyclic_unconfigure() local
2365 cyc_backend_t *be = cpu->cyp_backend; in cyclic_unconfigure()
2369 CYC_PTRACE1("unconfigure", cpu); in cyclic_unconfigure()
2371 ASSERT(cpu->cyp_state == CYS_OFFLINE); in cyclic_unconfigure()
2372 ASSERT(cpu->cyp_nelems == 0); in cyclic_unconfigure()
2375 * Let the backend know that the CPU is being yanked, and free up in cyclic_unconfigure()
2380 cpu->cyp_backend = NULL; in cyclic_unconfigure()
2386 cyc_softbuf_t *softbuf = &cpu->cyp_softbuf[i]; in cyclic_unconfigure()
2407 kmem_free(cpu->cyp_cyclics, cpu->cyp_size * sizeof (cyclic_t)); in cyclic_unconfigure()
2408 kmem_free(cpu->cyp_heap, cpu->cyp_size * sizeof (cyc_index_t)); in cyclic_unconfigure()
2409 kmem_free(cpu, sizeof (cyc_cpu_t)); in cyclic_unconfigure()
2419 * cpu array for this CPU. in cyclic_cpu_setup()
2421 cpu_t *c = cpu[id]; in cyclic_cpu_setup()
2447 cyc_cpu_t *cpu = arg->cyx_cpu; in cyclic_suspend_xcall() local
2448 cyc_backend_t *be = cpu->cyp_backend; in cyclic_suspend_xcall()
2454 CYC_TRACE1(cpu, CY_HIGH_LEVEL, "suspend-xcall", cpu->cyp_nelems); in cyclic_suspend_xcall()
2455 ASSERT(cpu->cyp_state == CYS_ONLINE || cpu->cyp_state == CYS_OFFLINE); in cyclic_suspend_xcall()
2458 * We won't disable this CPU unless it has a non-zero number of in cyclic_suspend_xcall()
2460 * to disable this CPU). in cyclic_suspend_xcall()
2462 if (cpu->cyp_nelems > 0) { in cyclic_suspend_xcall()
2463 ASSERT(cpu->cyp_state == CYS_ONLINE); in cyclic_suspend_xcall()
2467 if (cpu->cyp_state == CYS_ONLINE) in cyclic_suspend_xcall()
2468 cpu->cyp_state = CYS_SUSPENDED; in cyclic_suspend_xcall()
2477 cyc_cpu_t *cpu = arg->cyx_cpu; in cyclic_resume_xcall() local
2478 cyc_backend_t *be = cpu->cyp_backend; in cyclic_resume_xcall()
2481 cyc_state_t state = cpu->cyp_state; in cyclic_resume_xcall()
2485 CYC_TRACE1(cpu, CY_HIGH_LEVEL, "resume-xcall", cpu->cyp_nelems); in cyclic_resume_xcall()
2491 * We won't enable this CPU unless it has a non-zero number of in cyclic_resume_xcall()
2494 if (cpu->cyp_nelems > 0) { in cyclic_resume_xcall()
2495 cyclic_t *cyclic = &cpu->cyp_cyclics[cpu->cyp_heap[0]]; in cyclic_resume_xcall()
2498 CYC_TRACE(cpu, CY_HIGH_LEVEL, "resume-reprog", cyclic, exp); in cyclic_resume_xcall()
2505 cpu->cyp_state = CYS_ONLINE; in cyclic_resume_xcall()
2507 CYC_TRACE1(cpu, CY_HIGH_LEVEL, "resume-done", cpu->cyp_nelems); in cyclic_resume_xcall()
2512 cyclic_omni_start(cyc_id_t *idp, cyc_cpu_t *cpu) in cyclic_omni_start() argument
2519 CYC_PTRACE("omni-start", cpu, idp); in cyclic_omni_start()
2521 ASSERT(cpu->cyp_state == CYS_ONLINE); in cyclic_omni_start()
2531 omni->cyo_online(omni->cyo_arg, cpu->cyp_cpu, &hdlr, &when); in cyclic_omni_start()
2537 ocpu->cyo_cpu = cpu; in cyclic_omni_start()
2539 ocpu->cyo_ndx = cyclic_add_here(cpu, &hdlr, &when, 0); in cyclic_omni_start()
2545 cyclic_omni_stop(cyc_id_t *idp, cyc_cpu_t *cpu) in cyclic_omni_stop() argument
2552 CYC_PTRACE("omni-stop", cpu, idp); in cyclic_omni_stop()
2554 ASSERT(cpu->cyp_state == CYS_ONLINE); in cyclic_omni_stop()
2561 * to the offlined CPU. in cyclic_omni_stop()
2565 while (ocpu != NULL && ocpu->cyo_cpu != cpu) { in cyclic_omni_stop()
2572 * CPU -- the definition of an omnipresent cyclic is that it runs in cyclic_omni_stop()
2630 * The cyclic has been removed from this CPU; time to call the in cyclic_omni_stop()
2634 omni->cyo_offline(omni->cyo_arg, cpu->cyp_cpu, ocpu->cyo_arg); in cyclic_omni_stop()
2649 * The cyi_cpu field of the cyc_id_t structure tracks the CPU in cyclic_new_id()
2679 * interval. The cyclic will run on a CPU which both has interrupts enabled
2680 * and is in the system CPU partition.
2726 * the CPU will wedge. It's the responsibility of the caller to assure that
2799 * cpu_t * <-- Pointer to CPU about to be onlined
2806 * cyclic begins to fire on the specified CPU. As the above argument
2834 * cpu_t * <-- Pointer to CPU about to be offlined
2835 * void * <-- CPU's cyclic argument (that is, value
2840 * cyclic has ceased firing on the specified CPU. Its purpose is to
2861 cyc_cpu_t *cpu; in cyclic_add_omni() local
2871 if ((cpu = c->cpu_cyclic) == NULL) in cyclic_add_omni()
2874 if (cpu->cyp_state != CYS_ONLINE) { in cyclic_add_omni()
2875 ASSERT(cpu->cyp_state == CYS_OFFLINE); in cyclic_add_omni()
2879 cyclic_omni_start(idp, cpu); in cyclic_add_omni()
2883 * We must have found at least one online CPU on which to run in cyclic_add_omni()
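
The omnipresent-cyclic interface referenced above works through a cyc_omni_handler_t: cyclic_add_omni() invokes the cyo_online callback for each online CPU (and again for any CPU that later comes online), and that callback fills in the per-CPU handler and firing time; cyo_offline runs after the per-CPU component has been removed. A sketch under those assumptions, with illustrative names (my_omni_tick, my_online, my_offline) and a made-up 10ms interval:

    #include <sys/cyclic.h>
    #include <sys/cpuvar.h>
    #include <sys/systm.h>
    #include <sys/time.h>

    static void
    my_omni_tick(void *arg)
    {
            /* arg is whatever cyh_arg was set to in my_online() for this CPU */
    }

    static void
    my_online(void *global, cpu_t *c, cyc_handler_t *hdlr, cyc_time_t *when)
    {
            /* Called (with cpu_lock held) before the cyclic fires on CPU c. */
            hdlr->cyh_func = my_omni_tick;
            hdlr->cyh_arg = (void *)(uintptr_t)c->cpu_id;   /* per-CPU argument */
            hdlr->cyh_level = CY_LOW_LEVEL;

            when->cyt_when = 0;
            when->cyt_interval = NANOSEC / 100;             /* 10ms, illustrative */
    }

    static void
    my_offline(void *global, cpu_t *c, void *arg)
    {
            /*
             * Called after the cyclic has ceased firing on CPU c; arg is the
             * per-CPU value set up in my_online().  Nothing to free here.
             */
    }

    static cyclic_id_t
    my_omni_start(void)
    {
            cyc_omni_handler_t omni;
            cyclic_id_t id;

            omni.cyo_online = my_online;
            omni.cyo_offline = my_offline;
            omni.cyo_arg = NULL;

            mutex_enter(&cpu_lock);         /* cyclic_add_omni() requires cpu_lock */
            id = cyclic_add_omni(&omni);
            mutex_exit(&cpu_lock);

            return (id);
    }
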
2927 cyc_cpu_t *cpu = idp->cyi_cpu; in cyclic_remove() local
2932 if (cpu != NULL) { in cyclic_remove()
2933 (void) cyclic_remove_here(cpu, idp->cyi_ndx, NULL, CY_WAIT); in cyclic_remove()
2959 * cyclic_bind() atomically changes the CPU and CPU partition bindings
2968 * The second argument specifies the CPU to which to bind the specified
2969 * cyclic. If the specified cyclic is bound to a CPU other than the one
2970 * specified, it will be unbound from its bound CPU. Unbinding the cyclic
2971 * from its CPU may cause it to be juggled to another CPU. If the specified
2972 * CPU is non-NULL, the cyclic will be subsequently rebound to the specified
2973 * CPU.
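
Putting the binding arguments together: the sketch below, with illustrative wrapper names, binds an existing non-omnipresent cyclic to a particular CPU with no partition binding, and later clears both bindings so the cyclic may be juggled freely again. Note the constraints that follow, in particular that cyclics may not be bound to an offline CPU.

    #include <sys/cyclic.h>
    #include <sys/cpuvar.h>

    static void
    my_bind_to_cpu(cyclic_id_t id, cpu_t *c)
    {
            mutex_enter(&cpu_lock);         /* cyclic_bind() requires cpu_lock */
            cyclic_bind(id, c, NULL);       /* CPU-bound, no partition binding */
            mutex_exit(&cpu_lock);
    }

    static void
    my_unbind(cyclic_id_t id)
    {
            mutex_enter(&cpu_lock);
            cyclic_bind(id, NULL, NULL);    /* drop both bindings */
            mutex_exit(&cpu_lock);
    }
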
2975 * If a CPU with bound cyclics is transitioned into the P_NOINTR state,
2976 * only cyclics not bound to the CPU can be juggled away; CPU-bound cyclics
2977 * will continue to fire on the P_NOINTR CPU. A CPU with bound cyclics
2978 * cannot be offlined (attempts to offline the CPU will return EBUSY).
2979 * Likewise, cyclics may not be bound to an offline CPU; if the caller
2980 * attempts to bind a cyclic to an offline CPU, the cyclic subsystem will
2983 * The third argument specifies the CPU partition to which to bind the
2984 * specified cyclic. If the specified cyclic is bound to a CPU partition
2986 * partition. Unbinding the cyclic from its CPU partition may cause it
2987 * to be juggled to another CPU. If the specified CPU partition is
2988 * non-NULL, the cyclic will be subsequently rebound to the specified CPU
2991 * It is the caller's responsibility to assure that the specified CPU
2992 * partition contains a CPU. If it does not, the cyclic subsystem will
2993 * panic. A CPU partition with bound cyclics cannot be destroyed (attempts
2994 * to destroy the partition will return EBUSY). If a CPU with
2996 * bound to the CPU's partition (but not bound to the CPU) will be juggled
2997 * away only if there exists another CPU in the partition in the P_ONLINE
3000 * It is the caller's responsibility to assure that the specified CPU and
3001 * CPU partition are self-consistent. If both parameters are non-NULL,
3002 * and the specified CPU partition does not contain the specified CPU, the
3005 * It is the caller's responsibility to assure that the specified CPU has
3027 cyc_cpu_t *cpu = idp->cyi_cpu; in cyclic_bind() local
3035 if (cpu == NULL) { in cyclic_bind()
3040 c = cpu->cyp_cpu; in cyclic_bind()
3041 flags = cpu->cyp_cyclics[idp->cyi_ndx].cy_flags; in cyclic_bind()
3047 * Reload our cpu (we may have migrated). We don't have to reload in cyclic_bind()
3051 cpu = idp->cyi_cpu; in cyclic_bind()
3052 c = cpu->cyp_cpu; in cyclic_bind()
3058 * Now reload the flags field, asserting that if we are CPU bound, in cyclic_bind()
3059 * the CPU was specified (and likewise, if we are partition bound, in cyclic_bind()
3062 cpu = idp->cyi_cpu; in cyclic_bind()
3063 c = cpu->cyp_cpu; in cyclic_bind()
3064 flags = cpu->cyp_cyclics[idp->cyi_ndx].cy_flags; in cyclic_bind()
3079 cyc_cpu_t *cpu; in cyclic_reprogram() local
3097 cpu = CPU->cpu_cyclic; in cyclic_reprogram()
3101 * to the current CPU. Look for it in the list. in cyclic_reprogram()
3105 if (ocpu->cyo_cpu == cpu) in cyclic_reprogram()
3112 * Didn't find it. This means that CPU offline in cyclic_reprogram()
3124 cpu = idp->cyi_cpu; in cyclic_reprogram()
3128 if (cpu->cyp_cpu == CPU) { in cyclic_reprogram()
3132 * remote CPU will cause it to fail. in cyclic_reprogram()
3134 if (!cyclic_reprogram_cyclic(cpu, ndx, expiration, B_TRUE)) { in cyclic_reprogram()
3138 cyclic_reprogram_here(cpu, ndx, expiration); in cyclic_reprogram()
3156 * cyclic_move_here() attempts to shuffle a cyclic onto the current CPU.
3162 * cyclic_add_omni() or one bound to a CPU or partition via cyclic_bind().
3165 * reason the current CPU is unsuitable or the thread migrates between CPUs
3167 * other CPU.
3184 cpu_t *dest = CPU; in cyclic_move_here()
3196 /* Is the destination CPU suitable for a migration target? */ in cyclic_move_here()
3222 * be done before the CPU can be configured. in cyclic_init()
3227 * It's safe to look at the "CPU" pointer without disabling kernel in cyclic_init()
3231 cyclic_configure(CPU); in cyclic_init()
3232 cyclic_online(CPU); in cyclic_init()
3238 * find the already initialized CPU, and initialize every other CPU with the
3266 * specified CPU; all remaining cyclics on the CPU will either be CPU-
3271 * The only argument to cyclic_juggle() is the CPU from which cyclics
3272 * should be juggled. CPU-bound cyclics are never juggled; partition-bound
3273 * cyclics are only juggled if the specified CPU is in the P_NOINTR state
3274 * and there exists a P_ONLINE CPU in the partition. The cyclic subsystem
3281 * be juggled away from the CPU, and zero if one or more cyclics could
3297 cyc_cpu_t *cpu = c->cpu_cyclic; in cyclic_juggle() local
3305 * We'll go through each cyclic on the CPU, attempting to juggle in cyclic_juggle()
3309 if (idp->cyi_cpu != cpu) in cyclic_juggle()
3317 ASSERT(idp->cyi_cpu != cpu); in cyclic_juggle()
3328 * cyclic_offline() offlines the cyclic subsystem on the specified CPU.
3332 * The only argument to cyclic_offline() is a CPU to offline.
3334 * CPU.
3338 * cyclic_offline() returns 1 if all cyclics on the CPU were juggled away
3339 * and the cyclic subsystem on the CPU was successfully offlined.
3341 * offline operation. All remaining cyclics on the CPU will either be
3342 * CPU- or partition-bound.
3351 * offline the CPU immediately after cyclic_offline() returns success (i.e.
3353 * fail the CPU offline operation if cyclic_offline() returns failure.
3358 cyc_cpu_t *cpu = c->cpu_cyclic; in cyclic_offline() local
3361 CYC_PTRACE1("offline", cpu); in cyclic_offline()
3368 * This CPU is headed offline; we need to now stop omnipresent in cyclic_offline()
3369 * cyclic firing on this CPU. in cyclic_offline()
3376 * We cannot possibly be offlining the last CPU; cyi_omni_list in cyclic_offline()
3380 cyclic_omni_stop(idp, cpu); in cyclic_offline()
3383 ASSERT(cpu->cyp_state == CYS_ONLINE); in cyclic_offline()
3384 cpu->cyp_state = CYS_OFFLINE; in cyclic_offline()
3394 * cyclic_online() onlines a CPU previously offlined with cyclic_offline().
3398 * cyclic_online()'s only argument is a CPU to online. The specified
3399 * CPU must have been previously offlined with cyclic_offline(). After
3400 * cyclic_online() returns, the specified CPU will be eligible to execute
3415 cyc_cpu_t *cpu = c->cpu_cyclic; in cyclic_online() local
3418 CYC_PTRACE1("online", cpu); in cyclic_online()
3421 ASSERT(cpu->cyp_state == CYS_OFFLINE); in cyclic_online()
3423 cpu->cyp_state = CYS_ONLINE; in cyclic_online()
3426 * Now that this CPU is open for business, we need to start firing in cyclic_online()
3433 cyclic_omni_start(idp, cpu); in cyclic_online()
3442 * cyclic_move_in() is called by the CPU partition code immediately after
3443 * the specified CPU has moved into a new partition.
3447 * The only argument to cyclic_move_in() is a CPU which has moved into a
3448 * new partition. If the specified CPU is P_ONLINE, and every other
3449 * CPU in the specified CPU's new partition is P_NOINTR, cyclic_move_in()
3450 * will juggle all partition-bound, CPU-unbound cyclics to the specified
3451 * CPU.
3459 * cyclic_move_in() should _only_ be called immediately after a CPU has
3477 * we find one, check to see if it is currently on a CPU which has in cyclic_move_in()
3478 * interrupts disabled. If it is (and if this CPU currently has in cyclic_move_in()
3487 cyc_cpu_t *cpu = idp->cyi_cpu; in cyclic_move_in() local
3493 if (cpu == NULL) in cyclic_move_in()
3496 c = cpu->cyp_cpu; in cyclic_move_in()
3501 cyclic = &cpu->cyp_cyclics[idp->cyi_ndx]; in cyclic_move_in()
3508 * (otherwise, it would not be on a CPU with interrupts in cyclic_move_in()
3509 * disabled); juggle it to our CPU. in cyclic_move_in()
3523 * cyclic_move_out() is called by the CPU partition code immediately before
3524 * the specified CPU is to move out of its partition.
3528 * The only argument to cyclic_move_out() is a CPU which is to move out of
3532 * cyclics. If the specified CPU is the last CPU in a partition with
3534 * a partition-bound cyclic which is CPU-bound to the specified CPU,
3538 * partition-bound cyclics; CPU-bound cyclics which are not partition-bound
3540 * affiliation of the CPU.
3544 * cyclic_move_out() returns 1 if all partition-bound cyclics on the CPU
3549 * cyclic_move_out() should _only_ be called immediately before a CPU has
3552 * of the specified CPU immediately after cyclic_move_out() returns
3554 * the caller will fail the CPU repartitioning operation if cyclic_move_out()
3562 cyc_cpu_t *cpu = c->cpu_cyclic, *dest; in cyclic_move_out() local
3563 cyclic_t *cyclic, *cyclics = cpu->cyp_cyclics; in cyclic_move_out()
3566 CYC_PTRACE1("move-out", cpu); in cyclic_move_out()
3570 * If there are any CYF_PART_BOUND cyclics on this CPU, we need in cyclic_move_out()
3575 if (idp->cyi_cpu != cpu) in cyclic_move_out()
3591 CYC_PTRACE("move-out-fail", cpu, idp); in cyclic_move_out()
3597 CYC_PTRACE1("move-out-done", cpu); in cyclic_move_out()
3613 * cyclic_suspend() takes no arguments. Each CPU with an active cyclic
3641 * The cyclic subsystem must be configured on every valid CPU;
3653 cyc_cpu_t *cpu; in cyclic_suspend() local
3662 cpu = c->cpu_cyclic; in cyclic_suspend()
3663 be = cpu->cyp_backend; in cyclic_suspend()
3664 arg.cyx_cpu = cpu; in cyclic_suspend()
3679 * cyclic_resume() takes no arguments. Each CPU with an active cyclic
3694 * The cyclic subsystem must be configured on every valid CPU;
3705 cyc_cpu_t *cpu; in cyclic_resume() local
3715 cpu = c->cpu_cyclic; in cyclic_resume()
3716 be = cpu->cyp_backend; in cyclic_resume()
3717 arg.cyx_cpu = cpu; in cyclic_resume()