Lines Matching defs:lpl
143 * statically allocated 2-element lpl list lpl_bootstrap_list and later cloned
145 * for all lpl operations until cp_default is fully constructed.
148 * consumer who needs the default lpl should use lpl_bootstrap, which is a pointer to
152 * lgrp will use lpl_bootstrap as a default lpl. This is necessary because
163 * If cp still references the bootstrap lpl, it has not yet been added to
237 * lpl topology
252 * defines for lpl topology verifier return codes
332 * Set up the initial lpl list for CPU0 and the initial t0 home.
333 * The only lpl space we have so far is lpl_bootstrap. It is used for
577 * links, and give it a bootstrap lpl so that it can
816 * For multi-lgroup systems, we need to set up the lpl for CPU0, or CPU0 will
817 * end up in the lpl for lgroup 0 whether it is supposed to be there or
818 * not, since none of the lgroup IDs in the lpls have been set yet.
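
Taken together, the fragments at 143-163, 332-333 and 816-818 describe the
bootstrap lpl that homes CPU0 (and t0) until cp_default is fully constructed.
A minimal sketch of the arrangement, using simplified stand-in fields for
lpl_t and a hypothetical uses_bootstrap_lpl() helper for the "cp still
references the bootstrap lpl" test:

/* Simplified stand-in for the kernel's lpl_t; illustration only. */
typedef struct lpl {
    int         lpl_lgrpid;
    int         lpl_ncpu;
    struct lpl *lpl_parent;
} lpl_t;

/*
 * Statically allocated two-element bootstrap list: a leaf plus room for a
 * parent.  Everything homes here until cp_default's lpl list is built.
 */
static lpl_t lpl_bootstrap_list[2];

/* Early consumers go through this pointer, not the array itself. */
lpl_t *lpl_bootstrap = lpl_bootstrap_list;

/* A partition whose lpl list still points here has not been set up yet. */
static int
uses_bootstrap_lpl(lpl_t *cp_lgrploads)
{
    return (cp_lgrploads == lpl_bootstrap);
}
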
1530 * thread to be re-homed while we're poking around with its lpl, and the lpl
1542 lpl_t *lpl;
1546 lpl = curthread->t_lpl;
1547 ASSERT(lpl != NULL);
1548 ASSERT(lpl->lpl_lgrpid >= 0 && lpl->lpl_lgrpid <= lgrp_alloc_max);
1549 ASSERT(LGRP_EXISTS(lgrp_table[lpl->lpl_lgrpid]));
1550 lgrp = lgrp_table[lpl->lpl_lgrpid];
1566 lpl_t *lpl;
1578 lpl = t->t_lpl;
1579 ASSERT(lpl != NULL);
1580 ASSERT(lpl->lpl_lgrpid >= 0 && lpl->lpl_lgrpid <= lgrp_alloc_max);
1581 lgrp = lpl->lpl_lgrpid;
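
Lines 1542-1581 show the recurring lookup pattern: thread -> t_lpl ->
lpl_lgrpid -> lgrp_table[].  A sketch of that pattern with simplified
stand-in types and a hypothetical thread_home_lgrp() wrapper; the real code
uses the kernel's lgrp_table[], lgrp_alloc_max and LGRP_EXISTS() checks:

#include <assert.h>
#include <stddef.h>

#define NLGRPS_MAX 8                   /* stand-in for the real table size */

typedef struct lgrp { int lgrp_id; } lgrp_t;
typedef struct lpl { int lpl_lgrpid; } lpl_t;
typedef struct kthread { lpl_t *t_lpl; } kthread_t;

static lgrp_t *lgrp_table[NLGRPS_MAX]; /* simplified lgrp table */
static int lgrp_alloc_max = NLGRPS_MAX - 1;

/* Resolve a thread's home lgroup through its lpl. */
static lgrp_t *
thread_home_lgrp(kthread_t *t)
{
    lpl_t *lpl = t->t_lpl;

    assert(lpl != NULL);
    assert(lpl->lpl_lgrpid >= 0 && lpl->lpl_lgrpid <= lgrp_alloc_max);
    return (lgrp_table[lpl->lpl_lgrpid]);
}
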
1634 * "cpu", and its lpl from going away across a call to this function.
1840 * each lpl is traversed sequentially, but in a different order. We hope this
1868 * slot into which the new lpl will be inserted. This effectively
1954 * Check to see if the resource set of the target lpl contains the
1955 * supplied leaf lpl. This returns 1 if the lpl is found, 0 if it is not.
1972 * Called when we change cpu lpl membership. This increments or decrements the
1973 * per-cpu counter in every lpl in which our leaf appears.
1995 * Don't adjust if the lgrp isn't there, if we're the leaf lpl
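
Lines 1954-1995 describe lpl_rset_contains() and the per-cpu counter
adjustment made whenever cpu lpl membership changes.  A small sketch of
both, assuming a simplified rset array and a hypothetical adjust_cpu_count()
walker in place of the real traversal:

#include <stddef.h>

typedef struct lpl {
    struct lpl **lpl_rset;   /* resources contained in this lpl */
    unsigned     lpl_nrset;
    int          lpl_ncpu;   /* cpus contributing to this lpl */
} lpl_t;

/* Return 1 if 'leaf' appears in target's resource set, 0 otherwise. */
static int
rset_contains(lpl_t *target, lpl_t *leaf)
{
    for (unsigned i = 0; i < target->lpl_nrset; i++)
        if (target->lpl_rset[i] == leaf)
            return (1);
    return (0);
}

/* Adjust the cpu count of every lpl (other than the leaf) containing it. */
static void
adjust_cpu_count(lpl_t **all_lpls, unsigned n, lpl_t *leaf, int delta)
{
    for (unsigned i = 0; i < n; i++)
        if (all_lpls[i] != leaf && rset_contains(all_lpls[i], leaf))
            all_lpls[i]->lpl_ncpu += delta;
}
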
2019 * Initialize lpl with given resources and specified lgrp
2022 lpl_init(lpl_t *lpl, lpl_t *lpl_leaf, lgrp_t *lgrp)
2024 lpl->lpl_lgrpid = lgrp->lgrp_id;
2025 lpl->lpl_loadavg = 0;
2026 if (lpl == lpl_leaf)
2027 lpl->lpl_ncpu = 1;
2029 lpl->lpl_ncpu = lpl_leaf->lpl_ncpu;
2030 lpl->lpl_nrset = 1;
2031 lpl->lpl_rset[0] = lpl_leaf;
2032 lpl->lpl_id2rset[lpl_leaf->lpl_lgrpid] = 0;
2033 lpl->lpl_lgrp = lgrp;
2034 lpl->lpl_parent = NULL; /* set by lpl_leaf_insert() */
2035 lpl->lpl_cpus = NULL; /* set by lgrp_part_add_cpu() */
2039 * Clear an unused lpl
2042 lpl_clear(lpl_t *lpl)
2045 * Clear out all fields in the lpl except:
2049 * Note that the lpl's rset and id2rset mapping are cleared as well.
2051 lpl->lpl_loadavg = 0;
2052 lpl->lpl_ncpu = 0;
2053 lpl->lpl_lgrp = NULL;
2054 lpl->lpl_parent = NULL;
2055 lpl->lpl_cpus = NULL;
2056 lpl->lpl_nrset = 0;
2057 lpl->lpl_homed_time = 0;
2058 bzero(lpl->lpl_rset, sizeof (lpl->lpl_rset[0]) * lpl->lpl_rset_sz);
2059 bzero(lpl->lpl_id2rset,
2060 sizeof (lpl->lpl_id2rset[0]) * lpl->lpl_rset_sz);
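
lpl_init() (2022) and lpl_clear() (2042) are near mirror images: one seeds
an lpl as a single-resource leaf set, the other zeroes everything except its
lgroup id and rset sizing.  The following consolidates the matched lines
into one place, with simplified stand-in type definitions rather than the
real lpl_t/lgrp_t:

#include <stddef.h>
#include <string.h>

typedef struct lgrp { int lgrp_id; } lgrp_t;

typedef struct lpl {
    int          lpl_lgrpid;
    unsigned     lpl_loadavg;
    int          lpl_ncpu;
    unsigned     lpl_nrset;
    struct lpl **lpl_rset;
    int         *lpl_id2rset;
    unsigned     lpl_rset_sz;
    lgrp_t      *lpl_lgrp;
    struct lpl  *lpl_parent;
    void        *lpl_cpus;
    long         lpl_homed_time;
} lpl_t;

/* Seed 'lpl' so that its only resource is 'leaf' (possibly itself). */
static void
lpl_init(lpl_t *lpl, lpl_t *leaf, lgrp_t *lgrp)
{
    lpl->lpl_lgrpid = lgrp->lgrp_id;
    lpl->lpl_loadavg = 0;
    lpl->lpl_ncpu = (lpl == leaf) ? 1 : leaf->lpl_ncpu;
    lpl->lpl_nrset = 1;
    lpl->lpl_rset[0] = leaf;
    lpl->lpl_id2rset[leaf->lpl_lgrpid] = 0;
    lpl->lpl_lgrp = lgrp;
    lpl->lpl_parent = NULL;   /* set when linked into the hierarchy */
    lpl->lpl_cpus = NULL;     /* set when a cpu is attached */
}

/* Undo the above, keeping only the lgroup id and the rset sizing. */
static void
lpl_clear(lpl_t *lpl)
{
    lpl->lpl_loadavg = 0;
    lpl->lpl_ncpu = 0;
    lpl->lpl_lgrp = NULL;
    lpl->lpl_parent = NULL;
    lpl->lpl_cpus = NULL;
    lpl->lpl_nrset = 0;
    lpl->lpl_homed_time = 0;
    memset(lpl->lpl_rset, 0, sizeof (lpl->lpl_rset[0]) * lpl->lpl_rset_sz);
    memset(lpl->lpl_id2rset, 0,
        sizeof (lpl->lpl_id2rset[0]) * lpl->lpl_rset_sz);
}
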
2064 * Given a CPU-partition, verify that the lpl topology in the CPU-partition
2065 * is in sync with the lgroup topology in the system. The lpl topology may not
2077 lpl_t *lpl;
2094 lpl = NULL;
2100 lpl = &cpupart->cp_lgrploads[i];
2104 /* if lgroup doesn't exist, make sure lpl is empty */
2106 ASSERT(lpl->lpl_ncpu == 0);
2107 if (lpl->lpl_ncpu > 0) {
2114 /* verify that lgroup and lpl are identically numbered */
2115 ASSERT(lgrp->lgrp_id == lpl->lpl_lgrpid);
2117 /* if lgroup isn't in our partition, make sure lpl is empty */
2120 ASSERT(lpl->lpl_ncpu == 0);
2121 if (lpl->lpl_ncpu > 0) {
2125 * lpl is empty, and lgroup isn't in partition. verify
2126 * that lpl doesn't show up in anyone else's rsets (in
2130 lpl_t *i_lpl; /* lpl we're iterating over */
2134 ASSERT(!lpl_rset_contains(i_lpl, lpl));
2135 if (lpl_rset_contains(i_lpl, lpl)) {
2144 /* lgroup is in this partition, now check it against lpl */
2147 ASSERT(lgrp == lpl->lpl_lgrp);
2148 if (lgrp != lpl->lpl_lgrp) {
2154 ASSERT(lpl->lpl_parent);
2156 lpl->lpl_parent->lpl_lgrpid);
2158 if (!lpl->lpl_parent) {
2161 lpl->lpl_parent->lpl_lgrpid) {
2167 if ((lpl->lpl_nrset == 1) && (lpl == lpl->lpl_rset[0])) {
2172 lpl->lpl_lgrpid)));
2176 lpl->lpl_lgrpid))) {
2180 ASSERT((lgrp->lgrp_cpucnt >= lpl->lpl_ncpu) &&
2181 (lpl->lpl_ncpu > 0));
2182 if ((lgrp->lgrp_cpucnt < lpl->lpl_ncpu) ||
2183 (lpl->lpl_ncpu <= 0)) {
2189 * cpus in the lpl's linked list. This only exists in
2193 cpu = cp_start = lpl->lpl_cpus;
2197 /* check to make sure cpu's lpl is leaf lpl */
2198 ASSERT(cpu->cpu_lpl == lpl);
2199 if (cpu->cpu_lpl != lpl) {
2211 ASSERT(j == lpl->lpl_ncpu);
2212 if (j != lpl->lpl_ncpu) {
2217 * Also, check that leaf lpl is contained in all
2242 lpl));
2244 if (!lpl_rset_contains(lpl_cand, lpl)) {
2256 ASSERT(lpl->lpl_cpus == NULL);
2257 if (lpl->lpl_cpus != NULL) {
2265 for (j = sum = 0; j < lpl->lpl_nrset; j++) {
2266 sum += lpl->lpl_rset[j]->lpl_ncpu;
2269 ASSERT(sum == lpl->lpl_ncpu);
2270 if (sum != lpl->lpl_ncpu) {
2276 * Check the rset of the lpl in question. Make sure that each
2284 for (j = 0; j < lpl->lpl_nrset; j++) {
2285 klgrpset_add(rset, lpl->lpl_rset[j]->lpl_lgrpid);
2288 /* make sure lpl rset matches lgrp rset */
2300 * contained in the lpl
2302 for (j = 0; j < lpl->lpl_nrset; j++) {
2303 if (lpl->lpl_rset[j] == NULL)
2307 ASSERT(j == lpl->lpl_nrset);
2308 if (j != lpl->lpl_nrset) {
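
The topology verifier spanning 2064-2308 cross-checks each lpl against its
lgroup: matching ids, empty lpls for lgroups outside the partition, leaf
containment in the enclosing rsets, and a cpu count that equals the sum over
each rset.  A reduced sketch of the last two checks, with simplified types
and a hypothetical verify_leaf() driver; the real verifier walks the
partition and lgroup tables rather than a simple parent chain:

#include <stddef.h>

typedef struct lpl {
    int          lpl_ncpu;
    unsigned     lpl_nrset;
    struct lpl **lpl_rset;
    struct lpl  *lpl_parent;
} lpl_t;

static int
rset_contains(lpl_t *target, lpl_t *leaf)
{
    for (unsigned i = 0; i < target->lpl_nrset; i++)
        if (target->lpl_rset[i] == leaf)
            return (1);
    return (0);
}

/* Check one leaf: contained in each ancestor, whose ncpu sums add up. */
static int
verify_leaf(lpl_t *leaf)
{
    for (lpl_t *anc = leaf->lpl_parent; anc != NULL; anc = anc->lpl_parent) {
        int sum = 0;

        if (!rset_contains(anc, leaf))
            return (-1);
        for (unsigned i = 0; i < anc->lpl_nrset; i++)
            sum += anc->lpl_rset[i]->lpl_ncpu;
        if (sum != anc->lpl_ncpu)
            return (-1);
    }
    return (0);
}
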
2317 * Flatten lpl topology to given number of levels. This is presently only
2319 * and home the leaf lpls to the root lpl.
2366 * this guy back to the root lpl.
2375 * Now that we're done, make sure the count on the root lpl is
2392 * Insert a lpl into the resource hierarchy and create any additional lpls that
2398 * those for which the lpl is a leaf as opposed to simply a named equally local
2400 * new intermediate lpl is introduced. Since the main loop only traverses
2459 /* does new lpl need to be populated with other resources? */
2472 lpl_t *lpl_cand; /* candidate lpl */
2485 * This lpl's rset has changed. Update the hint in its
2493 * remove a lpl from the hierarchy of resources, clearing its state when
2548 * Update this lpl's children
2559 * The lpl (cpu partition load average information) is now arranged in a
2564 * 1. A lpl structure that contains resources already in the hierarchy tree.
2565 * In this case, all of the associated lpl relationships have been defined, and
2566 * all that is necessary is that we link the new cpu into the per-lpl list of
2571 * 2. The lpl to contain the resources in this cpu-partition for this lgrp does
2572 * not exist yet. In this case, it is necessary to build the leaf lpl, and
2576 * also calls lpl_leaf_insert() which inserts the named lpl into the hierarchy
2606 * the lpl should already exist in the parent, so just update
2612 /* link cpu into list of cpus in lpl */
2622 * lpl, so assert that ncpu == 1 for the case where we don't
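
Lines 2559-2622 describe the two cases handled when a cpu joins a
partition's lpl hierarchy; in case 1 the leaf lpl already exists and the cpu
is simply linked into its per-lpl cpu list.  A sketch of that linkage,
modeling the list as a circular doubly linked list over simplified
cpu_t/lpl_t stand-ins; lpl_link_cpu() is a hypothetical name, and ancestor
cpu counts are assumed to be updated separately (as at 1972-1995):

#include <stddef.h>

typedef struct cpu {
    struct cpu *cpu_next_lpl;
    struct cpu *cpu_prev_lpl;
    struct lpl *cpu_lpl;
} cpu_t;

typedef struct lpl {
    cpu_t *lpl_cpus;   /* head of the list of cpus in this lpl */
    int    lpl_ncpu;
} lpl_t;

/* Case 1: the leaf lpl already exists; link the cpu into its cpu list. */
static void
lpl_link_cpu(lpl_t *leaf, cpu_t *cp)
{
    if (leaf->lpl_cpus == NULL) {
        /* first cpu: singleton list pointing at itself */
        leaf->lpl_cpus = cp->cpu_next_lpl = cp->cpu_prev_lpl = cp;
    } else {
        cp->cpu_next_lpl = leaf->lpl_cpus;
        cp->cpu_prev_lpl = leaf->lpl_cpus->cpu_prev_lpl;
        leaf->lpl_cpus->cpu_prev_lpl->cpu_next_lpl = cp;
        leaf->lpl_cpus->cpu_prev_lpl = cp;
    }
    cp->cpu_lpl = leaf;
    leaf->lpl_ncpu++;
}
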
2635 * The lpl (cpu partition load average information) is now arranged in a
2644 * from the per-cpu lpl list.
2646 * 2. Removal of the resource results in the lpl containing no resources. (It's
2648 * place; however, additionally we must remove the lpl structure itself, prune
2651 * the lpl that has been deleted. Cpu-partition changes are handled by this
2653 * out the empty lpl and any of its orphaned direct ancestors.
2658 lpl_t *lpl;
2666 lpl = leaf_lpl = cp->cpu_lpl;
2673 ASSERT(lpl->lpl_ncpu);
2674 if (--lpl->lpl_ncpu == 0) {
2679 klgrpset_del(cp->cpu_part->cp_lgrpset, lpl->lpl_lgrpid);
2681 /* eliminate remaining lpl link pointers in cpu, lpl */
2682 lpl->lpl_cpus = cp->cpu_next_lpl = cp->cpu_prev_lpl = NULL;
2687 /* unlink cpu from lists of cpus in lpl */
2690 if (lpl->lpl_cpus == cp) {
2691 lpl->lpl_cpus = cp->cpu_next_lpl;
2701 /* clear cpu's lpl ptr when we're all done */
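
Removal (2635-2701) mirrors the insert path: the cpu is unlinked from the
leaf's cpu list, and if the leaf's cpu count reaches zero the lpl is cleared
and pruned and its lgroup dropped from the partition's lgrpset.  A sketch of
the unlink step with the same simplified types; lpl_unlink_cpu() is a
hypothetical helper, and the pruning itself is left to the caller:

#include <stddef.h>

typedef struct cpu {
    struct cpu *cpu_next_lpl;
    struct cpu *cpu_prev_lpl;
    struct lpl *cpu_lpl;
} cpu_t;

typedef struct lpl {
    cpu_t *lpl_cpus;
    int    lpl_ncpu;
} lpl_t;

/* Unlink 'cp' from its leaf lpl; return 1 if the lpl is now empty. */
static int
lpl_unlink_cpu(cpu_t *cp)
{
    lpl_t *leaf = cp->cpu_lpl;

    if (--leaf->lpl_ncpu == 0) {
        /* last cpu: caller clears and prunes the now-empty lpl */
        leaf->lpl_cpus = NULL;
    } else {
        /* drop cp out of the circular list, fixing the head if needed */
        cp->cpu_prev_lpl->cpu_next_lpl = cp->cpu_next_lpl;
        cp->cpu_next_lpl->cpu_prev_lpl = cp->cpu_prev_lpl;
        if (leaf->lpl_cpus == cp)
            leaf->lpl_cpus = cp->cpu_next_lpl;
    }
    cp->cpu_next_lpl = cp->cpu_prev_lpl = NULL;
    cp->cpu_lpl = NULL;
    return (leaf->lpl_ncpu == 0);
}
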
2717 lgrp_loadavg(lpl_t *lpl, uint_t nrcpus, int ageflag)
2738 if ((lpl == NULL) || /* we're booting - this is easiest for now */
2739 ((ncpu = lpl->lpl_ncpu) == 0)) {
2763 old = new = lpl->lpl_loadavg;
2776 } while (atomic_cas_32((lgrp_load_t *)&lpl->lpl_loadavg,
2787 old = new = lpl->lpl_loadavg;
2795 } while (atomic_cas_32((lgrp_load_t *)&lpl->lpl_loadavg,
2800 * Do the same for this lpl's parent
2802 if ((lpl = lpl->lpl_parent) == NULL)
2804 ncpu = lpl->lpl_ncpu;
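
lgrp_loadavg() (2717) folds new load into lpl_loadavg with a lock-free
atomic_cas_32() retry loop and then repeats the update on each ancestor by
following lpl_parent.  A sketch of that shape using C11 atomics in place of
atomic_cas_32(); the division by lpl_ncpu and the overflow clamp are
placeholders for the kernel's actual scaling and aging arithmetic:

#include <stdatomic.h>
#include <stddef.h>
#include <stdint.h>

typedef struct lpl {
    _Atomic uint32_t lpl_loadavg;
    int              lpl_ncpu;
    struct lpl      *lpl_parent;
} lpl_t;

/* Fold 'load' into this lpl and every ancestor, lock-free. */
static void
lpl_loadavg_add(lpl_t *lpl, uint32_t load)
{
    while (lpl != NULL && lpl->lpl_ncpu != 0) {
        uint32_t old, new;

        do {
            old = atomic_load(&lpl->lpl_loadavg);
            new = old + load / (uint32_t)lpl->lpl_ncpu;
            if (new < old)          /* clamp 32-bit wraparound */
                new = UINT32_MAX;
        } while (!atomic_compare_exchange_weak(&lpl->lpl_loadavg,
            &old, new));

        lpl = lpl->lpl_parent;
    }
}
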
2809 * Initialize lpl topology in the target based on topology currently present in
2815 * and all subsequent lpl operations should use it instead of lpl_bootstrap. The
2819 * This function walks the lpl topology in lpl_bootstrap and does the following:
2823 * 2) Sets CPU0 lpl pointer to the correct element of the target list.
2839 lpl_t *lpl = lpl_bootstrap;
2849 * The only target that should be passed here is the cp_default lpl list.
2857 for (i = 0; i < howmany; i++, lpl++, target_lpl++) {
2859 * Copy all fields from lpl, except for the rset,
2867 *target_lpl = *lpl;
2874 * Substitute CPU0 lpl pointer with one relative to target.
2876 if (lpl->lpl_cpus == CPU) {
2877 ASSERT(CPU->cpu_lpl == lpl);
2884 if (lpl->lpl_parent != NULL)
2886 (((uintptr_t)lpl->lpl_parent -
2894 ASSERT(lpl->lpl_nrset <= 1);
2896 for (id = 0; id < lpl->lpl_nrset; id++) {
2897 if (lpl->lpl_rset[id] != NULL) {
2899 (((uintptr_t)lpl->lpl_rset[id] -
2904 lpl->lpl_id2rset[id];
2910 * actual lpl array in the default cpu partition.
2912 * We still need to keep one empty lpl around for newly starting
2914 * dispatcher prior to their lgrp/lpl initialization.
2916 * The lpl-related dispatcher code has been designed to work properly
2918 * bootstrap lpl. Such an lpl appears to the dispatcher as an lpl
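
The copy routine described at 2809-2918 clones the bootstrap lpl list into
the target (cp_default's lpl array) and then rewrites every pointer that
referred into the bootstrap array, the parent link, the rset entries, and
CPU0's lpl pointer, so it refers to the matching slot of the target.  A
sketch of that relocation, assuming identically laid-out source and
destination arrays and the at-most-one-entry rset noted at 2894; relocate()
and topo_copy() are hypothetical names:

#include <stddef.h>
#include <stdint.h>

typedef struct lpl {
    struct lpl *lpl_parent;
    struct lpl *lpl_rset[1];   /* bootstrap lpls hold at most one entry */
    unsigned    lpl_nrset;
} lpl_t;

/* Translate a pointer into src[] to the matching element of dst[]. */
static lpl_t *
relocate(lpl_t *p, lpl_t *src, lpl_t *dst)
{
    if (p == NULL)
        return (NULL);
    return ((lpl_t *)((uintptr_t)dst + ((uintptr_t)p - (uintptr_t)src)));
}

/* Copy 'n' bootstrap lpls into 'dst' and fix up the internal pointers. */
static void
topo_copy(lpl_t *src, lpl_t *dst, unsigned n)
{
    for (unsigned i = 0; i < n; i++) {
        dst[i] = src[i];                       /* field-for-field copy */
        dst[i].lpl_parent = relocate(src[i].lpl_parent, src, dst);
        for (unsigned r = 0; r < src[i].lpl_nrset; r++)
            dst[i].lpl_rset[r] = relocate(src[i].lpl_rset[r], src, dst);
    }
}
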
3017 lpl_t *lpl, *bestlpl, *bestrlpl;
3072 lpl = lgrp_affinity_best(t, cpupart, lgrpid_start, B_FALSE);
3073 if (lpl != NULL)
3074 return (lpl);
3110 lpl = &cpupart->cp_lgrploads[lgrpid];
3119 lpl_pick(lpl, bestlpl)) {
3120 bestload = lpl->lpl_loadavg;
3121 bestlpl = lpl;
3131 lpl_pick(lpl, bestrlpl)) {
3132 bestrload = lpl->lpl_loadavg;
3133 bestrlpl = lpl;
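
The selection loop around 3017-3133 walks the partition's per-lgroup lpls
and keeps the candidate with the lowest lpl_loadavg (lpl_pick() applies the
real comparison and tie-breaking, and lgrp_affinity_best() may short-circuit
the search entirely).  A stripped-down sketch of the scan; treating
lpl_ncpu == 0 as "not in this partition" is a simplification:

#include <stddef.h>
#include <stdint.h>

typedef struct lpl {
    uint32_t lpl_loadavg;
    int      lpl_ncpu;
} lpl_t;

/* Return the non-empty lpl with the smallest load average, or NULL. */
static lpl_t *
pick_least_loaded(lpl_t *lgrploads, unsigned nlgrps)
{
    lpl_t *best = NULL;

    for (unsigned i = 0; i < nlgrps; i++) {
        lpl_t *lpl = &lgrploads[i];

        if (lpl->lpl_ncpu == 0)
            continue;
        if (best == NULL || lpl->lpl_loadavg < best->lpl_loadavg)
            best = lpl;
    }
    return (best);
}
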
3261 lpl_t *lpl, *oldlpl;
3343 lpl = oldlpl;
3346 old = new = lpl->lpl_loadavg;
3358 (lgrp_load_t *)&lpl->lpl_loadavg, old,
3361 lpl = lpl->lpl_parent;
3362 if (lpl == NULL)
3365 ncpu = lpl->lpl_ncpu;
3413 lpl = newlpl;
3415 ncpu = lpl->lpl_ncpu;
3418 old = new = lpl->lpl_loadavg;
3426 } while (atomic_cas_32((lgrp_load_t *)&lpl->lpl_loadavg,
3429 lpl = lpl->lpl_parent;
3430 if (lpl == NULL)
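
The closing fragments (3261-3430) come from the rehoming path: the thread's
load contribution is removed from the old lpl and each of its ancestors,
then added to the new lpl and its ancestors, with the same compare-and-swap
loop at every step.  A sketch of walking both chains; chain_adjust(), the
signed delta, and the clamping are simplifications of the kernel's
per-direction loops:

#include <stdatomic.h>
#include <stddef.h>
#include <stdint.h>

typedef struct lpl {
    _Atomic uint32_t lpl_loadavg;
    int              lpl_ncpu;
    struct lpl      *lpl_parent;
} lpl_t;

/* Atomically apply a (possibly negative) load delta up one parent chain. */
static void
chain_adjust(lpl_t *lpl, int64_t delta)
{
    for (; lpl != NULL && lpl->lpl_ncpu != 0; lpl = lpl->lpl_parent) {
        uint32_t old, new;

        do {
            old = atomic_load(&lpl->lpl_loadavg);
            int64_t val = (int64_t)old + delta / lpl->lpl_ncpu;
            if (val < 0)                    /* clamp underflow */
                val = 0;
            if (val > (int64_t)UINT32_MAX)  /* clamp overflow */
                val = (int64_t)UINT32_MAX;
            new = (uint32_t)val;
        } while (!atomic_compare_exchange_weak(&lpl->lpl_loadavg,
            &old, new));
    }
}

/* Move a thread's load contribution from its old home to its new one. */
static void
rehome_load(lpl_t *oldlpl, lpl_t *newlpl, uint32_t thread_load)
{
    chain_adjust(oldlpl, -(int64_t)thread_load);
    chain_adjust(newlpl, (int64_t)thread_load);
}
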