Lines Matching +full:top +full:- +full:level
1 // SPDX-License-Identifier: GPL-2.0-only
15 #include "tick-internal.h"
22 * lowest level group contains CPUs, the next level groups of CPU groups
25 * CPUs per node even the next level might be kept as groups of CPU groups
34 * GRP0:0 - GRP0:2 GRP0:3 - GRP0:5
37 * CPUS 0-7 8-15 16-23 24-31 32-39 40-47
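
The group counts in this example follow from the fan-out of 8 children per group, combined with the rule (described further down) that groups below the crossnode level never span NUMA nodes. A small user-space sketch of that arithmetic, assuming, as the split into GRP1:0 and GRP1:1 suggests, that the 48 CPUs sit on two NUMA nodes of 24 CPUs each (illustration only; the kernel builds the tree incrementally in tmigr_setup_groups() as CPUs are prepared):

#include <stdio.h>

#define CHILDREN_PER_GROUP	8	/* fan-out used in the example above */

static unsigned int groups_for(unsigned int members)
{
	/* Round up to full groups of CHILDREN_PER_GROUP members. */
	return (members + CHILDREN_PER_GROUP - 1) / CHILDREN_PER_GROUP;
}

int main(void)
{
	unsigned int nodes = 2, cpus_per_node = 24;	/* assumed 48 CPU / 2 node layout */
	unsigned int per_node = cpus_per_node;
	unsigned int lvl = 0, total;

	/* Below the crossnode level, groups never span NUMA nodes. */
	while (per_node > 1) {
		per_node = groups_for(per_node);
		printf("LVL%u: %u groups (%u per node)\n",
		       lvl++, per_node * nodes, per_node);
	}

	/* Above it, the per-node sub-trees are merged across nodes. */
	total = nodes;
	while (total > 1) {
		total = groups_for(total);
		printf("LVL%u: %u group(s) (cross-node)\n", lvl++, total);
	}
	return 0;	/* prints: LVL0: 6, LVL1: 2, LVL2: 1 groups */
}
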
80 * duties up to the top level of the hierarchy (LVL2 in the example). It
86 * ---------------
96 * --------------
102 * the per CPU tmigr_cpu->lock is held.
107 * When @timer_base->lock as well as tmigr related locks are required, the lock
108 * ordering is: first @timer_base->lock, afterwards tmigr related locks.
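
As a user-space sketch of that rule (the mutex names below are stand-ins, not the kernel API): every path that needs both locks takes the timer base lock first and nests the tmigr lock inside it, so no two paths can acquire them in opposite order.

#include <pthread.h>

/* Illustrative stand-ins for timer_base->lock and tmigr_cpu->lock. */
static pthread_mutex_t timer_base_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t tmigr_cpu_lock  = PTHREAD_MUTEX_INITIALIZER;

/* Any path needing both locks nests them in this one documented order. */
static void enqueue_global_timer(void)
{
	pthread_mutex_lock(&timer_base_lock);	/* first: timer base lock   */
	pthread_mutex_lock(&tmigr_cpu_lock);	/* then: tmigr related lock */

	/* ... queue the timer and propagate its expiry ... */

	pthread_mutex_unlock(&tmigr_cpu_lock);
	pthread_mutex_unlock(&timer_base_lock);
}

int main(void)
{
	enqueue_global_timer();
	return 0;
}
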
112 * ------------------------------------------------
144 * --> migrator = TMIGR_NONE migrator = CPU2
145 * --> active = active = CPU2
148 * --> idle idle active idle
161 * --> migrator = CPU1 migrator = CPU2
162 * --> active = CPU1 active = CPU2
165 * idle --> active active idle
173 * --> migrator = GRP0:1
174 * --> active = GRP0:0, GRP0:1
186 * --> migrator = GRP0:1
187 * --> active = GRP0:1
202 * expected value (compare-and-exchange).
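
A user-space model of that scheme, using C11 atomics (the field layout and values below are invented for illustration; the real packed word is union tmigr_state in timer_migration.h): all fields live in one word, so a walker updates them with a single compare-and-exchange and simply retries when another CPU changed the word in the meantime.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define MIGR_NONE	0xffu		/* "no migrator" marker (illustrative) */

/* Packed group state: one word, so one cmpxchg updates it consistently. */
union grp_state {
	uint32_t state;
	struct {
		uint8_t	active;		/* one bit per active child */
		uint8_t	migrator;	/* child mask of the current migrator */
	};
};

static _Atomic uint32_t migr_state;

/* Mark @childmask active and claim the migrator role if nobody holds it. */
static void child_activate(uint8_t childmask)
{
	union grp_state curstate, newstate;

	curstate.state = atomic_load(&migr_state);
	do {
		newstate = curstate;
		newstate.active |= childmask;
		if (newstate.migrator == MIGR_NONE)
			newstate.migrator = childmask;
		/* On failure @curstate is refreshed: recompute and retry. */
	} while (!atomic_compare_exchange_weak(&migr_state, &curstate.state,
					       newstate.state));
}

int main(void)
{
	union grp_state s;

	s.state = 0;
	s.migrator = MIGR_NONE;
	atomic_store(&migr_state, s.state);

	child_activate(0x01);
	s.state = atomic_load(&migr_state);
	printf("active=%#x migrator=%#x\n", (unsigned)s.active, (unsigned)s.migrator);
	return 0;
}
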
210 * ----------------------------------------------------------
241 * migrator = TMIGR_NONE --> migrator = TMIGR_NONE
242 * active = --> active =
246 * idle idle --> idle idle
249 * child going idle in top level group, the expiry of the next group event
254 * --> migrator = TMIGR_NONE
255 * --> active =
264 * idle idle --> idle idle
277 * --> next_expiry = TIMER0 next_expiry = KTIME_MAX
286 * top level group.
291 * --> next_expiry = TIMER0
305 * -------------------------- ---------------------------
307 * cmpxchg(&GRP1:0->state);
309 * spin_lock(&GRP1:0->lock);
312 * spin_unlock(&GRP1:0->lock);
316 * spin_lock(&GRP1:0->lock)
318 * group_state = atomic_read(&GRP1:0->state)
321 * spin_unlock(&GRP1:0->lock) <3>
332 * -----------------------------------------------------------
347 * --> timerqueue = evt-GRP0:0
354 * timerqueue = evt-CPU0, timerqueue =
355 * evt-CPU1
369 * --> timerqueue =
375 * --> groupevt.cpu = CPU0 groupevt.cpu =
376 * timerqueue = evt-CPU0, timerqueue =
377 * evt-CPU1
397 * --> timerqueue = evt-GRP0:0
403 * --> groupevt.cpu = CPU1 groupevt.cpu =
404 * --> timerqueue = evt-CPU1 timerqueue =
430 return !(tmc->tmgroup && tmc->online); in tmigr_is_not_available()
435 * group is not active - so no migrator is set.
441 s.state = atomic_read(&group->migr_state); in tmigr_check_migrator()
455 s.state = atomic_read(&group->migr_state); in tmigr_check_migrator_and_lonely()
471 s.state = atomic_read(&group->migr_state); in tmigr_check_lonely()
479 * struct tmigr_walk - data required for walking the hierarchy
489 * in the top level group only. Be aware that a new top level
490 * of the hierarchy could come into existence between the 'top level
494 * final top level. This is not a problem, as the worst
508 * idle, only the first event of the top level has to be
528 struct tmigr_group *child = NULL, *group = tmc->tmgroup; in __walk_groups()
531 WARN_ON_ONCE(group->level >= tmigr_hierarchy_levels); in __walk_groups()
541 group = READ_ONCE(group->parent); in __walk_groups()
542 data->childmask = child->groupmask; in __walk_groups()
543 WARN_ON_ONCE(!data->childmask); in __walk_groups()
549 lockdep_assert_held(&tmc->lock); in walk_groups()
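
The fragments above come from the generic bottom-up walk. A self-contained user-space sketch of the same idea (the types and the example handler are invented; in the kernel the handlers are e.g. tmigr_active_up() and tmigr_inactive_up(), and the walk additionally refreshes data->childmask from the child's groupmask on each step): start at the CPU's level 0 group and keep calling the handler one level up until it reports that nothing more has to be propagated or the top is reached.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Minimal stand-ins; the real types are struct tmigr_group / tmigr_walk. */
struct grp {
	struct grp	*parent;	/* NULL at the top level */
	unsigned int	level;
};

struct walk_data {
	unsigned int	childmask;	/* position of the child in its parent */
};

/* Per-level handler: returns true when nothing more needs to be propagated. */
typedef bool (*up_f)(struct grp *group, struct grp *child, struct walk_data *d);

static void walk_groups(struct grp *start, up_f up, struct walk_data *data)
{
	struct grp *child = NULL, *group = start;

	do {
		if (up(group, child, data))
			break;			/* handler absorbed the update */

		child = group;
		group = group->parent;		/* continue one level up */
	} while (group);
}

/* Example handler: just report each visited level, never stop early. */
static bool print_level(struct grp *group, struct grp *child, struct walk_data *d)
{
	(void)child; (void)d;
	printf("visiting level %u\n", group->level);
	return false;
}

int main(void)
{
	struct grp top  = { .parent = NULL, .level = 1 };
	struct grp lvl0 = { .parent = &top, .level = 0 };
	struct walk_data data = { .childmask = 1 };

	walk_groups(&lvl0, print_level, &data);
	return 0;
}
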
555 * Returns the next event of the timerqueue @group->events
565 lockdep_assert_held(&group->lock); in tmigr_next_groupevt()
567 WRITE_ONCE(group->next_expiry, KTIME_MAX); in tmigr_next_groupevt()
569 while ((node = timerqueue_getnext(&group->events))) { in tmigr_next_groupevt()
572 if (!READ_ONCE(evt->ignore)) { in tmigr_next_groupevt()
573 WRITE_ONCE(group->next_expiry, evt->nextevt.expires); in tmigr_next_groupevt()
581 if (!timerqueue_del(&group->events, node)) in tmigr_next_groupevt()
598 if (!evt || now < evt->nextevt.expires) in tmigr_next_expired_groupevt()
604 timerqueue_del(&group->events, &evt->nextevt); in tmigr_next_expired_groupevt()
619 return evt->nextevt.expires; in tmigr_next_groupevt_expires()
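
The three helpers above maintain a group's view of its children's first events. A compacted user-space model of that scan (a plain array stands in for the sorted timerqueue, so this is a linear search rather than a pop-from-the-head loop; "ignore" marks events of children that went active again): skip ignored entries and publish the earliest remaining expiry as the group's next_expiry.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define KTIME_MAX	INT64_MAX

struct grp_event {
	int64_t	expires;
	bool	ignore;		/* child went active again, event is stale */
};

struct grp {
	struct grp_event evts[4];	/* stand-in for the group timerqueue */
	unsigned int	 nr_evts;
	int64_t		 next_expiry;	/* earliest non-ignored child event */
};

/* Recompute the group's next expiry, skipping ignored (stale) events. */
static void grp_update_next(struct grp *g)
{
	g->next_expiry = KTIME_MAX;

	for (unsigned int i = 0; i < g->nr_evts; i++) {
		if (g->evts[i].ignore)
			continue;
		if (g->evts[i].expires < g->next_expiry)
			g->next_expiry = g->evts[i].expires;
	}
}

int main(void)
{
	struct grp g = {
		.evts = { { 100, true }, { 250, false }, { 175, false } },
		.nr_evts = 3,
	};

	grp_update_next(&g);
	printf("next_expiry = %lld\n", (long long)g.next_expiry);	/* 175 */
	return 0;
}
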
630 childmask = data->childmask; in tmigr_active_up()
636 curstate.state = atomic_read(&group->migr_state); in tmigr_active_up()
652 } while (!atomic_try_cmpxchg(&group->migr_state, &curstate.state, newstate.state)); in tmigr_active_up()
668 WRITE_ONCE(group->groupevt.ignore, true); in tmigr_active_up()
677 data.childmask = tmc->groupmask; in __tmigr_cpu_activate()
681 tmc->cpuevt.ignore = true; in __tmigr_cpu_activate()
682 WRITE_ONCE(tmc->wakeup, KTIME_MAX); in __tmigr_cpu_activate()
688 * tmigr_cpu_activate() - set this CPU active in timer migration hierarchy
699 if (WARN_ON_ONCE(!tmc->idle)) in tmigr_cpu_activate()
702 raw_spin_lock(&tmc->lock); in tmigr_cpu_activate()
703 tmc->idle = false; in tmigr_cpu_activate()
705 raw_spin_unlock(&tmc->lock); in tmigr_cpu_activate()
709 * Returns true if there is nothing to be propagated to the next level
711 * @data->firstexp is set to the expiry of the first global event of the (top level of
717 * the documentation at the top.
727 bool remote = data->remote; in tmigr_update_events()
733 raw_spin_lock(&child->lock); in tmigr_update_events()
734 raw_spin_lock_nested(&group->lock, SINGLE_DEPTH_NESTING); in tmigr_update_events()
736 childstate.state = atomic_read(&child->migr_state); in tmigr_update_events()
737 groupstate.state = atomic_read(&group->migr_state); in tmigr_update_events()
745 nextexp = child->next_expiry; in tmigr_update_events()
746 evt = &child->groupevt; in tmigr_update_events()
755 WRITE_ONCE(evt->ignore, ignore); in tmigr_update_events()
757 nextexp = data->nextexp; in tmigr_update_events()
759 first_childevt = evt = data->evt; in tmigr_update_events()
760 ignore = evt->ignore; in tmigr_update_events()
767 * expiry" in the documentation at the top). in tmigr_update_events()
772 * - When entering this path by tmigr_new_timer(), @evt->ignore in tmigr_update_events()
774 * - tmigr_inactive_up() takes care of the propagation by in tmigr_update_events()
777 * locking at this level, because the upper walking call to in tmigr_update_events()
782 * single level so @group is the top level group, make sure the in tmigr_update_events()
786 if (ignore && !remote && group->parent) in tmigr_update_events()
789 raw_spin_lock(&group->lock); in tmigr_update_events()
792 groupstate.state = atomic_read(&group->migr_state); in tmigr_update_events()
799 if (timerqueue_node_queued(&evt->nextevt)) { in tmigr_update_events()
800 if ((evt->nextevt.expires == nextexp) && !ignore) { in tmigr_update_events()
802 evt->cpu = first_childevt->cpu; in tmigr_update_events()
806 if (!timerqueue_del(&group->events, &evt->nextevt)) in tmigr_update_events()
807 WRITE_ONCE(group->next_expiry, KTIME_MAX); in tmigr_update_events()
821 * of the group needs to be propagated to a higher level to in tmigr_update_events()
827 evt->nextevt.expires = nextexp; in tmigr_update_events()
828 evt->cpu = first_childevt->cpu; in tmigr_update_events()
830 if (timerqueue_add(&group->events, &evt->nextevt)) in tmigr_update_events()
831 WRITE_ONCE(group->next_expiry, nextexp); in tmigr_update_events()
835 if (!group->parent && (groupstate.migrator == TMIGR_NONE)) { in tmigr_update_events()
840 * handling. First timer in top level group which needs to be in tmigr_update_events()
841 * handled when top level group is not active, is calculated in tmigr_update_events()
848 * The top level group is idle and it has to be ensured the in tmigr_update_events()
854 data->firstexp = tmigr_next_groupevt_expires(group); in tmigr_update_events()
861 raw_spin_unlock(&group->lock); in tmigr_update_events()
864 raw_spin_unlock(&child->lock); in tmigr_update_events()
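
The fragments above are the heart of the per-level update: the child's first event is (re)queued in the parent, and the walk only has to continue when the parent's own first event may have changed. A strongly simplified user-space sketch of that decision (invented types, no locking, no ignore/remote/active handling), just to show why returning true lets walk_groups() stop early:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define KTIME_MAX	INT64_MAX

struct grp {
	int64_t		child_first[8];	/* first event of each child */
	unsigned int	nr_children;
	int64_t		next_expiry;	/* earliest of child_first[] */
};

/*
 * Update @child's first event in @g. Returns true when the group's own
 * first event did not change, i.e. nothing needs to be propagated further.
 */
static bool grp_update_event(struct grp *g, unsigned int child, int64_t expires)
{
	int64_t old = g->next_expiry;

	g->child_first[child] = expires;

	g->next_expiry = KTIME_MAX;
	for (unsigned int i = 0; i < g->nr_children; i++) {
		if (g->child_first[i] < g->next_expiry)
			g->next_expiry = g->child_first[i];
	}

	return g->next_expiry == old;
}

int main(void)
{
	struct grp g = {
		.child_first = { 500, 300 },
		.nr_children = 2,
		.next_expiry = 300,
	};

	/* Child 0 queues an earlier event: the parent level must learn about it. */
	printf("stop walk: %d\n", grp_update_event(&g, 0, 200));	/* 0 */

	/* Child 1 moves later than the current first event: the walk can stop. */
	printf("stop walk: %d\n", grp_update_event(&g, 1, 400));	/* 1 */
	return 0;
}
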
885 .evt = &tmc->cpuevt }; in tmigr_new_timer()
887 lockdep_assert_held(&tmc->lock); in tmigr_new_timer()
889 if (tmc->remote) in tmigr_new_timer()
894 tmc->cpuevt.ignore = false; in tmigr_new_timer()
912 raw_spin_lock_irq(&tmc->lock); in tmigr_handle_remote_cpu()
929 if (!tmc->online || tmc->remote || tmc->cpuevt.ignore || in tmigr_handle_remote_cpu()
930 now < tmc->cpuevt.nextevt.expires) { in tmigr_handle_remote_cpu()
931 raw_spin_unlock_irq(&tmc->lock); in tmigr_handle_remote_cpu()
937 tmc->remote = true; in tmigr_handle_remote_cpu()
938 WRITE_ONCE(tmc->wakeup, KTIME_MAX); in tmigr_handle_remote_cpu()
941 raw_spin_unlock_irq(&tmc->lock); in tmigr_handle_remote_cpu()
947 * Lock ordering needs to be preserved - timer_base locks before tmigr in tmigr_handle_remote_cpu()
949 * the top). During fetching the next timer interrupt, also tmc->lock in tmigr_handle_remote_cpu()
963 raw_spin_lock(&tmc->lock); in tmigr_handle_remote_cpu()
974 * remote expiry" in the documentation at the top) in tmigr_handle_remote_cpu()
976 if (!tmc->online || !tmc->idle) { in tmigr_handle_remote_cpu()
987 data.evt = &tmc->cpuevt; in tmigr_handle_remote_cpu()
993 * after a remote expiry" in the documentation at the top) in tmigr_handle_remote_cpu()
998 tmc->remote = false; in tmigr_handle_remote_cpu()
999 raw_spin_unlock_irq(&tmc->lock); in tmigr_handle_remote_cpu()
1011 jif = data->basej; in tmigr_handle_remote_up()
1012 now = data->now; in tmigr_handle_remote_up()
1014 childmask = data->childmask; in tmigr_handle_remote_up()
1026 raw_spin_lock_irq(&group->lock); in tmigr_handle_remote_up()
1031 unsigned int remote_cpu = evt->cpu; in tmigr_handle_remote_up()
1033 raw_spin_unlock_irq(&group->lock); in tmigr_handle_remote_up()
1043 * (group->next_expiry was updated by tmigr_next_expired_groupevt(), in tmigr_handle_remote_up()
1046 data->firstexp = group->next_expiry; in tmigr_handle_remote_up()
1048 raw_spin_unlock_irq(&group->lock); in tmigr_handle_remote_up()
1054 * tmigr_handle_remote() - Handle global timers of remote idle CPUs
1066 data.childmask = tmc->groupmask; in tmigr_handle_remote()
1074 if (!tmigr_check_migrator(tmc->tmgroup, tmc->groupmask)) { in tmigr_handle_remote()
1080 if (READ_ONCE(tmc->wakeup) == KTIME_MAX) in tmigr_handle_remote()
1087 * Update @tmc->wakeup only at the end and do not reset @tmc->wakeup to in tmigr_handle_remote()
1088 * KTIME_MAX. Even if tmc->lock is not held during the whole remote in tmigr_handle_remote()
1089 * handling, tmc->wakeup is fine to be stale as it is called in in tmigr_handle_remote()
1096 raw_spin_lock_irq(&tmc->lock); in tmigr_handle_remote()
1097 WRITE_ONCE(tmc->wakeup, data.firstexp); in tmigr_handle_remote()
1098 raw_spin_unlock_irq(&tmc->lock); in tmigr_handle_remote()
1107 childmask = data->childmask; in tmigr_requires_handle_remote_up()
1119 * hierarchy walk is not active, proceed the walk to reach the top level in tmigr_requires_handle_remote_up()
1122 if (group->parent && !data->tmc_active) in tmigr_requires_handle_remote_up()
1132 data->firstexp = READ_ONCE(group->next_expiry); in tmigr_requires_handle_remote_up()
1133 if (data->now >= data->firstexp) { in tmigr_requires_handle_remote_up()
1134 data->check = true; in tmigr_requires_handle_remote_up()
1138 raw_spin_lock(&group->lock); in tmigr_requires_handle_remote_up()
1139 data->firstexp = group->next_expiry; in tmigr_requires_handle_remote_up()
1140 if (data->now >= group->next_expiry) { in tmigr_requires_handle_remote_up()
1141 data->check = true; in tmigr_requires_handle_remote_up()
1142 raw_spin_unlock(&group->lock); in tmigr_requires_handle_remote_up()
1145 raw_spin_unlock(&group->lock); in tmigr_requires_handle_remote_up()
1152 * tmigr_requires_handle_remote() - Check the need of remote timer handling
1167 data.childmask = tmc->groupmask; in tmigr_requires_handle_remote()
1169 data.tmc_active = !tmc->idle; in tmigr_requires_handle_remote()
1176 * Check is done lockless as interrupts are disabled and @tmc->idle is in tmigr_requires_handle_remote()
1179 if (!tmc->idle) { in tmigr_requires_handle_remote()
1186 * When the CPU is idle, compare @tmc->wakeup with @data.now. The lock in tmigr_requires_handle_remote()
1192 if (data.now >= READ_ONCE(tmc->wakeup)) in tmigr_requires_handle_remote()
1195 raw_spin_lock(&tmc->lock); in tmigr_requires_handle_remote()
1196 if (data.now >= tmc->wakeup) in tmigr_requires_handle_remote()
1198 raw_spin_unlock(&tmc->lock); in tmigr_requires_handle_remote()
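
A user-space sketch of the lockless part of that check (the names are stand-ins; in the kernel the lockless compare uses READ_ONCE() and, as the fragments above show, a locked variant of the same compare exists as well): the tick path only compares "now" against the cached wakeup value, and a stale value is harmless because it merely means the decision is revisited by the locked handling path.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define KTIME_MAX	INT64_MAX

/*
 * Illustrative stand-in for tmc->wakeup: the next point in time at which
 * this idle CPU would have to care about remote expiry again.
 */
static _Atomic int64_t wakeup = KTIME_MAX;

/* Cheap check from the tick path: no lock, just compare against the cache. */
static int remote_handling_due(int64_t now)
{
	return now >= atomic_load_explicit(&wakeup, memory_order_relaxed);
}

int main(void)
{
	atomic_store(&wakeup, 1000);

	printf("now=999  -> %d\n", remote_handling_due(999));	/* 0: nothing to do */
	printf("now=1000 -> %d\n", remote_handling_due(1000));	/* 1: handling due  */
	return 0;
}
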
1205 * tmigr_cpu_new_timer() - enqueue next global timer into hierarchy (idle tmc)
1210 * and thereby the timer idle path is executed once more. @tmc->wakeup
1225 raw_spin_lock(&tmc->lock); in tmigr_cpu_new_timer()
1227 ret = READ_ONCE(tmc->wakeup); in tmigr_cpu_new_timer()
1229 if (nextexp != tmc->cpuevt.nextevt.expires || in tmigr_cpu_new_timer()
1230 tmc->cpuevt.ignore) { in tmigr_cpu_new_timer()
1236 WRITE_ONCE(tmc->wakeup, ret); in tmigr_cpu_new_timer()
1240 raw_spin_unlock(&tmc->lock); in tmigr_cpu_new_timer()
1252 childmask = data->childmask; in tmigr_inactive_up()
1261 curstate.state = atomic_read_acquire(&group->migr_state); in tmigr_inactive_up()
1265 childstate.state = atomic_read(&child->migr_state); in tmigr_inactive_up()
1299 if (atomic_try_cmpxchg(&group->migr_state, &curstate.state, newstate.state)) { in tmigr_inactive_up()
1313 data->remote = false; in tmigr_inactive_up()
1325 .evt = &tmc->cpuevt, in __tmigr_cpu_deactivate()
1326 .childmask = tmc->groupmask }; in __tmigr_cpu_deactivate()
1334 tmc->cpuevt.ignore = false; in __tmigr_cpu_deactivate()
1341 * tmigr_cpu_deactivate() - Put current CPU into inactive state
1347 * from the hierarchy if this CPU is the top level migrator or the hierarchy is
1358 raw_spin_lock(&tmc->lock); in tmigr_cpu_deactivate()
1362 tmc->idle = true; in tmigr_cpu_deactivate()
1368 WRITE_ONCE(tmc->wakeup, ret); in tmigr_cpu_deactivate()
1371 raw_spin_unlock(&tmc->lock); in tmigr_cpu_deactivate()
1376 * tmigr_quick_check() - Quick forecast of next tmigr event when CPU wants to
1381 * * KTIME_MAX - when it is probable that nothing has to be done (not
1382 * the only one in the level 0 group; and if it is the
1383 * only one in level 0 group, but there are more than a
1384 * single group active on the way to top level)
1385 * * nextevt - when CPU is offline and has to handle timer on its own
1386 * or when on the way to top in every group only a single
1388 * next_expiry encountered while walking up to top level.
1389 * * next_expiry - value of lowest expiry encountered while walking groups
1396 struct tmigr_group *group = tmc->tmgroup; in tmigr_quick_check()
1401 if (WARN_ON_ONCE(tmc->idle)) in tmigr_quick_check()
1404 if (!tmigr_check_migrator_and_lonely(tmc->tmgroup, tmc->groupmask)) in tmigr_quick_check()
1413 * from bottom to the top because the CPU's event is ignored in tmigr_quick_check()
1414 * up to the top and its siblings' events are not propagated upwards. in tmigr_quick_check()
1417 nextevt = min_t(u64, nextevt, READ_ONCE(group->next_expiry)); in tmigr_quick_check()
1418 if (!group->parent) in tmigr_quick_check()
1421 group = group->parent; in tmigr_quick_check()
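
Putting the loop fragments above back together as a user-space sketch (struct and helper names invented): the forecast is the minimum next_expiry seen while climbing from the CPU's group to the top, and it gives up with KTIME_MAX as soon as a group on the path has more than one active child, because some other active CPU can take over the migrator duty then.

#include <stdint.h>
#include <stdio.h>

#define KTIME_MAX	INT64_MAX

struct grp {
	struct grp	*parent;
	int64_t		next_expiry;	/* earliest queued child event */
	int		active_children;
};

/* Forecast of the first event this CPU would have to serve when going idle. */
static int64_t quick_check(struct grp *group, int64_t nextevt)
{
	do {
		/* More than one active child: someone else can take over. */
		if (group->active_children > 1)
			return KTIME_MAX;

		if (group->next_expiry < nextevt)
			nextevt = group->next_expiry;

		group = group->parent;
	} while (group);

	return nextevt;
}

int main(void)
{
	struct grp top  = { .parent = NULL, .next_expiry = 500, .active_children = 1 };
	struct grp lvl0 = { .parent = &top, .next_expiry = 800, .active_children = 1 };

	/* The CPU's own next global event is at 900; group events at 800 and 500. */
	printf("forecast = %lld\n", (long long)quick_check(&lvl0, 900));	/* 500 */
	return 0;
}
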
1428 * tmigr_trigger_active() - trigger a CPU to become active again
1438 WARN_ON_ONCE(!tmc->online || tmc->idle); in tmigr_trigger_active()
1449 raw_spin_lock_irq(&tmc->lock); in tmigr_cpu_offline()
1450 tmc->online = false; in tmigr_cpu_offline()
1451 WRITE_ONCE(tmc->wakeup, KTIME_MAX); in tmigr_cpu_offline()
1459 raw_spin_unlock_irq(&tmc->lock); in tmigr_cpu_offline()
1474 if (WARN_ON_ONCE(!tmc->tmgroup)) in tmigr_cpu_online()
1475 return -EINVAL; in tmigr_cpu_online()
1477 raw_spin_lock_irq(&tmc->lock); in tmigr_cpu_online()
1479 tmc->idle = timer_base_is_idle(); in tmigr_cpu_online()
1480 if (!tmc->idle) in tmigr_cpu_online()
1482 tmc->online = true; in tmigr_cpu_online()
1483 raw_spin_unlock_irq(&tmc->lock); in tmigr_cpu_online()
1492 raw_spin_lock_init(&group->lock); in tmigr_init_group()
1494 group->level = lvl; in tmigr_init_group()
1495 group->numa_node = lvl < tmigr_crossnode_level ? node : NUMA_NO_NODE; in tmigr_init_group()
1497 group->num_children = 0; in tmigr_init_group()
1502 atomic_set(&group->migr_state, s.state); in tmigr_init_group()
1505 * If this is a new top-level, prepare its groupmask in advance. in tmigr_init_group()
1506 * This avoids accidents where yet another new top-level is in tmigr_init_group()
1510 group->groupmask = BIT(0); in tmigr_init_group()
1512 * The previous top level has prepared its groupmask already, in tmigr_init_group()
1516 group->num_children = 1; in tmigr_init_group()
1519 timerqueue_init_head(&group->events); in tmigr_init_group()
1520 timerqueue_init(&group->groupevt.nextevt); in tmigr_init_group()
1521 group->groupevt.nextevt.expires = KTIME_MAX; in tmigr_init_group()
1522 WRITE_ONCE(group->next_expiry, KTIME_MAX); in tmigr_init_group()
1523 group->groupevt.ignore = true; in tmigr_init_group()
1536 * If @lvl is below the cross NUMA node level, check whether in tmigr_get_group()
1539 if (lvl < tmigr_crossnode_level && tmp->numa_node != node) in tmigr_get_group()
1543 if (tmp->num_children >= TMIGR_CHILDREN_PER_GROUP) in tmigr_get_group()
1548 * siblings end up in the same group of the lowest level of the in tmigr_get_group()
1563 return ERR_PTR(-ENOMEM); in tmigr_get_group()
1568 list_add(&group->list, &tmigr_level_list[lvl]); in tmigr_get_group()
1579 raw_spin_lock_irq(&child->lock); in tmigr_connect_child_parent()
1580 raw_spin_lock_nested(&parent->lock, SINGLE_DEPTH_NESTING); in tmigr_connect_child_parent()
1584 * @child is the old top and @parent the new one. In this in tmigr_connect_child_parent()
1585 * case groupmask is pre-initialized and @child already in tmigr_connect_child_parent()
1589 WARN_ON_ONCE(child->groupmask != BIT(0) || parent->num_children != 2); in tmigr_connect_child_parent()
1592 child->groupmask = BIT(parent->num_children++); in tmigr_connect_child_parent()
1600 smp_store_release(&child->parent, parent); in tmigr_connect_child_parent()
1602 raw_spin_unlock(&parent->lock); in tmigr_connect_child_parent()
1603 raw_spin_unlock_irq(&child->lock); in tmigr_connect_child_parent()
1616 * the lowest level (and not higher than one level below the current in tmigr_connect_child_parent()
1617 * top level), then they are not active. They will be set active when in tmigr_connect_child_parent()
1620 * * But if a new group above the current top level is required, it is in tmigr_connect_child_parent()
1623 * executed with the formerly top level group (child) and the newly in tmigr_connect_child_parent()
1629 * the CPU executing the setup will be responsible up to current top in tmigr_connect_child_parent()
1630 * level group. And the next time it goes inactive, it will release in tmigr_connect_child_parent()
1634 data.childmask = child->groupmask; in tmigr_connect_child_parent()
1637 * There is only one new level at a time (which is protected by in tmigr_connect_child_parent()
1640 * uppermost level. Otherwise something went wrong! in tmigr_connect_child_parent()
1642 WARN_ON(!tmigr_active_up(parent, child, &data) && parent->parent); in tmigr_connect_child_parent()
1648 int top = 0, err = 0, i = 0; in tmigr_setup_groups() local
1653 return -ENOMEM; in tmigr_setup_groups()
1662 top = i; in tmigr_setup_groups()
1669 * The loop is aborted as soon as the highest level, which might in tmigr_setup_groups()
1673 if (group->parent || list_is_singular(&tmigr_level_list[i - 1])) in tmigr_setup_groups()
1679 WARN_ON_ONCE(!err && !group->parent && !list_is_singular(&tmigr_level_list[top])); in tmigr_setup_groups()
1682 group = stack[--i]; in tmigr_setup_groups()
1685 list_del(&group->list); in tmigr_setup_groups()
1690 WARN_ON_ONCE(i != group->level); in tmigr_setup_groups()
1693 * Update tmc -> group / child -> group connection in tmigr_setup_groups()
1698 raw_spin_lock_irq(&group->lock); in tmigr_setup_groups()
1700 tmc->tmgroup = group; in tmigr_setup_groups()
1701 tmc->groupmask = BIT(group->num_children++); in tmigr_setup_groups()
1703 raw_spin_unlock_irq(&group->lock); in tmigr_setup_groups()
1710 child = stack[i - 1]; in tmigr_setup_groups()
1715 /* check if uppermost level was newly created */ in tmigr_setup_groups()
1716 if (top != i) in tmigr_setup_groups()
1719 WARN_ON_ONCE(top == 0); in tmigr_setup_groups()
1721 lvllist = &tmigr_level_list[top]; in tmigr_setup_groups()
1724 * Newly created root level should have accounted the upcoming in tmigr_setup_groups()
1725 * CPU's child group and pre-accounted the old root. in tmigr_setup_groups()
1727 if (group->num_children == 2 && list_is_singular(lvllist)) { in tmigr_setup_groups()
1731 * it may spuriously activate the old top level group inside in tmigr_setup_groups()
1732 * the new one (regardless of whether the old top level group is in tmigr_setup_groups()
1737 lvllist = &tmigr_level_list[top - 1]; in tmigr_setup_groups()
1739 if (child->parent) in tmigr_setup_groups()
1770 if (tmc->tmgroup) in tmigr_cpu_prepare()
1773 raw_spin_lock_init(&tmc->lock); in tmigr_cpu_prepare()
1774 timerqueue_init(&tmc->cpuevt.nextevt); in tmigr_cpu_prepare()
1775 tmc->cpuevt.nextevt.expires = KTIME_MAX; in tmigr_cpu_prepare()
1776 tmc->cpuevt.ignore = true; in tmigr_cpu_prepare()
1777 tmc->cpuevt.cpu = cpu; in tmigr_cpu_prepare()
1778 tmc->remote = false; in tmigr_cpu_prepare()
1779 WRITE_ONCE(tmc->wakeup, KTIME_MAX); in tmigr_cpu_prepare()
1785 if (tmc->groupmask == 0) in tmigr_cpu_prepare()
1786 return -EINVAL; in tmigr_cpu_prepare()
1796 int ret = -ENOMEM; in tmigr_init()
1827 * If a NUMA node spawns more than one CPU level group then the next in tmigr_init()
1828 * level(s) of the hierarchy contains groups which handle all CPU groups in tmigr_init()
1829 * of the same NUMA node. The level above goes across NUMA nodes. Store in tmigr_init()
1830 * this information for the setup code to decide in which level node in tmigr_init()
1843 " %d crossnode level\n", in tmigr_init()