Lines Matching refs:rcu_node

102 /* Dump rcu_node combining tree at boot to verify correct setup. */
110 /* Control rcu_node-tree auto-balancing at boot time. */
150 static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
152 static struct task_struct *rcu_boost_task(struct rcu_node *rnp);
156 static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp);
160 static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf);
161 static void rcu_init_new_rnp(struct rcu_node *rnp_leaf);
229 * permit this function to be invoked without holding the root rcu_node
547 static struct rcu_node *rcu_get_root(void)
669 // handler and that the rcu_node lock is an irq-disabled lock
764 * of the rcu_node ->gp_seq counter with respect to the rcu_data counters.
768 static void rcu_gpnum_ovf(struct rcu_node *rnp, struct rcu_data *rdp)
819 struct rcu_node *rnp = rdp->mynode;
841 * last task on a leaf rcu_node structure exiting its RCU read-side
846 * The rcu_node structure's ->lock is held here, which excludes
854 struct rcu_node *rnp1;
952 static void trace_rcu_this_gp(struct rcu_node *rnp, struct rcu_data *rdp,
968 * rcu_node structure's ->gp_seq_needed field. Returns true if there
971 * The caller must hold the specified rcu_node structure's ->lock, which
976 static bool rcu_start_this_gp(struct rcu_node *rnp_start, struct rcu_data *rdp,
980 struct rcu_node *rnp;
983 * Use funnel locking to either acquire the root rcu_node
988 * end of the grace period will scan the leaf rcu_node structures.
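The rcu_start_this_gp() fragments above (source lines 976-988) refer to funnel locking: the request walks from the starting leaf toward the root, recording the grace period it needs in each node's ->gp_seq_needed and stopping as soon as some node already records an equal or later request. A minimal single-threaded sketch of that shape, with made-up names (struct rnp, start_this_gp) rather than the kernel's, and without the per-node locking and wraparound-safe sequence comparisons the real function uses:

struct rnp {                            /* stand-in for struct rcu_node */
        struct rnp *parent;             /* NULL at the root */
        unsigned long gp_seq_needed;    /* furthest-future GP requested here */
};

/* Record a request for grace period gp_seq_req, walking leaf toward root. */
static int start_this_gp(struct rnp *rnp_start, unsigned long gp_seq_req)
{
        struct rnp *rnp;

        for (rnp = rnp_start; rnp; rnp = rnp->parent) {
                if (rnp->gp_seq_needed >= gp_seq_req)
                        return 0;       /* an earlier request already covers us */
                rnp->gp_seq_needed = gp_seq_req;
        }
        return 1;                       /* reached the root: wake the GP kthread */
}

Most requests terminate at or near a leaf, which is what keeps contention on the root rcu_node's lock bounded.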
1051 static bool rcu_future_gp_cleanup(struct rcu_node *rnp)
1129 * rcu_node structure. This function is idempotent, so it does not hurt
1135 static bool rcu_accelerate_cbs(struct rcu_node *rnp, struct rcu_data *rdp)
1176 * rcu_node structure's ->lock be held. It consults the cached value
1179 * while holding the leaf rcu_node structure's ->lock.
1181 static void rcu_accelerate_cbs_unlocked(struct rcu_node *rnp,
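The comment fragments at 1176-1181 give the reason rcu_accelerate_cbs_unlocked() exists: consult a value cached in the rcu_data structure first, and only take the leaf rcu_node's ->lock when that cached request does not already cover the grace period the new callbacks need. A rough sketch of the fast-path check, with invented names (cpu_data, seq_ge, accelerate_needs_lock) standing in for the kernel's types and its ULONG_CMP_GE() comparison:

struct cpu_data { unsigned long gp_seq_needed; };       /* per-CPU cached GP request */

/* Wraparound-safe "a is at or after b", in the spirit of ULONG_CMP_GE(). */
static int seq_ge(unsigned long a, unsigned long b)
{
        return (long)(a - b) >= 0;
}

/*
 * Fast path: if the grace period already requested on this CPU's behalf
 * covers the one its new callbacks need, they can be tagged without
 * touching the leaf rcu_node at all; only otherwise must the caller take
 * the leaf's ->lock and use the locked variant.
 */
static int accelerate_needs_lock(struct cpu_data *cd, unsigned long needed_gp)
{
        return !seq_ge(cd->gp_seq_needed, needed_gp);
}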
1211 static bool rcu_advance_cbs(struct rcu_node *rnp, struct rcu_data *rdp)
1234 static void __maybe_unused rcu_advance_cbs_nowake(struct rcu_node *rnp,
1240 // The grace period cannot end while we hold the rcu_node lock.
1261 * grace periods. The caller must hold the ->lock of the leaf rcu_node
1265 static bool __note_gp_changes(struct rcu_node *rnp, struct rcu_data *rdp)
1318 struct rcu_node *rnp;
1406 struct rcu_node *rnp = rcu_get_root();
1422 struct rcu_node *rnp = rcu_get_root();
1440 // where caller does not hold the root rcu_node structure's lock.
1444 struct rcu_node *rnp = rcu_get_root();
1457 // caller does not hold the root rcu_node structure's lock.
1461 struct rcu_node *rnp = rcu_get_root();
1802 struct rcu_node *rnp = rcu_get_root();
1848 * the rcu_node tree. Note that this new grace period need not
1863 /* Nothing to do on this leaf rcu_node structure. */
1876 if (!oldmask) { /* First online CPU for rcu_node. */
1888 * done, and if all this rcu_node structure's CPUs are
1889 * still offline, propagate up the rcu_node tree and
1891 * rcu_node structure's CPUs has since come back online,
1908 * Set the quiescent-state-needed bits in all the rcu_node
1910 * order, starting from the root rcu_node structure, relying on the
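Lines 1908-1910 describe rcu_gp_init() arming the quiescent-state bits breadth-first, root first. The kernel can do this with a simple linear walk because the whole combining tree lives in one array laid out level by level. A small self-contained model of that layout and walk (NUM_NODES, struct node, and gp_init_qsmask are illustrative, not the kernel's):

#define NUM_NODES 5     /* e.g. one root plus four leaves in a tiny tree */

struct node {
        unsigned long qsmask;           /* children/CPUs still owing a QS */
        unsigned long qsmaskinit;       /* bits to arm at grace-period start */
};

static struct node tree[NUM_NODES];     /* [0] is the root; leaves follow */

/* Arm every node for the new grace period, root first. */
static void gp_init_qsmask(void)
{
        int i;

        for (i = 0; i < NUM_NODES; i++) /* array order == breadth-first order */
                tree[i].qsmask = tree[i].qsmaskinit;
}

Root-first order matters: a quiescent-state report propagating upward should never find an ancestor that has not yet been armed for the new grace period.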
1957 struct rcu_node *rnp = rcu_get_root();
1981 struct rcu_node *rnp = rcu_get_root();
2020 struct rcu_node *rnp = rcu_get_root();
2051 * Exit the loop if the root rcu_node structure indicates that the grace period
2053 * is required only for single-node rcu_node trees because readers blocking
2054 * the current grace period are queued only on leaf rcu_node structures.
2108 struct rcu_node *rnp = rcu_get_root();
2124 * period as completed in all of the rcu_node structures.
2130 * Propagate new ->gp_seq value to rcu_node structures so that
2135 * the rcu_node structures before the beginning of the next grace
2136 * period is recorded in any of the rcu_node structures.
2278 * to the specified rcu_node structure, though all the CPUs in the group
2279 * must be represented by the same rcu_node structure (which need not be a
2280 * leaf rcu_node structure, though it often will be). The gps parameter
2289 static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
2294 struct rcu_node *rnp_c;
2298 /* Walk up the rcu_node hierarchy. */
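The rcu_report_qs_rnp() fragments (2278-2298) describe the core combining-tree operation: clear the reporting CPUs' bits in one rcu_node, and whenever that empties the node's mask, clear the node's own bit in its parent, repeating up to the root. A compact model of that walk; parent, qsmask, and grpmask mirror the kernel's struct rcu_node fields, but the type and the per-node locking are simplified here:

struct node {
        struct node *parent;            /* NULL at the root */
        unsigned long qsmask;           /* children/CPUs still owing a QS */
        unsigned long grpmask;          /* this node's bit in parent->qsmask */
};

static void report_qs(struct node *np, unsigned long mask)
{
        while (np) {
                np->qsmask &= ~mask;
                if (np->qsmask)         /* others at this level still owe a QS */
                        return;
                mask = np->grpmask;     /* node done: clear its bit one level up */
                np = np->parent;
        }
        /* Root reached with an empty mask: the grace period can now end. */
}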
2348 * on the specified rcu_node structure and that were blocking the current
2354 rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
2359 struct rcu_node *rnp_p;
2373 * Only one rcu_node structure in the tree, so don't
2397 struct rcu_node *rnp;
2678 * Scan the leaf rcu_node structures. For each structure on which all
2688 struct rcu_node *rnp;
2747 struct rcu_node *rnp;
2748 struct rcu_node *rnp_old = NULL;
2765 /* Reached the root of the rcu_node tree, acquire lock. */
2791 struct rcu_node *rnp = rdp->mynode;
3019 * Check and if necessary update the leaf rcu_node structure's
3021 * number of queued RCU callbacks. The caller must hold the leaf rcu_node
3024 static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp)
3036 * Check and if necessary update the leaf rcu_node structure's
3049 struct rcu_node *const rnp = rdp->mynode;
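check_cb_ovld_locked() (3019-3049) keeps a per-leaf bitmask of CPUs whose callback queues have grown past a threshold, so the grace-period kthread can detect overload without visiting every CPU. A simplified sketch of that bookkeeping; cbovldmask and grpmask match the kernel's field names, but the struct layouts and the threshold constant are made up (the kernel compares against its qovld tunable while holding the leaf's ->lock):

#define CB_OVLD_THRESHOLD 10000UL       /* stand-in for the qovld tunable */

struct leaf { unsigned long cbovldmask; };              /* ->lock held by caller */
struct cpu  { unsigned long grpmask; unsigned long n_cbs; };

static void check_cb_ovld_locked(struct cpu *cp, struct leaf *lp)
{
        if (cp->n_cbs >= CB_OVLD_THRESHOLD)
                lp->cbovldmask |= cp->grpmask;          /* mark this CPU overloaded */
        else
                lp->cbovldmask &= ~cp->grpmask;         /* queue drained back down */
}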
4087 struct rcu_node *rnp;
4174 struct rcu_node *rnp = rcu_get_root();
4195 struct rcu_node *rnp;
4325 * function. And this guarantee requires that the root rcu_node structure's
4328 * invoked between the time that the root rcu_node structure's ->gp_seq
4332 * then the root rcu_node structure is the one that needs to be polled.
4336 struct rcu_node *rnp = rcu_get_root();
4338 smp_mb(); // Order against root rcu_node structure grace-period cleanup.
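The comment around 4325-4338 concerns ordering the root rcu_node structure's ->gp_seq against grace-period cleanup so that polled grace-period cookies give correct answers. As a usage-side illustration only (the caller below is hypothetical, but get_state_synchronize_rcu(), poll_state_synchronize_rcu(), and synchronize_rcu() are the real interfaces this ordering backs):

#include <linux/rcupdate.h>

/* Hypothetical caller of the polled grace-period interface. */
static void example_wait_for_prior_readers(void)
{
        unsigned long cookie = get_state_synchronize_rcu();

        /* ... other useful work while the grace period advances ... */

        if (!poll_state_synchronize_rcu(cookie))
                synchronize_rcu();      /* not yet elapsed: wait for it */
        /* All readers that began before the snapshot have now finished. */
}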
4413 struct rcu_node *rnp = rdp->mynode;
4724 * Compute the mask of online CPUs for the specified rcu_node structure.
4725 * This will not be stable unless the rcu_node structure's ->lock is
4729 static unsigned long rcu_rnp_online_cpus(struct rcu_node *rnp)
4799 * All CPUs for the specified rcu_node structure have gone offline,
4803 * the specified rcu_node structure's ->lock held and interrupts disabled.
4804 * This function therefore goes up the tree of rcu_node structures,
4806 * the leaf rcu_node structure's ->qsmaskinit field has already been
4809 * This function does check that the specified rcu_node structure has
4815 static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
4818 struct rcu_node *rnp = rnp_leaf;
4844 * Propagate ->qsmaskinit bits up the rcu_node tree to account for the
4845 * first CPU in a given leaf rcu_node structure coming online. The caller
4846 * must hold the corresponding leaf rcu_node ->lock with interrupts
4849 static void rcu_init_new_rnp(struct rcu_node *rnp_leaf)
4853 struct rcu_node *rnp = rnp_leaf;
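The two comment blocks above (4799-4818 and 4844-4853) describe mirror-image walks over the same ->qsmaskinit bits: rcu_cleanup_dead_rnp() clears a leaf's bit in each ancestor once the leaf has no online CPUs, stopping at the first ancestor that still has other online children, while rcu_init_new_rnp() sets the bit when the first CPU of a leaf comes online, stopping once an ancestor already had some bit set. A simplified, lock-free model of both walks; cleanup_dead_subtree and init_new_subtree are invented names, while parent, qsmaskinit, and grpmask mirror the kernel's struct rcu_node fields:

struct node {
        struct node *parent;            /* NULL at the root */
        unsigned long qsmaskinit;       /* children/CPUs armed at GP start */
        unsigned long grpmask;          /* this node's bit in parent->qsmaskinit */
};

/* Last CPU of this leaf went offline: withdraw the subtree from ancestors. */
static void cleanup_dead_subtree(struct node *leaf)
{
        unsigned long mask = leaf->grpmask;
        struct node *np;

        for (np = leaf->parent; np; np = np->parent) {
                np->qsmaskinit &= ~mask;
                if (np->qsmaskinit)     /* other online children remain here */
                        return;
                mask = np->grpmask;
        }
}

/* First CPU of this leaf came online: advertise the subtree to ancestors. */
static void init_new_subtree(struct node *leaf)
{
        unsigned long mask = leaf->grpmask;
        struct node *np;

        for (np = leaf->parent; np; np = np->parent) {
                unsigned long oldmask = np->qsmaskinit;

                np->qsmaskinit |= mask;
                if (oldmask)            /* ancestors already knew about this node */
                        return;
                mask = np->grpmask;
        }
}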
4897 static void rcu_spawn_exp_par_gp_kworker(struct rcu_node *rnp)
4919 static struct task_struct *rcu_exp_par_gp_task(struct rcu_node *rnp)
4945 static void rcu_spawn_rnp_kthreads(struct rcu_node *rnp)
4970 struct rcu_node *rnp = rcu_get_root();
4988 * Add CPU to leaf rcu_node pending-online bitmask. Any needed
4989 * propagation up the rcu_node tree will happen at the beginning
5014 * Set the per-rcu_node kthread's affinity to cover all CPUs that are
5015 * served by the rcu_node in question. The CPU hotplug lock is still
5029 struct rcu_node *rnp;
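Lines 5014-5015 refer to binding a per-rcu_node kthread to the CPUs that node serves. A leaf covers the CPU range grplo..grphi (real struct rcu_node fields), and only the currently online CPUs, minus any CPU on its way out, should end up in the affinity mask. The sketch below computes such a mask for a simplified leaf type; the function name and the plain-bitmask representation (instead of a cpumask) are my own simplifications:

struct leaf {
        int grplo, grphi;               /* first and last CPU covered by this node */
        unsigned long online_mask;      /* bit (cpu - grplo) set if that CPU is online */
};

/* Which CPUs should this node's kthread be allowed to run on? */
static unsigned long node_kthread_affinity(const struct leaf *lp, int outgoing)
{
        unsigned long cpus = 0;
        int cpu;

        for (cpu = lp->grplo; cpu <= lp->grphi; cpu++)
                if ((lp->online_mask & (1UL << (cpu - lp->grplo))) && cpu != outgoing)
                        cpus |= 1UL << (cpu - lp->grplo);  /* real code sets a cpumask bit */
        return cpus;
}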
5090 struct rcu_node *rnp;
5125 struct rcu_node *rnp;
5170 * the rcu_node tree's ->qsmaskinitnext bit masks.
5183 struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */
5195 /* Remove outgoing CPU from mask in the leaf rcu_node structure. */
5223 struct rcu_node *my_rnp;
5293 struct rcu_node *rnp = rdp->mynode;
5309 struct rcu_node *rnp;
5355 struct rcu_node *rnp;
5404 struct rcu_node *rnp;
5435 struct rcu_node *rnp;
5532 * Compute the rcu_node tree geometry from kernel parameters. This cannot
5579 * and cannot exceed the number of bits in the rcu_node masks.
5591 * Compute number of nodes that can be handled by an rcu_node tree
5619 /* Calculate the total number of rcu_node structures. */
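Lines 5532-5619 cover rcu_init_geometry(), which sizes the combining tree from the CPU count, the leaf fanout (rcu_fanout_leaf / RCU_FANOUT_LEAF), and the interior fanout (RCU_FANOUT). The arithmetic below illustrates the sizing with example numbers; it is a standalone model, not the kernel's code, which builds its per-level counts top-down instead:

#include <stdio.h>

int main(void)
{
        int nr_cpus = 256;              /* example CPU count */
        int leaf_fanout = 16;           /* CPUs per leaf rcu_node */
        int fanout = 64;                /* children per interior rcu_node */
        int nodes, total = 0;

        /* Size the leaf level first, then walk toward the root. */
        for (nodes = (nr_cpus + leaf_fanout - 1) / leaf_fanout;
             ;
             nodes = (nodes + fanout - 1) / fanout) {
                total += nodes;
                printf("level with %d node(s)\n", nodes);
                if (nodes == 1)         /* reached the root */
                        break;
        }
        printf("total rcu_node structures: %d\n", total);
        return 0;
}

With these example numbers the result is 16 leaves plus one root, 17 rcu_node structures in total.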
5626 * Dump out the structure of the rcu_node combining tree associated
5632 struct rcu_node *rnp;
5634 pr_info("rcu_node tree layout dump\n");
5733 /* -After- the rcu_node ->lock fields are initialized! */