/*
 * RCU segmented callback lists, internal-to-rcu header file
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2017
 *
 * Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 */

#include <linux/rcu_segcblist.h>

/*
 * Account for the fact that a previously dequeued callback turned out
 * to be marked as lazy.
 */
static inline void rcu_cblist_dequeued_lazy(struct rcu_cblist *rclp)
{
	rclp->len_lazy--;
}

/*
 * Interim function to return rcu_cblist head pointer.  Longer term, the
 * rcu_cblist will be used more pervasively, removing the need for this
 * function.
 */
static inline struct rcu_head *rcu_cblist_head(struct rcu_cblist *rclp)
{
	return rclp->head;
}

/*
 * Interim function to return rcu_cblist tail pointer.  Longer term, the
 * rcu_cblist will be used more pervasively, removing the need for this
 * function.
 */
static inline struct rcu_head **rcu_cblist_tail(struct rcu_cblist *rclp)
{
	WARN_ON_ONCE(!rclp->head);
	return rclp->tail;
}

void rcu_cblist_init(struct rcu_cblist *rclp);
long rcu_cblist_count_cbs(struct rcu_cblist *rclp, long lim);
struct rcu_head *rcu_cblist_dequeue(struct rcu_cblist *rclp);

/*
 * Is the specified rcu_segcblist structure empty?
 *
 * But careful!  The fact that the ->head field is NULL does not
 * necessarily imply that there are no callbacks associated with
 * this structure.  When callbacks are being invoked, they are
 * removed as a group.  If callback invocation must be preempted,
 * the remaining callbacks will be added back to the list.  Either
 * way, the counts are updated later.
 *
 * So it is often the case that rcu_segcblist_n_cbs() should be used
 * instead.
 */
static inline bool rcu_segcblist_empty(struct rcu_segcblist *rsclp)
{
	return !rsclp->head;
}

/* Return number of callbacks in segmented callback list. */
static inline long rcu_segcblist_n_cbs(struct rcu_segcblist *rsclp)
{
	return READ_ONCE(rsclp->len);
}
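
/*
 * Illustrative usage sketch, not part of this header's API: the function
 * name below is hypothetical and the block is compiled out.  It only shows
 * the point made above: callers asking "are there any callbacks at all?"
 * should usually test the count rather than rcu_segcblist_empty().
 */
#if 0
static bool example_cpu_has_rcu_callbacks(struct rcu_segcblist *rsclp)
{
	/*
	 * rcu_segcblist_empty() can return true while an extracted batch
	 * of callbacks is still being invoked and before ->len has been
	 * adjusted, so rely on the count instead.
	 */
	return rcu_segcblist_n_cbs(rsclp) != 0;
}
#endif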

/* Return number of lazy callbacks in segmented callback list. */
static inline long rcu_segcblist_n_lazy_cbs(struct rcu_segcblist *rsclp)
{
	return rsclp->len_lazy;
}

/* Return number of non-lazy callbacks in segmented callback list. */
static inline long rcu_segcblist_n_nonlazy_cbs(struct rcu_segcblist *rsclp)
{
	return rsclp->len - rsclp->len_lazy;
}

/*
 * Is the specified rcu_segcblist enabled, for example, not corresponding
 * to an offline or callback-offloaded CPU?
 */
static inline bool rcu_segcblist_is_enabled(struct rcu_segcblist *rsclp)
{
	return !!rsclp->tails[RCU_NEXT_TAIL];
}

/*
 * Are all segments following the specified segment of the specified
 * rcu_segcblist structure empty of callbacks?  (The specified
 * segment might well contain callbacks.)
 */
static inline bool rcu_segcblist_restempty(struct rcu_segcblist *rsclp, int seg)
{
	return !*rsclp->tails[seg];
}

/*
 * Interim function to return rcu_segcblist head pointer.  Longer term, the
 * rcu_segcblist will be used more pervasively, removing the need for this
 * function.
 */
static inline struct rcu_head *rcu_segcblist_head(struct rcu_segcblist *rsclp)
{
	return rsclp->head;
}

/*
 * Interim function to return rcu_segcblist tail pointer.  Longer term, the
 * rcu_segcblist will be used more pervasively, removing the need for this
 * function.
 */
static inline struct rcu_head **rcu_segcblist_tail(struct rcu_segcblist *rsclp)
{
	WARN_ON_ONCE(rcu_segcblist_empty(rsclp));
	return rsclp->tails[RCU_NEXT_TAIL];
}

void rcu_segcblist_init(struct rcu_segcblist *rsclp);
void rcu_segcblist_disable(struct rcu_segcblist *rsclp);
bool rcu_segcblist_segempty(struct rcu_segcblist *rsclp, int seg);
bool rcu_segcblist_ready_cbs(struct rcu_segcblist *rsclp);
bool rcu_segcblist_pend_cbs(struct rcu_segcblist *rsclp);
struct rcu_head *rcu_segcblist_dequeue(struct rcu_segcblist *rsclp);
void rcu_segcblist_dequeued_lazy(struct rcu_segcblist *rsclp);
struct rcu_head *rcu_segcblist_first_cb(struct rcu_segcblist *rsclp);
struct rcu_head *rcu_segcblist_first_pend_cb(struct rcu_segcblist *rsclp);
bool rcu_segcblist_new_cbs(struct rcu_segcblist *rsclp);
void rcu_segcblist_enqueue(struct rcu_segcblist *rsclp,
			   struct rcu_head *rhp, bool lazy);
bool rcu_segcblist_entrain(struct rcu_segcblist *rsclp,
			   struct rcu_head *rhp, bool lazy);
void rcu_segcblist_extract_count(struct rcu_segcblist *rsclp,
				 struct rcu_cblist *rclp);
void rcu_segcblist_extract_done_cbs(struct rcu_segcblist *rsclp,
				    struct rcu_cblist *rclp);
void rcu_segcblist_extract_pend_cbs(struct rcu_segcblist *rsclp,
				    struct rcu_cblist *rclp);
void rcu_segcblist_insert_count(struct rcu_segcblist *rsclp,
				struct rcu_cblist *rclp);
void rcu_segcblist_insert_done_cbs(struct rcu_segcblist *rsclp,
				   struct rcu_cblist *rclp);
void rcu_segcblist_insert_pend_cbs(struct rcu_segcblist *rsclp,
				   struct rcu_cblist *rclp);
void rcu_segcblist_advance(struct rcu_segcblist *rsclp, unsigned long seq);
bool rcu_segcblist_accelerate(struct rcu_segcblist *rsclp, unsigned long seq);
bool rcu_segcblist_future_gp_needed(struct rcu_segcblist *rsclp,
				    unsigned long seq);
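
/*
 * Illustrative lifecycle sketch, not part of this header's API: the
 * function name and the gp_seq_* parameters below are hypothetical
 * stand-ins for the real grace-period bookkeeping, and the block is
 * compiled out.  It only shows the intended ordering of the operations
 * declared above: enqueue onto the RCU_NEXT segment, accelerate (assign
 * a future grace period), advance as grace periods complete, then
 * invoke whatever has reached the RCU_DONE segment.  In-tree callers
 * typically extract the ready callbacks into an rcu_cblist before
 * invoking them; the loop below is a simplification.
 */
#if 0
static void example_segcblist_lifecycle(struct rcu_segcblist *rsclp,
					struct rcu_head *rhp,
					unsigned long gp_seq_future,
					unsigned long gp_seq_completed)
{
	struct rcu_head *next;

	/* Queue a new non-lazy callback. */
	rcu_segcblist_enqueue(rsclp, rhp, false);

	/* Associate not-yet-assigned callbacks with a future grace period. */
	(void)rcu_segcblist_accelerate(rsclp, gp_seq_future);

	/*
	 * Once grace periods up to gp_seq_completed have elapsed, move
	 * the corresponding callbacks to the RCU_DONE segment.
	 */
	rcu_segcblist_advance(rsclp, gp_seq_completed);

	/* Invoke callbacks whose grace period has completed. */
	while (rcu_segcblist_ready_cbs(rsclp)) {
		next = rcu_segcblist_dequeue(rsclp);
		next->func(next);
	}
}
#endif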