Lines Matching +full:lock +full:- +full:less

23 * Aggregate-sum counters are a form of fanned-out counter, used when atomic
31 * buckets. The core counter contains a lock for the entire counter, as well
33 * aggsum_bucket structure contains a per-bucket lock to protect the contents of
40 * help minimize lock and cache contention). If the bucket already has
51 * core counter; since all other operations access CPU-local resources,
67 * expensive is clearing buckets. This involves grabbing the global lock
69 * lock (preventing threads on those CPUs from modifying their delta), and
72 * suited for write-many read-rarely operations.
74 * Note that the aggsums do not expand if more CPUs are hot-added. In that
75 * case, we will have less fanout than boot_ncpus, but we don't want to always
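
Pieced together, the matched comment lines above describe a two-level design: a core counter holding a global lock plus lower/upper bounds that bracket the true value, and per-CPU buckets that each hold their own lock, a local delta, and an amount borrowed from the core. The sketch below is an approximation of the structures implied by the fields used in the hits that follow; it is illustration only (pthread_mutex_t stands in for the kernel's kmutex_t, the field widths are guesses, and the cache-line padding the real sys/aggsum.h would carry is omitted):

    #include <pthread.h>
    #include <stdint.h>

    /* One bucket per group of CPUs; everything in it is CPU-local. */
    typedef struct aggsum_bucket {
            pthread_mutex_t asc_lock;       /* protects the two fields below */
            int64_t         asc_delta;      /* local change not yet folded into the core */
            uint64_t        asc_borrowed;   /* slack borrowed from the core counter */
    } aggsum_bucket_t;

    /* The core: a global lock plus bounds that always bracket the true value. */
    typedef struct aggsum {
            pthread_mutex_t as_lock;
            int64_t         as_lower_bound;  /* true value is >= this */
            uint64_t        as_upper_bound;  /* true value is <= this */
            uint64_t        as_numbuckets;
            uint64_t        as_bucketshift;  /* maps a CPU id to a bucket index */
            aggsum_bucket_t *as_buckets;
    } aggsum_t;
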
91 as->as_lower_bound = as->as_upper_bound = value; in aggsum_init()
92 mutex_init(&as->as_lock, NULL, MUTEX_DEFAULT, NULL); in aggsum_init()
97 as->as_bucketshift = highbit64(boot_ncpus / 6) / 2; in aggsum_init()
98 as->as_numbuckets = ((boot_ncpus - 1) >> as->as_bucketshift) + 1; in aggsum_init()
99 as->as_buckets = kmem_zalloc(as->as_numbuckets * in aggsum_init()
101 for (int i = 0; i < as->as_numbuckets; i++) { in aggsum_init()
102 mutex_init(&as->as_buckets[i].asc_lock, in aggsum_init()
110 for (int i = 0; i < as->as_numbuckets; i++) in aggsum_fini()
111 mutex_destroy(&as->as_buckets[i].asc_lock); in aggsum_fini()
112 kmem_free(as->as_buckets, as->as_numbuckets * sizeof (aggsum_bucket_t)); in aggsum_fini()
113 mutex_destroy(&as->as_lock); in aggsum_fini()
119 return (atomic_load_64((volatile uint64_t *)&as->as_lower_bound)); in aggsum_lower_bound()
125 return (atomic_load_64(&as->as_upper_bound)); in aggsum_upper_bound()
134 mutex_enter(&as->as_lock); in aggsum_value()
135 lb = as->as_lower_bound; in aggsum_value()
136 ub = as->as_upper_bound; in aggsum_value()
138 for (int i = 0; i < as->as_numbuckets; i++) { in aggsum_value()
139 ASSERT0(as->as_buckets[i].asc_delta); in aggsum_value()
140 ASSERT0(as->as_buckets[i].asc_borrowed); in aggsum_value()
142 mutex_exit(&as->as_lock); in aggsum_value()
145 for (int i = 0; i < as->as_numbuckets; i++) { in aggsum_value()
146 struct aggsum_bucket *asb = &as->as_buckets[i]; in aggsum_value()
147 if (asb->asc_borrowed == 0) in aggsum_value()
149 mutex_enter(&asb->asc_lock); in aggsum_value()
150 lb += asb->asc_delta + asb->asc_borrowed; in aggsum_value()
151 ub += asb->asc_delta - asb->asc_borrowed; in aggsum_value()
152 asb->asc_delta = 0; in aggsum_value()
153 asb->asc_borrowed = 0; in aggsum_value()
154 mutex_exit(&asb->asc_lock); in aggsum_value()
157 atomic_store_64((volatile uint64_t *)&as->as_lower_bound, lb); in aggsum_value()
158 atomic_store_64(&as->as_upper_bound, lb); in aggsum_value()
159 mutex_exit(&as->as_lock); in aggsum_value()
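
To see why line 158 stores lb into as_upper_bound as well, consider an invented example: the core holds lower = 4 and upper = 16, and the only active bucket has asc_delta = +2 and asc_borrowed = 6. The loop then computes lb = 4 + 2 + 6 = 12 and ub = 16 + 2 - 6 = 12. Once every bucket's delta has been folded back in and its borrow returned, the two bounds necessarily meet at the true value, so either one can be written to both fields.
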
170 asb = &as->as_buckets[(CPU_SEQID_UNSTABLE >> as->as_bucketshift) % in aggsum_add()
171 as->as_numbuckets]; in aggsum_add()
174 mutex_enter(&asb->asc_lock); in aggsum_add()
175 if (asb->asc_delta + delta <= (int64_t)asb->asc_borrowed && in aggsum_add()
176 asb->asc_delta + delta >= -(int64_t)asb->asc_borrowed) { in aggsum_add()
177 asb->asc_delta += delta; in aggsum_add()
178 mutex_exit(&asb->asc_lock); in aggsum_add()
181 mutex_exit(&asb->asc_lock); in aggsum_add()
184 * We haven't borrowed enough. Take the global lock and borrow in aggsum_add()
187 borrow = (delta < 0 ? -delta : delta); in aggsum_add()
188 borrow <<= aggsum_borrow_shift + as->as_bucketshift; in aggsum_add()
189 mutex_enter(&as->as_lock); in aggsum_add()
190 if (borrow >= asb->asc_borrowed) in aggsum_add()
191 borrow -= asb->asc_borrowed; in aggsum_add()
193 borrow = (borrow - (int64_t)asb->asc_borrowed) / 4; in aggsum_add()
194 mutex_enter(&asb->asc_lock); in aggsum_add()
195 delta += asb->asc_delta; in aggsum_add()
196 asb->asc_delta = 0; in aggsum_add()
197 asb->asc_borrowed += borrow; in aggsum_add()
198 mutex_exit(&asb->asc_lock); in aggsum_add()
199 atomic_store_64((volatile uint64_t *)&as->as_lower_bound, in aggsum_add()
200 as->as_lower_bound + delta - borrow); in aggsum_add()
201 atomic_store_64(&as->as_upper_bound, in aggsum_add()
202 as->as_upper_bound + delta + borrow); in aggsum_add()
203 mutex_exit(&as->as_lock); in aggsum_add()
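
Taken together, these fragments suggest the intended calling pattern: cheap, frequent aggsum_add() calls on hot paths, with the expensive flushing read done only occasionally. A hypothetical caller is sketched below; the aggsum_* signatures are inferred from the lines above, and everything else (names, functions) is invented for illustration:

    static aggsum_t example_bytes;

    void
    example_setup(void)
    {
            aggsum_init(&example_bytes, 0);         /* both bounds start at 0 */
    }

    void
    example_io_done(int64_t nbytes)
    {
            aggsum_add(&example_bytes, nbytes);     /* hot path: usually only a bucket lock */
    }

    uint64_t
    example_report(void)
    {
            return (aggsum_value(&example_bytes));  /* rare: takes every lock and flushes */
    }
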
207 * Compare the aggsum value to target efficiently. Returns -1 if the value
208 * represented by the aggsum is less than target, 1 if it's greater, and 0 if
218 if (atomic_load_64(&as->as_upper_bound) < target) in aggsum_compare()
219 return (-1); in aggsum_compare()
220 lb = atomic_load_64((volatile uint64_t *)&as->as_lower_bound); in aggsum_compare()
223 mutex_enter(&as->as_lock); in aggsum_compare()
224 lb = as->as_lower_bound; in aggsum_compare()
225 ub = as->as_upper_bound; in aggsum_compare()
226 for (i = 0; i < as->as_numbuckets; i++) { in aggsum_compare()
227 struct aggsum_bucket *asb = &as->as_buckets[i]; in aggsum_compare()
228 if (asb->asc_borrowed == 0) in aggsum_compare()
230 mutex_enter(&asb->asc_lock); in aggsum_compare()
231 lb += asb->asc_delta + asb->asc_borrowed; in aggsum_compare()
232 ub += asb->asc_delta - asb->asc_borrowed; in aggsum_compare()
233 asb->asc_delta = 0; in aggsum_compare()
234 asb->asc_borrowed = 0; in aggsum_compare()
235 mutex_exit(&asb->asc_lock); in aggsum_compare()
239 if (i >= as->as_numbuckets) in aggsum_compare()
241 atomic_store_64((volatile uint64_t *)&as->as_lower_bound, lb); in aggsum_compare()
242 atomic_store_64(&as->as_upper_bound, ub); in aggsum_compare()
243 mutex_exit(&as->as_lock); in aggsum_compare()
244 return (ub < target ? -1 : (uint64_t)lb > target ? 1 : 0); in aggsum_compare()
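
When a caller only needs to know which side of a threshold the counter is on, this compare path is cheaper than aggsum_value(): if the cached bounds already decide the answer, no bucket lock is taken, and the flush above runs only when the target falls between the bounds. A hypothetical use (example_bytes and limit are invented names):

    if (aggsum_compare(&example_bytes, limit) >= 0) {
            /* counter is at or above limit: throttle or reclaim */
    }
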