Lines Matching full:as (module/zfs/aggsum.c)

31 * buckets. The core counter contains a lock for the entire counter, as well
32 * as the current upper and lower bounds on the value of the counter. The
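The two matched comment lines above describe the split between the core counter and its buckets. As a reader aid (not part of the matched file), here is a minimal userspace sketch of that layout; the pthread mutexes stand in for the kernel kmutex_t, the field names mirror the excerpts below, and the alignment attribute is only illustrative.

#include <stdint.h>
#include <pthread.h>

typedef struct aggsum_bucket {
        pthread_mutex_t asc_lock;   /* protects this bucket only */
        int64_t asc_delta;          /* local change not yet folded into the core */
        uint64_t asc_borrowed;      /* headroom taken from the core bounds */
} __attribute__((aligned(64))) aggsum_bucket_t;

typedef struct aggsum {
        pthread_mutex_t as_lock;    /* protects the whole counter */
        int64_t as_lower_bound;     /* true value is at least this */
        uint64_t as_upper_bound;    /* true value is at most this */
        aggsum_bucket_t *as_buckets;
        uint32_t as_numbuckets;
        uint32_t as_bucketshift;
} aggsum_t;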
88 aggsum_init(aggsum_t *as, uint64_t value) in aggsum_init() argument
90 memset(as, 0, sizeof (*as)); in aggsum_init()
91 as->as_lower_bound = as->as_upper_bound = value; in aggsum_init()
92 mutex_init(&as->as_lock, NULL, MUTEX_DEFAULT, NULL); in aggsum_init()
97 as->as_bucketshift = highbit64(boot_ncpus / 6) / 2; in aggsum_init()
98 as->as_numbuckets = ((boot_ncpus - 1) >> as->as_bucketshift) + 1; in aggsum_init()
99 as->as_buckets = kmem_zalloc(as->as_numbuckets * in aggsum_init()
101 for (int i = 0; i < as->as_numbuckets; i++) { in aggsum_init()
102 mutex_init(&as->as_buckets[i].asc_lock, in aggsum_init()
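The aggsum_init() lines above size the bucket array from boot_ncpus. A standalone sketch of just that arithmetic; highbit64() is reimplemented here with a GCC/Clang builtin and is not the kernel helper:

#include <stdio.h>
#include <stdint.h>

/* 1-based index of the highest set bit, 0 for an input of 0. */
static uint32_t
highbit64(uint64_t x)
{
        return (x == 0 ? 0 : (uint32_t)(64 - __builtin_clzll(x)));
}

int
main(void)
{
        for (uint64_t ncpus = 1; ncpus <= 256; ncpus *= 2) {
                uint32_t shift = highbit64(ncpus / 6) / 2;
                uint64_t nbuckets = ((ncpus - 1) >> shift) + 1;
                printf("%3llu CPUs -> bucketshift %u, %llu buckets\n",
                    (unsigned long long)ncpus, shift,
                    (unsigned long long)nbuckets);
        }
        return (0);
}

With this sizing, groups of 2^as_bucketshift neighboring CPU ids share one bucket (see the CPU_SEQID_UNSTABLE line in aggsum_add() below), trading some write-side contention for fewer buckets to flush on a precise read.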
108 aggsum_fini(aggsum_t *as) in aggsum_fini() argument
110 for (int i = 0; i < as->as_numbuckets; i++) in aggsum_fini()
111 mutex_destroy(&as->as_buckets[i].asc_lock); in aggsum_fini()
112 kmem_free(as->as_buckets, as->as_numbuckets * sizeof (aggsum_bucket_t)); in aggsum_fini()
113 mutex_destroy(&as->as_lock); in aggsum_fini()
117 aggsum_lower_bound(aggsum_t *as) in aggsum_lower_bound() argument
119 return (atomic_load_64((volatile uint64_t *)&as->as_lower_bound)); in aggsum_lower_bound()
123 aggsum_upper_bound(aggsum_t *as) in aggsum_upper_bound() argument
125 return (atomic_load_64(&as->as_upper_bound)); in aggsum_upper_bound()
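aggsum_lower_bound() and aggsum_upper_bound() read the bounds without taking as_lock; the (volatile uint64_t *) cast on the lower bound suggests that field is signed. A C11-atomics analogue of those two accessors (illustrative names, not the kernel atomic_load_64()):

#include <stdint.h>
#include <stdatomic.h>

typedef struct bounds {
        _Atomic int64_t lower;    /* can dip below zero while buckets hold borrows */
        _Atomic uint64_t upper;
} bounds_t;

static int64_t
bounds_lower(bounds_t *b)
{
        return (atomic_load_explicit(&b->lower, memory_order_relaxed));
}

static uint64_t
bounds_upper(bounds_t *b)
{
        return (atomic_load_explicit(&b->upper, memory_order_relaxed));
}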
129 aggsum_value(aggsum_t *as) in aggsum_value() argument
134 mutex_enter(&as->as_lock); in aggsum_value()
135 lb = as->as_lower_bound; in aggsum_value()
136 ub = as->as_upper_bound; in aggsum_value()
138 for (int i = 0; i < as->as_numbuckets; i++) { in aggsum_value()
139 ASSERT0(as->as_buckets[i].asc_delta); in aggsum_value()
140 ASSERT0(as->as_buckets[i].asc_borrowed); in aggsum_value()
142 mutex_exit(&as->as_lock); in aggsum_value()
145 for (int i = 0; i < as->as_numbuckets; i++) { in aggsum_value()
146 struct aggsum_bucket *asb = &as->as_buckets[i]; in aggsum_value()
157 atomic_store_64((volatile uint64_t *)&as->as_lower_bound, lb); in aggsum_value()
158 atomic_store_64(&as->as_upper_bound, lb); in aggsum_value()
159 mutex_exit(&as->as_lock); in aggsum_value()
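The aggsum_value() lines above flush every bucket back into the core so that the two bounds meet at the exact value. A standalone sketch of that arithmetic with locking elided and illustrative names; it assumes the inputs obey the aggsum invariant (true value = lb + sum(delta + borrowed) = ub + sum(delta - borrowed)):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

static int64_t
flush_all_buckets(int64_t lb, uint64_t ub,
    int64_t *delta, uint64_t *borrowed, size_t nbuckets)
{
        for (size_t i = 0; i < nbuckets; i++) {
                lb += delta[i] + (int64_t)borrowed[i];  /* return the borrow, apply the delta */
                ub += (uint64_t)delta[i] - borrowed[i];
                delta[i] = 0;
                borrowed[i] = 0;
        }
        assert((uint64_t)lb == ub);     /* mirrors ASSERT3U(lb, ==, ub) */
        return (lb);
}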
165 aggsum_add(aggsum_t *as, int64_t delta) in aggsum_add() argument
170 asb = &as->as_buckets[(CPU_SEQID_UNSTABLE >> as->as_bucketshift) % in aggsum_add()
171 as->as_numbuckets]; in aggsum_add()
188 borrow <<= aggsum_borrow_shift + as->as_bucketshift; in aggsum_add()
189 mutex_enter(&as->as_lock); in aggsum_add()
199 atomic_store_64((volatile uint64_t *)&as->as_lower_bound, in aggsum_add()
200 as->as_lower_bound + delta - borrow); in aggsum_add()
201 atomic_store_64(&as->as_upper_bound, in aggsum_add()
202 as->as_upper_bound + delta + borrow); in aggsum_add()
203 mutex_exit(&as->as_lock); in aggsum_add()
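aggsum_add() only touches the caller's bucket on the fast path; when the bucket runs out of borrowed headroom it takes as_lock, re-borrows, and widens the global bounds by the new borrow (the atomic_store_64() lines above). A single-threaded sketch of that logic with locking and per-CPU bucket selection elided; borrow_shift plays the role of the aggsum_borrow_shift tunable (value chosen for illustration), and every name here is illustrative:

#include <stdint.h>

struct bucket { int64_t delta; uint64_t borrowed; };
struct counter {
        int64_t lower;          /* as_lower_bound */
        uint64_t upper;         /* as_upper_bound */
        uint32_t bucketshift;   /* as_bucketshift */
        struct bucket b;        /* one bucket stands in for the per-CPU array */
};

static const unsigned borrow_shift = 4;

static void
counter_add(struct counter *c, int64_t delta)
{
        struct bucket *b = &c->b;

        /* Fast path: the pending delta still fits inside what was borrowed. */
        if (b->delta + delta <= (int64_t)b->borrowed &&
            b->delta + delta >= -(int64_t)b->borrowed) {
                b->delta += delta;
                return;
        }

        /* Slow path: size a new borrow from |delta|, then widen the bounds. */
        int64_t borrow = (delta < 0 ? -delta : delta);
        borrow <<= borrow_shift + c->bucketshift;
        if (borrow >= (int64_t)b->borrowed)
                borrow -= (int64_t)b->borrowed;
        else
                borrow = (borrow - (int64_t)b->borrowed) / 4;

        delta += b->delta;      /* fold the bucket's pending delta into the core */
        b->delta = 0;
        b->borrowed += borrow;
        c->lower += delta - borrow;
        c->upper += delta + borrow;
}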
212 aggsum_compare(aggsum_t *as, uint64_t target) in aggsum_compare() argument
218 if (atomic_load_64(&as->as_upper_bound) < target) in aggsum_compare()
220 lb = atomic_load_64((volatile uint64_t *)&as->as_lower_bound); in aggsum_compare()
223 mutex_enter(&as->as_lock); in aggsum_compare()
224 lb = as->as_lower_bound; in aggsum_compare()
225 ub = as->as_upper_bound; in aggsum_compare()
226 for (i = 0; i < as->as_numbuckets; i++) { in aggsum_compare()
227 struct aggsum_bucket *asb = &as->as_buckets[i]; in aggsum_compare()
239 if (i >= as->as_numbuckets) in aggsum_compare()
241 atomic_store_64((volatile uint64_t *)&as->as_lower_bound, lb); in aggsum_compare()
242 atomic_store_64(&as->as_upper_bound, ub); in aggsum_compare()
243 mutex_exit(&as->as_lock); in aggsum_compare()
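aggsum_compare() first tries to answer from the lock-free bounds and only then flushes buckets, one at a time, until the relation to target is decided, which makes it cheaper than a full aggsum_value() whenever the bounds already settle the question. A standalone, single-threaded sketch of that strategy (illustrative names, locking elided):

#include <stddef.h>
#include <stdint.h>

/* Returns -1, 0, or 1 as the counter is below, equal to, or above target. */
static int
compare_to_target(int64_t lb, uint64_t ub,
    int64_t *delta, uint64_t *borrowed, size_t nbuckets, uint64_t target)
{
        /* Cheap checks: the bounds alone may already decide it. */
        if (ub < target)
                return (-1);
        if (lb > 0 && (uint64_t)lb > target)
                return (1);

        /* Flush buckets until the bounds stop straddling the target. */
        for (size_t i = 0; i < nbuckets; i++) {
                lb += delta[i] + (int64_t)borrowed[i];
                ub += (uint64_t)delta[i] - borrowed[i];
                delta[i] = 0;
                borrowed[i] = 0;
                if (ub < target || (lb > 0 && (uint64_t)lb > target))
                        break;
        }
        return (ub < target ? -1 :
            (lb > 0 && (uint64_t)lb > target) ? 1 : 0);
}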