/*
 * CDDL HEADER START
 *
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source. A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2017, 2018 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/aggsum.h>

/*
 * Aggregate-sum counters are a form of fanned-out counter, used when atomic
 * instructions on a single field cause enough CPU cache line contention to
 * slow system performance. Due to their increased overhead and the expense
 * involved with precisely reading from them, they should only be used in
 * cases where the write rate (increment/decrement) is much higher than the
 * read rate (get value).
 *
 * Aggregate-sum counters consist of two basic parts: the core and the
 * buckets. The core counter contains a lock for the entire counter, as well
 * as the current upper and lower bounds on the value of the counter. The
 * aggsum_bucket structure contains a per-bucket lock to protect the contents
 * of the bucket, the current amount that this bucket has changed from the
 * global counter (called the delta), and the amount of increment and
 * decrement we have "borrowed" from the core counter.
 *
 * The basic operation of an aggsum is simple. Threads that wish to modify
 * the counter will modify one bucket's counter (determined by their current
 * CPU, to help minimize lock and cache contention). If the bucket already
 * has sufficient capacity borrowed from the core structure to handle their
 * request, they simply modify the delta and return. If the bucket does not,
 * we clear the bucket's current state (to prevent the borrowed amounts from
 * getting too large), and borrow more from the core counter. Borrowing is
 * done by adding to the upper bound (or subtracting from the lower bound) of
 * the core counter, and setting the borrow value for the bucket to the
 * amount added (or subtracted). Clearing the bucket is the opposite; we add
 * the current delta to both the lower and upper bounds of the core counter,
 * subtract the borrowed increment from the upper bound, and add the borrowed
 * decrement to the lower bound. Note that only borrowing and clearing
 * require access to the core counter; since all other operations access
 * CPU-local resources, performance can be much higher than with a
 * traditional counter.
 *
 * Threads that wish to read from the counter have a slightly more
 * challenging task. It is fast to determine the upper and lower bounds of
 * the aggsum; this does not require grabbing any locks. This suffices for
 * cases where an approximation of the aggsum's value is acceptable. However,
 * if one needs to know whether some specific value is above or below the
 * current value of the aggsum, they invoke aggsum_compare(). This function
 * operates by repeatedly comparing the target value to the upper and lower
 * bounds of the aggsum, and then clearing a bucket. This proceeds until the
 * target is outside of the upper and lower bounds and we return a response,
 * or the last bucket has been cleared and we know that the target is equal
 * to the aggsum's value. Finally, the most expensive operation is
 * determining the precise value of the aggsum. To do this, we clear every
 * bucket and then return the upper bound (which must be equal to the lower
 * bound). What makes aggsum_compare() and aggsum_value() expensive is
 * clearing buckets. This involves grabbing the global lock (serializing
 * against themselves and borrow operations), grabbing a bucket's lock
 * (preventing threads on those CPUs from modifying their delta), and zeroing
 * out the borrowed value (forcing that thread to borrow on its next request,
 * which will also be expensive). This is what makes aggsums well suited for
 * write-many read-rarely operations.
 *
 * Note that aggsums do not expand if more CPUs are hot-added. In that case,
 * we will have less fanout than boot_ncpus, but we don't want to always
 * reserve the RAM necessary to create the extra slots for additional CPUs up
 * front, and dynamically adding them is a complex task.
 */
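
/*
 * For illustration, a minimal usage sketch; the counter and the "limit"
 * variable here are hypothetical, not part of this file:
 *
 *	aggsum_t as;
 *
 *	aggsum_init(&as, 0);
 *	aggsum_add(&as, 4096);		(hot path; touches a CPU-local bucket)
 *	aggsum_add(&as, -512);		(decrements work the same way)
 *	if (aggsum_compare(&as, limit) > 0)
 *		...			(usually just two unlocked reads)
 *	(void) aggsum_value(&as);	(expensive; flushes every bucket)
 *	aggsum_fini(&as);
 */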

/*
 * We will borrow aggsum_borrow_multiplier times the current request, so we
 * will have to get the as_lock approximately every aggsum_borrow_multiplier
 * calls to aggsum_add().
 */
static uint_t aggsum_borrow_multiplier = 10;

void
aggsum_init(aggsum_t *as, uint64_t value)
{
	bzero(as, sizeof (*as));
	as->as_lower_bound = as->as_upper_bound = value;
	mutex_init(&as->as_lock, NULL, MUTEX_DEFAULT, NULL);
	as->as_numbuckets = boot_ncpus;
	as->as_buckets = kmem_zalloc(boot_ncpus * sizeof (aggsum_bucket_t),
	    KM_SLEEP);
	for (int i = 0; i < as->as_numbuckets; i++) {
		mutex_init(&as->as_buckets[i].asc_lock,
		    NULL, MUTEX_DEFAULT, NULL);
	}
}

void
aggsum_fini(aggsum_t *as)
{
	for (int i = 0; i < as->as_numbuckets; i++)
		mutex_destroy(&as->as_buckets[i].asc_lock);
	kmem_free(as->as_buckets, as->as_numbuckets * sizeof (aggsum_bucket_t));
	mutex_destroy(&as->as_lock);
}

int64_t
aggsum_lower_bound(aggsum_t *as)
{
	return (as->as_lower_bound);
}

int64_t
aggsum_upper_bound(aggsum_t *as)
{
	return (as->as_upper_bound);
}

static void
aggsum_flush_bucket(aggsum_t *as, struct aggsum_bucket *asb)
{
	ASSERT(MUTEX_HELD(&as->as_lock));
	ASSERT(MUTEX_HELD(&asb->asc_lock));

	/*
	 * We use atomic instructions for this because we read the upper and
	 * lower bounds without the lock, so we need stores to be atomic.
	 */
	atomic_add_64((volatile uint64_t *)&as->as_lower_bound,
	    asb->asc_delta + asb->asc_borrowed);
	atomic_add_64((volatile uint64_t *)&as->as_upper_bound,
	    asb->asc_delta - asb->asc_borrowed);
	asb->asc_delta = 0;
	asb->asc_borrowed = 0;
}

uint64_t
aggsum_value(aggsum_t *as)
{
	int64_t rv;

	mutex_enter(&as->as_lock);
	if (as->as_lower_bound == as->as_upper_bound) {
		/* The bounds have already converged; every bucket is clean. */
		rv = as->as_lower_bound;
		for (int i = 0; i < as->as_numbuckets; i++) {
			ASSERT0(as->as_buckets[i].asc_delta);
			ASSERT0(as->as_buckets[i].asc_borrowed);
		}
		mutex_exit(&as->as_lock);
		return (rv);
	}
	/* Flush every bucket so the bounds collapse to the exact value. */
	for (int i = 0; i < as->as_numbuckets; i++) {
		struct aggsum_bucket *asb = &as->as_buckets[i];
		mutex_enter(&asb->asc_lock);
		aggsum_flush_bucket(as, asb);
		mutex_exit(&asb->asc_lock);
	}
	VERIFY3U(as->as_lower_bound, ==, as->as_upper_bound);
	rv = as->as_lower_bound;
	mutex_exit(&as->as_lock);

	return (rv);
}
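
/*
 * A worked example of the borrowing below, assuming a fresh aggsum
 * initialized to 0 and the default aggsum_borrow_multiplier of 10:
 * aggsum_add(as, 5) misses the fast path and borrows 10 * 5 = 50, leaving
 * the bucket with asc_delta = 0 and asc_borrowed = 50, and the core bounds
 * at [-45, 55] (the delta of 5 is folded into both bounds, then 50 is
 * subtracted from the lower bound and added to the upper). A subsequent
 * aggsum_add(as, 3) on the same CPU fits within the borrowed 50 and merely
 * sets asc_delta to 3 under the bucket lock. A later flush adds 3 + 50 to
 * the lower bound and 3 - 50 to the upper bound, collapsing both to the
 * true value of 8.
 */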
void
aggsum_add(aggsum_t *as, int64_t delta)
{
	struct aggsum_bucket *asb;
	int64_t borrow;

	asb = &as->as_buckets[CPU_SEQID_UNSTABLE % as->as_numbuckets];

	/* Try fast path if we already borrowed enough before. */
	mutex_enter(&asb->asc_lock);
	if (asb->asc_delta + delta <= (int64_t)asb->asc_borrowed &&
	    asb->asc_delta + delta >= -(int64_t)asb->asc_borrowed) {
		asb->asc_delta += delta;
		mutex_exit(&asb->asc_lock);
		return;
	}
	mutex_exit(&asb->asc_lock);

	/*
	 * We haven't borrowed enough. Take the global lock and borrow
	 * considering what is requested now and what we borrowed before.
	 */
	borrow = (delta < 0 ? -delta : delta) * aggsum_borrow_multiplier;
	mutex_enter(&as->as_lock);
	mutex_enter(&asb->asc_lock);
	delta += asb->asc_delta;
	asb->asc_delta = 0;
	/*
	 * If the requested borrow meets or exceeds what the bucket already
	 * holds, borrow the difference; otherwise return a quarter of the
	 * excess to the core (borrow goes negative here).
	 */
	if (borrow >= asb->asc_borrowed)
		borrow -= asb->asc_borrowed;
	else
		borrow = (borrow - (int64_t)asb->asc_borrowed) / 4;
	asb->asc_borrowed += borrow;
	atomic_add_64((volatile uint64_t *)&as->as_lower_bound,
	    delta - borrow);
	atomic_add_64((volatile uint64_t *)&as->as_upper_bound,
	    delta + borrow);
	mutex_exit(&asb->asc_lock);
	mutex_exit(&as->as_lock);
}

/*
 * Compare the aggsum value to target efficiently. Returns -1 if the value
 * represented by the aggsum is less than target, 1 if it's greater, and 0 if
 * they are equal.
 */
int
aggsum_compare(aggsum_t *as, uint64_t target)
{
	if (as->as_upper_bound < target)
		return (-1);
	if (as->as_lower_bound > target)
		return (1);
	mutex_enter(&as->as_lock);
	/* Flush buckets one at a time until the bounds exclude the target. */
	for (int i = 0; i < as->as_numbuckets; i++) {
		struct aggsum_bucket *asb = &as->as_buckets[i];
		mutex_enter(&asb->asc_lock);
		aggsum_flush_bucket(as, asb);
		mutex_exit(&asb->asc_lock);
		if (as->as_upper_bound < target) {
			mutex_exit(&as->as_lock);
			return (-1);
		}
		if (as->as_lower_bound > target) {
			mutex_exit(&as->as_lock);
			return (1);
		}
	}
	VERIFY3U(as->as_lower_bound, ==, as->as_upper_bound);
	ASSERT3U(as->as_lower_bound, ==, target);
	mutex_exit(&as->as_lock);
	return (0);
}
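
/*
 * For example, a hypothetical consumer tracking cache usage (the names below
 * are illustrative, not part of this file) could test for overflow without
 * paying for an exact value:
 *
 *	if (aggsum_compare(&cache_size, cache_limit) > 0)
 *		(begin evicting)
 *
 * When the target lies clearly outside the bounds, such a check costs only
 * two unlocked reads; buckets are flushed one at a time only while the
 * target still falls between the bounds.
 */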