/*-
 * Copyright (c) 2014 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/counter.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <machine/cpu.h>

#include "t4_mp_ring.h"

/*
 * i386 lacks the _acq_/_rel_ variants of the 64-bit atomics used below; the
 * fcmpset mappings are included as well because this file uses
 * atomic_fcmpset_acq_64 and atomic_fcmpset_rel_64.
 */
#if defined(__i386__)
#define atomic_cmpset_acq_64 atomic_cmpset_64
#define atomic_cmpset_rel_64 atomic_cmpset_64
#define atomic_fcmpset_acq_64 atomic_fcmpset_64
#define atomic_fcmpset_rel_64 atomic_fcmpset_64
#endif

union ring_state {
	struct {
		uint16_t pidx_head;
		uint16_t pidx_tail;
		uint16_t cidx;
		uint16_t flags;
	};
	uint64_t state;
};

enum {
	IDLE = 0,	/* consumer ran to completion, nothing more to do. */
	BUSY,		/* consumer is running already, or will be shortly. */
	STALLED,	/* consumer stopped due to lack of resources. */
	ABDICATED,	/* consumer stopped even though there was work to be
			   done because it wants another thread to take over. */
};

static inline uint16_t
space_available(struct mp_ring *r, union ring_state s)
{
	uint16_t x = r->size - 1;

	if (s.cidx == s.pidx_head)
		return (x);
	else if (s.cidx > s.pidx_head)
		return (s.cidx - s.pidx_head - 1);
	else
		return (x - s.pidx_head + s.cidx);
}

static inline uint16_t
increment_idx(struct mp_ring *r, uint16_t idx, uint16_t n)
{
	int x = r->size - idx;

	MPASS(x > 0);
	return (x > n ? idx + n : n - x);
}

/* Consumer is about to update the ring's state to s */
static inline uint16_t
state_to_flags(union ring_state s, int abdicate)
{

	if (s.cidx == s.pidx_tail)
		return (IDLE);
	else if (abdicate && s.pidx_tail != s.pidx_head)
		return (ABDICATED);

	return (BUSY);
}
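
/*
 * Worked example (editor's illustration, not part of the original source):
 * the index helpers above treat the ring as circular, with one slot always
 * left empty so that a full ring can be told apart from an empty one.
 * With r->size = 8, cidx = 2 and pidx_head = 6:
 *
 *	space_available() returns (8 - 1) - 6 + 2 = 3, i.e. slots 6, 7 and 0
 *	may still be reserved while slot 1 stays empty.
 *
 *	increment_idx(r, 6, 3) wraps around and returns 3 - (8 - 6) = 1, the
 *	producer index after those three slots have been claimed.
 */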
/*
 * Caller passes in a state, with a guarantee that there is work to do and that
 * all items up to the pidx_tail in the state are visible.
 */
static void
drain_ring(struct mp_ring *r, union ring_state os, uint16_t prev, int budget)
{
	union ring_state ns;
	int n, pending, total;
	uint16_t cidx = os.cidx;
	uint16_t pidx = os.pidx_tail;

	MPASS(os.flags == BUSY);
	MPASS(cidx != pidx);

	if (prev == IDLE)
		counter_u64_add(r->starts, 1);
	pending = 0;
	total = 0;

	while (cidx != pidx) {

		/* Items from cidx to pidx are available for consumption. */
		n = r->drain(r, cidx, pidx);
		if (n == 0) {
			critical_enter();
			os.state = r->state;
			do {
				ns.state = os.state;
				ns.cidx = cidx;
				ns.flags = STALLED;
			} while (atomic_fcmpset_64(&r->state, &os.state,
			    ns.state) == 0);
			critical_exit();
			if (prev != STALLED)
				counter_u64_add(r->stalls, 1);
			else if (total > 0) {
				counter_u64_add(r->restarts, 1);
				counter_u64_add(r->stalls, 1);
			}
			break;
		}
		cidx = increment_idx(r, cidx, n);
		pending += n;
		total += n;

		/*
		 * We update the cidx only if we've caught up with the pidx, the
		 * real cidx is getting too far ahead of the one visible to
		 * everyone else, or we have exceeded our budget.
		 */
		if (cidx != pidx && pending < 64 && total < budget)
			continue;
		critical_enter();
		os.state = r->state;
		do {
			ns.state = os.state;
			ns.cidx = cidx;
			ns.flags = state_to_flags(ns, total >= budget);
		} while (atomic_fcmpset_acq_64(&r->state, &os.state,
		    ns.state) == 0);
		critical_exit();

		if (ns.flags == ABDICATED)
			counter_u64_add(r->abdications, 1);
		if (ns.flags != BUSY) {
			/* Wrong loop exit if we're going to stall. */
			MPASS(ns.flags != STALLED);
			if (prev == STALLED) {
				MPASS(total > 0);
				counter_u64_add(r->restarts, 1);
			}
			break;
		}

		/*
		 * The acquire style atomic above guarantees visibility of items
		 * associated with any pidx change that we notice here.
		 */
		pidx = ns.pidx_tail;
		pending = 0;
	}
}
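
/*
 * Illustrative sketch (editor's addition, not part of the driver): the drain
 * callback is handed the half-open range [cidx, pidx) of r->items and returns
 * how many items it actually consumed; returning 0 is what parks the ring in
 * STALLED above.  Assuming the callback signatures from t4_mp_ring.h take
 * (struct mp_ring *, u_int, u_int) and (struct mp_ring *), and using
 * hypothetical helpers hw_tx() and hw_tx_ready(), a minimal pair could look
 * like this:
 *
 *	static u_int
 *	example_drain(struct mp_ring *r, u_int cidx, u_int pidx)
 *	{
 *		u_int n = 0;
 *
 *		while (cidx != pidx && hw_tx_ready(r->cookie)) {
 *			hw_tx(r->cookie, r->items[cidx]);
 *			if (++cidx == r->size)
 *				cidx = 0;
 *			n++;
 *		}
 *		return (n);
 *	}
 *
 *	static u_int
 *	example_can_drain(struct mp_ring *r)
 *	{
 *		return (hw_tx_ready(r->cookie));
 *	}
 */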
int
mp_ring_alloc(struct mp_ring **pr, int size, void *cookie, ring_drain_t drain,
    ring_can_drain_t can_drain, struct malloc_type *mt, int flags)
{
	struct mp_ring *r;

	/* All idx are 16b so size can be 65536 at most */
	if (pr == NULL || size < 2 || size > 65536 || drain == NULL ||
	    can_drain == NULL)
		return (EINVAL);
	*pr = NULL;
	flags &= M_NOWAIT | M_WAITOK;
	MPASS(flags != 0);

	r = malloc(__offsetof(struct mp_ring, items[size]), mt, flags | M_ZERO);
	if (r == NULL)
		return (ENOMEM);
	r->size = size;
	r->cookie = cookie;
	r->mt = mt;
	r->drain = drain;
	r->can_drain = can_drain;
	r->enqueues = counter_u64_alloc(flags);
	r->drops = counter_u64_alloc(flags);
	r->starts = counter_u64_alloc(flags);
	r->stalls = counter_u64_alloc(flags);
	r->restarts = counter_u64_alloc(flags);
	r->abdications = counter_u64_alloc(flags);
	if (r->enqueues == NULL || r->drops == NULL || r->starts == NULL ||
	    r->stalls == NULL || r->restarts == NULL ||
	    r->abdications == NULL) {
		mp_ring_free(r);
		return (ENOMEM);
	}

	*pr = r;
	return (0);
}

void
mp_ring_free(struct mp_ring *r)
{

	if (r == NULL)
		return;

	if (r->enqueues != NULL)
		counter_u64_free(r->enqueues);
	if (r->drops != NULL)
		counter_u64_free(r->drops);
	if (r->starts != NULL)
		counter_u64_free(r->starts);
	if (r->stalls != NULL)
		counter_u64_free(r->stalls);
	if (r->restarts != NULL)
		counter_u64_free(r->restarts);
	if (r->abdications != NULL)
		counter_u64_free(r->abdications);

	free(r, r->mt);
}
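
/*
 * Usage sketch (editor's addition, not part of the driver): a caller
 * allocates the ring once, enqueues batches of items from any number of
 * threads, and frees it after the ring has gone idle.  The callbacks are the
 * hypothetical examples from the sketch above; M_DEVBUF and the budget value
 * stand in for whatever the driver actually uses.
 *
 *	struct mp_ring *txr;
 *	int rc;
 *
 *	rc = mp_ring_alloc(&txr, 1024, sc, example_drain, example_can_drain,
 *	    M_DEVBUF, M_WAITOK);
 *	if (rc != 0)
 *		return (rc);
 *
 *	rc = mp_ring_enqueue(txr, items, nitems, 4096);	 (from any thread)
 *	...
 *	mp_ring_free(txr);
 */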
/*
 * Enqueue n items and maybe drain the ring for some time.
 *
 * Returns an errno.
 */
int
mp_ring_enqueue(struct mp_ring *r, void **items, int n, int budget)
{
	union ring_state os, ns;
	uint16_t pidx_start, pidx_stop;
	int i;

	MPASS(items != NULL);
	MPASS(n > 0);

	/*
	 * Reserve room for the new items.  Our reservation, if successful, is
	 * from 'pidx_start' to 'pidx_stop'.
	 */
	os.state = r->state;
	for (;;) {
		if (n >= space_available(r, os)) {
			counter_u64_add(r->drops, n);
			MPASS(os.flags != IDLE);
			if (os.flags == STALLED)
				mp_ring_check_drainage(r, 0);
			return (ENOBUFS);
		}
		ns.state = os.state;
		ns.pidx_head = increment_idx(r, os.pidx_head, n);
		critical_enter();
		if (atomic_fcmpset_64(&r->state, &os.state, ns.state))
			break;
		critical_exit();
		cpu_spinwait();
	}
	pidx_start = os.pidx_head;
	pidx_stop = ns.pidx_head;

	/*
	 * Wait for other producers who got in ahead of us to enqueue their
	 * items, one producer at a time.  It is our turn when the ring's
	 * pidx_tail reaches the beginning of our reservation (pidx_start).
	 */
	while (ns.pidx_tail != pidx_start) {
		cpu_spinwait();
		ns.state = r->state;
	}

	/* Now it is our turn to fill up the area we reserved earlier. */
	i = pidx_start;
	do {
		r->items[i] = *items++;
		if (__predict_false(++i == r->size))
			i = 0;
	} while (i != pidx_stop);

	/*
	 * Update the ring's pidx_tail.  The release style atomic guarantees
	 * that the items are visible to any thread that sees the updated pidx.
	 */
	os.state = r->state;
	do {
		ns.state = os.state;
		ns.pidx_tail = pidx_stop;
		ns.flags = BUSY;
	} while (atomic_fcmpset_rel_64(&r->state, &os.state, ns.state) == 0);
	critical_exit();
	counter_u64_add(r->enqueues, n);

	/*
	 * Turn into a consumer if some other thread isn't active as a consumer
	 * already.
	 */
	if (os.flags != BUSY)
		drain_ring(r, ns, os.flags, budget);

	return (0);
}

void
mp_ring_check_drainage(struct mp_ring *r, int budget)
{
	union ring_state os, ns;

	os.state = r->state;
	if (os.flags != STALLED || os.pidx_head != os.pidx_tail ||
	    r->can_drain(r) == 0)
		return;

	MPASS(os.cidx != os.pidx_tail);	/* implied by STALLED */
	ns.state = os.state;
	ns.flags = BUSY;

	/*
	 * The acquire style atomic guarantees visibility of items associated
	 * with the pidx that we read here.
	 */
	if (!atomic_cmpset_acq_64(&r->state, os.state, ns.state))
		return;

	drain_ring(r, ns, os.flags, budget);
}

void
mp_ring_reset_stats(struct mp_ring *r)
{

	counter_u64_zero(r->enqueues);
	counter_u64_zero(r->drops);
	counter_u64_zero(r->starts);
	counter_u64_zero(r->stalls);
	counter_u64_zero(r->restarts);
	counter_u64_zero(r->abdications);
}

int
mp_ring_is_idle(struct mp_ring *r)
{
	union ring_state s;

	s.state = r->state;
	if (s.pidx_head == s.pidx_tail && s.pidx_tail == s.cidx &&
	    s.flags == IDLE)
		return (1);

	return (0);
}
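
/*
 * Usage note (editor's addition, not part of the driver): a ring parked in
 * STALLED makes no forward progress on its own unless a later enqueue drops
 * items and retries the drain, so callers typically poll
 * mp_ring_check_drainage() from a periodic path once resources may have
 * freed up, and consult mp_ring_is_idle() before tearing the ring down.
 * A hypothetical periodic handler might do no more than:
 *
 *	static void
 *	example_tick(void *arg)
 *	{
 *		struct mp_ring *txr = arg;
 *
 *		mp_ring_check_drainage(txr, 4096);
 *	}
 */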