/*-
 * Copyright (c) 2014 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/counter.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <machine/cpu.h>

#include "t4_mp_ring.h"

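/*
 * i386 does not provide the acq/rel variants of the 64-bit cmpset, so the
 * plain atomic_cmpset_64 stands in for both; on x86 a locked cmpxchg8b
 * already carries full-barrier semantics.
 */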
#if defined(__i386__)
#define atomic_cmpset_acq_64 atomic_cmpset_64
#define atomic_cmpset_rel_64 atomic_cmpset_64
#endif

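/*
 * The entire state of the ring is packed into a single 64-bit word so that it
 * can be read and updated with one atomic cmpset:
 *
 * pidx_head: index of the next slot a producer will reserve.
 * pidx_tail: all slots up to (but excluding) this index hold valid items.
 * cidx: index of the next slot the consumer will process.
 * flags: one of IDLE, BUSY, STALLED, or ABDICATED (see below).
 */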
union ring_state {
	struct {
		uint16_t pidx_head;
		uint16_t pidx_tail;
		uint16_t cidx;
		uint16_t flags;
	};
	uint64_t state;
};

enum {
	IDLE = 0,	/* consumer ran to completion, nothing more to do. */
	BUSY,		/* consumer is running already, or will be shortly. */
	STALLED,	/* consumer stopped due to lack of resources. */
	ABDICATED,	/* consumer stopped even though there was work to be
			   done because it wants another thread to take over. */
};

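/*
 * Number of free slots between pidx_head and cidx.  One slot is always left
 * empty so that a full ring can be told apart from an empty one.
 */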
static inline uint16_t
space_available(struct mp_ring *r, union ring_state s)
{
	uint16_t x = r->size - 1;

	if (s.cidx == s.pidx_head)
		return (x);
	else if (s.cidx > s.pidx_head)
		return (s.cidx - s.pidx_head - 1);
	else
		return (x - s.pidx_head + s.cidx);
}

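/* Advance idx by n slots, wrapping around at the end of the ring. */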
static inline uint16_t
increment_idx(struct mp_ring *r, uint16_t idx, uint16_t n)
{
	int x = r->size - idx;

	MPASS(x > 0);
	return (x > n ? idx + n : n - x);
}

/*
 * Consumer is about to update the ring's state to s.  Returns IDLE if the
 * consumer has caught up with the producers, ABDICATED if it is quitting on
 * budget while another producer is mid-enqueue (that producer will take
 * over), and BUSY otherwise.
 */
static inline uint16_t
state_to_flags(union ring_state s, int abdicate)
{

	if (s.cidx == s.pidx_tail)
		return (IDLE);
	else if (abdicate && s.pidx_tail != s.pidx_head)
		return (ABDICATED);

	return (BUSY);
}

/*
 * Caller passes in a state, with a guarantee that there is work to do and that
 * all items up to the pidx_tail in the state are visible.
 */
static void
drain_ring(struct mp_ring *r, union ring_state os, uint16_t prev, int budget)
{
	union ring_state ns;
	int n, pending, total;
	uint16_t cidx = os.cidx;
	uint16_t pidx = os.pidx_tail;

	MPASS(os.flags == BUSY);
	MPASS(cidx != pidx);

	if (prev == IDLE)
		counter_u64_add(r->starts, 1);
	pending = 0;
	total = 0;

	while (cidx != pidx) {

		/* Items from cidx to pidx are available for consumption. */
		n = r->drain(r, cidx, pidx);
		if (n == 0) {
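			/*
			 * Out of resources: publish the cidx reached so far
			 * and mark the ring STALLED, so that a later enqueue
			 * or mp_ring_check_drainage() can restart the drain.
			 */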
			critical_enter();
			do {
				os.state = ns.state = r->state;
				ns.cidx = cidx;
				ns.flags = STALLED;
			} while (atomic_cmpset_64(&r->state, os.state,
			    ns.state) == 0);
			critical_exit();
			if (prev != STALLED)
				counter_u64_add(r->stalls, 1);
			else if (total > 0) {
				counter_u64_add(r->restarts, 1);
				counter_u64_add(r->stalls, 1);
			}
			break;
		}
		cidx = increment_idx(r, cidx, n);
		pending += n;
		total += n;

		/*
		 * We update the cidx only if we've caught up with the pidx, the
		 * real cidx is getting too far ahead of the one visible to
		 * everyone else, or we have exceeded our budget.
		 */
		if (cidx != pidx && pending < 64 && total < budget)
			continue;
		critical_enter();
		do {
			os.state = ns.state = r->state;
			ns.cidx = cidx;
			ns.flags = state_to_flags(ns, total >= budget);
		} while (atomic_cmpset_acq_64(&r->state, os.state, ns.state) == 0);
		critical_exit();

		if (ns.flags == ABDICATED)
			counter_u64_add(r->abdications, 1);
		if (ns.flags != BUSY) {
			/* Wrong loop exit if we're going to stall. */
			MPASS(ns.flags != STALLED);
			if (prev == STALLED) {
				MPASS(total > 0);
				counter_u64_add(r->restarts, 1);
			}
			break;
		}

		/*
		 * The acquire style atomic above guarantees visibility of items
		 * associated with any pidx change that we notice here.
		 */
		pidx = ns.pidx_tail;
		pending = 0;
	}
}

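/*
 * Allocate an mp_ring with the given number of slots and the two drain
 * callbacks.  'size' must fit in the ring's 16-bit indices and 'flags' must
 * include M_NOWAIT or M_WAITOK.
 */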
int
mp_ring_alloc(struct mp_ring **pr, int size, void *cookie, ring_drain_t drain,
    ring_can_drain_t can_drain, struct malloc_type *mt, int flags)
{
	struct mp_ring *r;

	/* All idx are 16b so size can be 65536 at most */
	if (pr == NULL || size < 2 || size > 65536 || drain == NULL ||
	    can_drain == NULL)
		return (EINVAL);
	*pr = NULL;
	flags &= M_NOWAIT | M_WAITOK;
	MPASS(flags != 0);

	r = malloc(__offsetof(struct mp_ring, items[size]), mt, flags | M_ZERO);
	if (r == NULL)
		return (ENOMEM);
	r->size = size;
	r->cookie = cookie;
	r->mt = mt;
	r->drain = drain;
	r->can_drain = can_drain;
	r->enqueues = counter_u64_alloc(flags);
	r->drops = counter_u64_alloc(flags);
	r->starts = counter_u64_alloc(flags);
	r->stalls = counter_u64_alloc(flags);
	r->restarts = counter_u64_alloc(flags);
	r->abdications = counter_u64_alloc(flags);
	if (r->enqueues == NULL || r->drops == NULL || r->starts == NULL ||
	    r->stalls == NULL || r->restarts == NULL ||
	    r->abdications == NULL) {
		mp_ring_free(r);
		return (ENOMEM);
	}

	*pr = r;
	return (0);
}

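/*
 * Free the ring and its counters.  Safe to call on a partially constructed
 * ring, as happens when mp_ring_alloc() fails midway.
 */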
void
mp_ring_free(struct mp_ring *r)
{

	if (r == NULL)
		return;

	if (r->enqueues != NULL)
		counter_u64_free(r->enqueues);
	if (r->drops != NULL)
		counter_u64_free(r->drops);
	if (r->starts != NULL)
		counter_u64_free(r->starts);
	if (r->stalls != NULL)
		counter_u64_free(r->stalls);
	if (r->restarts != NULL)
		counter_u64_free(r->restarts);
	if (r->abdications != NULL)
		counter_u64_free(r->abdications);

	free(r, r->mt);
}

/*
 * Enqueue n items and maybe drain the ring for some time.
 *
 * Returns an errno.
 */
int
mp_ring_enqueue(struct mp_ring *r, void **items, int n, int budget)
{
	union ring_state os, ns;
	uint16_t pidx_start, pidx_stop;
	int i;

	MPASS(items != NULL);
	MPASS(n > 0);

	/*
	 * Reserve room for the new items.  Our reservation, if successful, is
	 * from 'pidx_start' to 'pidx_stop'.
	 */
	for (;;) {
		os.state = r->state;
		if (n >= space_available(r, os)) {
			counter_u64_add(r->drops, n);
			MPASS(os.flags != IDLE);
			if (os.flags == STALLED)
				mp_ring_check_drainage(r, 0);
			return (ENOBUFS);
		}
		ns.state = os.state;
		ns.pidx_head = increment_idx(r, os.pidx_head, n);
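		/*
		 * Stay in a critical section from the moment the reservation
		 * succeeds until pidx_tail is published below.  Producers that
		 * arrive after us spin-wait on our pidx_tail update, so being
		 * preempted in this window would stall them too.
		 */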
		critical_enter();
		if (atomic_cmpset_64(&r->state, os.state, ns.state))
			break;
		critical_exit();
		cpu_spinwait();
	}
	pidx_start = os.pidx_head;
	pidx_stop = ns.pidx_head;

	/*
	 * Wait for other producers who got in ahead of us to enqueue their
	 * items, one producer at a time.  It is our turn when the ring's
	 * pidx_tail reaches the beginning of our reservation (pidx_start).
	 */
	while (ns.pidx_tail != pidx_start) {
		cpu_spinwait();
		ns.state = r->state;
	}

	/* Now it is our turn to fill up the area we reserved earlier. */
	i = pidx_start;
	do {
		r->items[i] = *items++;
		if (__predict_false(++i == r->size))
			i = 0;
	} while (i != pidx_stop);

	/*
	 * Update the ring's pidx_tail.  The release style atomic guarantees
	 * that the items are visible to any thread that sees the updated pidx.
	 */
	do {
		os.state = ns.state = r->state;
		ns.pidx_tail = pidx_stop;
		ns.flags = BUSY;
	} while (atomic_cmpset_rel_64(&r->state, os.state, ns.state) == 0);
	critical_exit();
	counter_u64_add(r->enqueues, n);

	/*
	 * Turn into a consumer if some other thread isn't active as a consumer
	 * already.
	 */
	if (os.flags != BUSY)
		drain_ring(r, ns, os.flags, budget);

	return (0);
}

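/*
 * Try to restart a stalled consumer.  Requires the ring to be STALLED, no
 * producer to be mid-enqueue (pidx_head == pidx_tail), and can_drain() to
 * report that resources are available again.
 */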
void
mp_ring_check_drainage(struct mp_ring *r, int budget)
{
	union ring_state os, ns;

	os.state = r->state;
	if (os.flags != STALLED || os.pidx_head != os.pidx_tail ||
	    r->can_drain(r) == 0)
		return;

	MPASS(os.cidx != os.pidx_tail);	/* implied by STALLED */
	ns.state = os.state;
	ns.flags = BUSY;

	/*
	 * The acquire style atomic guarantees visibility of items associated
	 * with the pidx that we read here.
	 */
	if (!atomic_cmpset_acq_64(&r->state, os.state, ns.state))
		return;

	drain_ring(r, ns, os.flags, budget);
}

void
mp_ring_reset_stats(struct mp_ring *r)
{

	counter_u64_zero(r->enqueues);
	counter_u64_zero(r->drops);
	counter_u64_zero(r->starts);
	counter_u64_zero(r->stalls);
	counter_u64_zero(r->restarts);
	counter_u64_zero(r->abdications);
}

int
mp_ring_is_idle(struct mp_ring *r)
{
	union ring_state s;

	s.state = r->state;
	if (s.pidx_head == s.pidx_tail && s.pidx_tail == s.cidx &&
	    s.flags == IDLE)
		return (1);

	return (0);
}