xref: /freebsd/sys/kern/subr_counter.c (revision fffcb56f7a7b1ee6fb1a91584ba69b62d133c2d5)
14e76af6aSGleb Smirnoff /*-
28a36da99SPedro F. Giffuni  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
38a36da99SPedro F. Giffuni  *
44e76af6aSGleb Smirnoff  * Copyright (c) 2012 Gleb Smirnoff <glebius@FreeBSD.org>
54e76af6aSGleb Smirnoff  * All rights reserved.
64e76af6aSGleb Smirnoff  *
74e76af6aSGleb Smirnoff  * Redistribution and use in source and binary forms, with or without
84e76af6aSGleb Smirnoff  * modification, are permitted provided that the following conditions
94e76af6aSGleb Smirnoff  * are met:
104e76af6aSGleb Smirnoff  * 1. Redistributions of source code must retain the above copyright
114e76af6aSGleb Smirnoff  *    notice, this list of conditions and the following disclaimer.
124e76af6aSGleb Smirnoff  * 2. Redistributions in binary form must reproduce the above copyright
134e76af6aSGleb Smirnoff  *    notice, this list of conditions and the following disclaimer in the
144e76af6aSGleb Smirnoff  *    documentation and/or other materials provided with the distribution.
154e76af6aSGleb Smirnoff  *
164e76af6aSGleb Smirnoff  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
174e76af6aSGleb Smirnoff  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
184e76af6aSGleb Smirnoff  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
194e76af6aSGleb Smirnoff  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
204e76af6aSGleb Smirnoff  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
214e76af6aSGleb Smirnoff  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
224e76af6aSGleb Smirnoff  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
234e76af6aSGleb Smirnoff  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
244e76af6aSGleb Smirnoff  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
254e76af6aSGleb Smirnoff  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
264e76af6aSGleb Smirnoff  * SUCH DAMAGE.
274e76af6aSGleb Smirnoff  */
284e76af6aSGleb Smirnoff 
294e76af6aSGleb Smirnoff #include <sys/cdefs.h>
304e76af6aSGleb Smirnoff __FBSDID("$FreeBSD$");
314e76af6aSGleb Smirnoff 
324e76af6aSGleb Smirnoff #include <sys/param.h>
334e76af6aSGleb Smirnoff #include <sys/systm.h>
344e76af6aSGleb Smirnoff #include <sys/kernel.h>
3570a7dd5dSKonstantin Belousov #include <sys/lock.h>
3670a7dd5dSKonstantin Belousov #include <sys/mutex.h>
3770a7dd5dSKonstantin Belousov #include <sys/proc.h>
3870a7dd5dSKonstantin Belousov #include <sys/sched.h>
394e76af6aSGleb Smirnoff #include <sys/smp.h>
404e76af6aSGleb Smirnoff #include <sys/sysctl.h>
414e76af6aSGleb Smirnoff #include <vm/uma.h>
424e76af6aSGleb Smirnoff 
4370a7dd5dSKonstantin Belousov #define IN_SUBR_COUNTER_C
4470a7dd5dSKonstantin Belousov #include <sys/counter.h>
4570a7dd5dSKonstantin Belousov 
/*
 * Zero a per-CPU 64-bit counter on all CPUs.  The real work is done by
 * the machine-dependent counter_u64_zero_inline() from <sys/counter.h>;
 * this out-of-line wrapper exists so that callers need not pull in the
 * MD implementation.
 */
void
counter_u64_zero(counter_u64_t c)
{

	counter_u64_zero_inline(c);
}
524e76af6aSGleb Smirnoff 
/*
 * Return the current value of a per-CPU 64-bit counter, summed over all
 * CPUs by the machine-dependent counter_u64_fetch_inline().  The result
 * is a snapshot; concurrent updates may or may not be included.
 */
uint64_t
counter_u64_fetch(counter_u64_t c)
{

	return (counter_u64_fetch_inline(c));
}
594e76af6aSGleb Smirnoff 
604e76af6aSGleb Smirnoff counter_u64_t
614e76af6aSGleb Smirnoff counter_u64_alloc(int flags)
624e76af6aSGleb Smirnoff {
634e76af6aSGleb Smirnoff 
64013072f0SMark Johnston 	return (uma_zalloc_pcpu(pcpu_zone_64, flags | M_ZERO));
654e76af6aSGleb Smirnoff }
664e76af6aSGleb Smirnoff 
/*
 * Release a counter previously obtained from counter_u64_alloc() back to
 * the pcpu UMA zone.  NOTE(review): presumably uma_zfree_pcpu() tolerates
 * a NULL argument like uma_zfree() — confirm before relying on it.
 */
void
counter_u64_free(counter_u64_t c)
{

	uma_zfree_pcpu(pcpu_zone_64, c);
}
734e76af6aSGleb Smirnoff 
744e76af6aSGleb Smirnoff int
754e76af6aSGleb Smirnoff sysctl_handle_counter_u64(SYSCTL_HANDLER_ARGS)
764e76af6aSGleb Smirnoff {
774e76af6aSGleb Smirnoff 	uint64_t out;
784e76af6aSGleb Smirnoff 	int error;
794e76af6aSGleb Smirnoff 
804e76af6aSGleb Smirnoff 	out = counter_u64_fetch(*(counter_u64_t *)arg1);
814e76af6aSGleb Smirnoff 
824e76af6aSGleb Smirnoff 	error = SYSCTL_OUT(req, &out, sizeof(uint64_t));
834e76af6aSGleb Smirnoff 
844e76af6aSGleb Smirnoff 	if (error || !req->newptr)
854e76af6aSGleb Smirnoff 		return (error);
864e76af6aSGleb Smirnoff 
874e76af6aSGleb Smirnoff 	/*
884e76af6aSGleb Smirnoff 	 * Any write attempt to a counter zeroes it.
894e76af6aSGleb Smirnoff 	 */
904e76af6aSGleb Smirnoff 	counter_u64_zero(*(counter_u64_t *)arg1);
914e76af6aSGleb Smirnoff 
924e76af6aSGleb Smirnoff 	return (0);
934e76af6aSGleb Smirnoff }
94b5b7b142SGleb Smirnoff 
95b5b7b142SGleb Smirnoff int
96b5b7b142SGleb Smirnoff sysctl_handle_counter_u64_array(SYSCTL_HANDLER_ARGS)
97b5b7b142SGleb Smirnoff {
98b5b7b142SGleb Smirnoff 	uint64_t *out;
99b5b7b142SGleb Smirnoff 	int error;
100b5b7b142SGleb Smirnoff 
101b5b7b142SGleb Smirnoff 	out = malloc(arg2 * sizeof(uint64_t), M_TEMP, M_WAITOK);
102b5b7b142SGleb Smirnoff 	for (int i = 0; i < arg2; i++)
103b5b7b142SGleb Smirnoff 		out[i] = counter_u64_fetch(((counter_u64_t *)arg1)[i]);
104b5b7b142SGleb Smirnoff 
105b5b7b142SGleb Smirnoff 	error = SYSCTL_OUT(req, out, arg2 * sizeof(uint64_t));
1061d522501SGleb Smirnoff 	free(out, M_TEMP);
107b5b7b142SGleb Smirnoff 
108b5b7b142SGleb Smirnoff 	if (error || !req->newptr)
109b5b7b142SGleb Smirnoff 		return (error);
110b5b7b142SGleb Smirnoff 
111b5b7b142SGleb Smirnoff 	/*
112b5b7b142SGleb Smirnoff 	 * Any write attempt to a counter zeroes it.
113b5b7b142SGleb Smirnoff 	 */
114b5b7b142SGleb Smirnoff 	for (int i = 0; i < arg2; i++)
115b5b7b142SGleb Smirnoff 		counter_u64_zero(((counter_u64_t *)arg1)[i]);
116b5b7b142SGleb Smirnoff 
117b5b7b142SGleb Smirnoff 	return (0);
118b5b7b142SGleb Smirnoff }
11916917020SGleb Smirnoff 
/*
 * MP-friendly version of ppsratecheck().
 *
 * Returns non-negative if we are in the rate, negative otherwise.
 *  0 - rate limit not reached.
 * -1 - rate limit reached.
 * >0 - rate limit was reached before, and was just reset. The return value
 *      is number of events since last reset.
 */
int64_t
counter_ratecheck(struct counter_rate *cr, int64_t limit)
{
	int64_t val;
	int now;

	val = cr->cr_over;
	now = ticks;

	/* Unsigned subtraction handles ticks wrap-around correctly. */
	if ((u_int)(now - cr->cr_ticks) >= hz) {
		/*
		 * Time to clear the structure, we are in the next second.
		 * First try unlocked read, and then proceed with atomic.
		 * cr_lock is a simple try-lock: cmpset with acquire
		 * semantics takes it, store with release semantics below
		 * drops it.
		 */
		if ((cr->cr_lock == 0) &&
		    atomic_cmpset_acq_int(&cr->cr_lock, 0, 1)) {
			/*
			 * Check if other thread has just went through the
			 * reset sequence before us.
			 */
			if ((u_int)(now - cr->cr_ticks) >= hz) {
				val = counter_u64_fetch(cr->cr_rate);
				counter_u64_zero(cr->cr_rate);
				cr->cr_over = 0;
				cr->cr_ticks = now;
				/*
				 * Report a positive count only if the
				 * limit was actually exceeded in the
				 * elapsed interval.
				 */
				if (val <= limit)
					val = 0;
			}
			atomic_store_rel_int(&cr->cr_lock, 0);
		} else
			/*
			 * We failed to lock, in this case other thread may
			 * be running counter_u64_zero(), so it is not safe
			 * to do an update, we skip it.
			 */
			return (val);
	}

	/* Within the current interval: count the event. */
	counter_u64_add(cr->cr_rate, 1);
	if (cr->cr_over != 0)
		return (-1);
	if (counter_u64_fetch(cr->cr_rate) > limit)
		val = cr->cr_over = -1;

	return (val);
}
175*fffcb56fSMark Johnston 
176*fffcb56fSMark Johnston void
177*fffcb56fSMark Johnston counter_u64_sysinit(void *arg)
178*fffcb56fSMark Johnston {
179*fffcb56fSMark Johnston 	counter_u64_t *cp;
180*fffcb56fSMark Johnston 
181*fffcb56fSMark Johnston 	cp = arg;
182*fffcb56fSMark Johnston 	*cp = counter_u64_alloc(M_WAITOK);
183*fffcb56fSMark Johnston }
184*fffcb56fSMark Johnston 
185*fffcb56fSMark Johnston void
186*fffcb56fSMark Johnston counter_u64_sysuninit(void *arg)
187*fffcb56fSMark Johnston {
188*fffcb56fSMark Johnston 	counter_u64_t *cp;
189*fffcb56fSMark Johnston 
190*fffcb56fSMark Johnston 	cp = arg;
191*fffcb56fSMark Johnston 	counter_u64_free(*cp);
192*fffcb56fSMark Johnston }
193