/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2012 Gleb Smirnoff <glebius@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <vm/uma.h>

#define IN_SUBR_COUNTER_C
#include <sys/counter.h>

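/*
 * Out-of-line versions of the counter(9) primitives.  Defining
 * IN_SUBR_COUNTER_C before including <sys/counter.h> above makes the
 * machine-dependent inline implementations visible here, so that real
 * function symbols wrapping them can be provided for other consumers.
 */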
void
counter_u64_zero(counter_u64_t c)
{

	counter_u64_zero_inline(c);
}

uint64_t
counter_u64_fetch(counter_u64_t c)
{

	return (counter_u64_fetch_inline(c));
}

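/*
 * Allocation and freeing go through the generic 8-byte per-CPU UMA
 * zone; M_ZERO ensures that a freshly allocated counter starts at zero
 * on every CPU.  A minimal usage sketch (hypothetical caller):
 *
 *	counter_u64_t cnt = counter_u64_alloc(M_WAITOK);
 *	counter_u64_add(cnt, 1);
 *	printf("count: %ju\n", (uintmax_t)counter_u64_fetch(cnt));
 *	counter_u64_free(cnt);
 */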
counter_u64_t
counter_u64_alloc(int flags)
{

	return (uma_zalloc_pcpu(pcpu_zone_8, flags | M_ZERO));
}

void
counter_u64_free(counter_u64_t c)
{

	uma_zfree_pcpu(pcpu_zone_8, c);
}

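/*
 * sysctl(9) handler for a single counter.  Reads return a snapshot of
 * the current value; any write request zeroes the counter.  Counters
 * are normally attached to this handler via the SYSCTL_COUNTER_U64()
 * macro (see <sys/sysctl.h> for its exact argument list) rather than
 * by referencing sysctl_handle_counter_u64 directly.
 */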
int
sysctl_handle_counter_u64(SYSCTL_HANDLER_ARGS)
{
	uint64_t out;
	int error;

	out = counter_u64_fetch(*(counter_u64_t *)arg1);

	error = SYSCTL_OUT(req, &out, sizeof(uint64_t));

	if (error || !req->newptr)
		return (error);

	/*
	 * Any write attempt to a counter zeroes it.
	 */
	counter_u64_zero(*(counter_u64_t *)arg1);

	return (0);
}

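/*
 * Array flavour of the handler above: arg1 points to an array of arg2
 * counters.  The values are first fetched into a temporary buffer so
 * they can be copied out with a single SYSCTL_OUT(), and a write
 * request zeroes every counter in the array.
 */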
int
sysctl_handle_counter_u64_array(SYSCTL_HANDLER_ARGS)
{
	uint64_t *out;
	int error;

	out = malloc(arg2 * sizeof(uint64_t), M_TEMP, M_WAITOK);
	for (int i = 0; i < arg2; i++)
		out[i] = counter_u64_fetch(((counter_u64_t *)arg1)[i]);

	error = SYSCTL_OUT(req, out, arg2 * sizeof(uint64_t));
	free(out, M_TEMP);

	if (error || !req->newptr)
		return (error);

	/*
	 * Any write attempt to a counter zeroes it.
	 */
	for (int i = 0; i < arg2; i++)
		counter_u64_zero(((counter_u64_t *)arg1)[i]);

	return (0);
}

/*
 * MP-friendly version of ppsratecheck().
 *
 * Returns non-negative if we are within the rate, negative otherwise.
 *  0 - rate limit not reached.
 * -1 - rate limit reached.
 * >0 - rate limit had been reached before and was just reset.  The return
 *      value is the number of events since the last reset.
 */
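/*
 * A typical caller, sketched here with hypothetical names (foo_rate is
 * a struct counter_rate whose cr_rate counter is assumed to have been
 * allocated already):
 *
 *	int64_t over;
 *
 *	over = counter_ratecheck(&foo_rate, foo_limit);
 *	if (over >= 0) {
 *		if (over > 0)
 *			log(LOG_NOTICE, "foo: %jd events in last interval\n",
 *			    (intmax_t)over);
 *		do_foo();
 *	}
 */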
int64_t
counter_ratecheck(struct counter_rate *cr, int64_t limit)
{
	int64_t val;
	int now;

	val = cr->cr_over;
	now = ticks;

	if ((u_int)(now - cr->cr_ticks) >= hz) {
		/*
		 * Time to clear the structure; we are in the next second.
		 * First try an unlocked read, then proceed with the atomic.
		 */
		if ((cr->cr_lock == 0) &&
		    atomic_cmpset_acq_int(&cr->cr_lock, 0, 1)) {
			/*
			 * Check whether another thread has just gone
			 * through the reset sequence before us.
			 */
			if ((u_int)(now - cr->cr_ticks) >= hz) {
				val = counter_u64_fetch(cr->cr_rate);
				counter_u64_zero(cr->cr_rate);
				cr->cr_over = 0;
				cr->cr_ticks = now;
				if (val <= limit)
					val = 0;
			}
			atomic_store_rel_int(&cr->cr_lock, 0);
		} else
			/*
			 * We failed to take the lock; another thread may be
			 * running counter_u64_zero(), so it is not safe to
			 * do an update.  Skip it.
			 */
			return (val);
	}

	counter_u64_add(cr->cr_rate, 1);
	if (cr->cr_over != 0)
		return (-1);
	if (counter_u64_fetch(cr->cr_rate) > limit)
		val = cr->cr_over = -1;

	return (val);
}

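/*
 * SYSINIT/SYSUNINIT glue: these callbacks allocate and free the counter
 * pointed to by their argument.  They back the COUNTER_U64_SYSINIT() and
 * COUNTER_U64_DEFINE_EARLY() convenience macros in <sys/counter.h>, which
 * allow a counter to be defined statically and have its per-CPU storage
 * allocated automatically at boot.
 */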
void
counter_u64_sysinit(void *arg)
{
	counter_u64_t *cp;

	cp = arg;
	*cp = counter_u64_alloc(M_WAITOK);
}

void
counter_u64_sysuninit(void *arg)
{
	counter_u64_t *cp;

	cp = arg;
	counter_u64_free(*cp);
}