/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2012 Gleb Smirnoff <glebius@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <vm/uma.h>

#define IN_SUBR_COUNTER_C
#include <sys/counter.h>

static void
counter_u64_zero_sync(counter_u64_t c)
{
	int cpu;

	CPU_FOREACH(cpu)
		*(uint64_t *)zpcpu_get_cpu(c, cpu) = 0;
}

void
counter_u64_zero(counter_u64_t c)
{

	counter_u64_zero_inline(c);
}

uint64_t
counter_u64_fetch(counter_u64_t c)
{

	return (counter_u64_fetch_inline(c));
}

counter_u64_t
counter_u64_alloc(int flags)
{
	counter_u64_t r;

	r = uma_zalloc_pcpu(pcpu_zone_64, flags);
	if (r != NULL)
		counter_u64_zero_sync(r);

	return (r);
}

void
counter_u64_free(counter_u64_t c)
{

	uma_zfree_pcpu(pcpu_zone_64, c);
}

int
sysctl_handle_counter_u64(SYSCTL_HANDLER_ARGS)
{
	uint64_t out;
	int error;

	out = counter_u64_fetch(*(counter_u64_t *)arg1);

	error = SYSCTL_OUT(req, &out, sizeof(uint64_t));

	if (error || !req->newptr)
		return (error);

	/*
	 * Any write attempt to a counter zeroes it.
	 */
	counter_u64_zero(*(counter_u64_t *)arg1);

	return (0);
}

int
sysctl_handle_counter_u64_array(SYSCTL_HANDLER_ARGS)
{
	uint64_t *out;
	int error;

	out = malloc(arg2 * sizeof(uint64_t), M_TEMP, M_WAITOK);
	for (int i = 0; i < arg2; i++)
		out[i] = counter_u64_fetch(((counter_u64_t *)arg1)[i]);

	error = SYSCTL_OUT(req, out, arg2 * sizeof(uint64_t));
	free(out, M_TEMP);

	if (error || !req->newptr)
		return (error);

	/*
	 * Any write attempt to a counter zeroes it.
	 */
	for (int i = 0; i < arg2; i++)
		counter_u64_zero(((counter_u64_t *)arg1)[i]);

	return (0);
}
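/*
 * A minimal, hypothetical usage sketch for the counter(9) KPI implemented
 * above.  The "example_" names and the sysctl node are not part of this
 * file; the block sits under an example guard so it is never compiled in.
 */
#ifdef COUNTER_EXAMPLE
static counter_u64_t example_pkts;

/* Export the counter; a write from userland zeroes it (see handler above). */
SYSCTL_COUNTER_U64(_kern, OID_AUTO, example_pkts, CTLFLAG_RW,
    &example_pkts, "Example packet counter");

static void
example_init(void)
{

	/* M_WAITOK sleeps until the per-CPU storage is available. */
	example_pkts = counter_u64_alloc(M_WAITOK);
}

static void
example_input(void)
{

	/* Lockless per-CPU increment. */
	counter_u64_add(example_pkts, 1);
}

static void
example_fini(void)
{

	printf("saw %ju packets\n", (uintmax_t)counter_u64_fetch(example_pkts));
	counter_u64_free(example_pkts);
}
#endif /* COUNTER_EXAMPLE */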
/*
 * MP-friendly version of ppsratecheck().
 *
 * Returns non-negative if we are in the rate, negative otherwise.
 *  0 - rate limit not reached.
 * -1 - rate limit reached.
 * >0 - rate limit was reached before, and was just reset. The return value
 *      is number of events since last reset.
 */
int64_t
counter_ratecheck(struct counter_rate *cr, int64_t limit)
{
	int64_t val;
	int now;

	val = cr->cr_over;
	now = ticks;

	if ((u_int)(now - cr->cr_ticks) >= hz) {
		/*
		 * Time to clear the structure, we are in the next second.
		 * First try unlocked read, and then proceed with atomic.
		 */
		if ((cr->cr_lock == 0) &&
		    atomic_cmpset_acq_int(&cr->cr_lock, 0, 1)) {
			/*
			 * Check if another thread has just gone through the
			 * reset sequence before us.
			 */
			if ((u_int)(now - cr->cr_ticks) >= hz) {
				val = counter_u64_fetch(cr->cr_rate);
				counter_u64_zero(cr->cr_rate);
				cr->cr_over = 0;
				cr->cr_ticks = now;
				if (val <= limit)
					val = 0;
			}
			atomic_store_rel_int(&cr->cr_lock, 0);
		} else
			/*
			 * We failed to lock, in this case another thread may
			 * be running counter_u64_zero(), so it is not safe
			 * to do an update, we skip it.
			 */
			return (val);
	}

	counter_u64_add(cr->cr_rate, 1);
	if (cr->cr_over != 0)
		return (-1);
	if (counter_u64_fetch(cr->cr_rate) > limit)
		val = cr->cr_over = -1;

	return (val);
}
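/*
 * A minimal, hypothetical usage sketch for counter_ratecheck().  The limit
 * of 200 events per second and the "example_" names are illustrative only;
 * cr_rate must be backed by an allocated counter before the first call.
 */
#ifdef COUNTER_EXAMPLE
static struct counter_rate example_rate;

static void
example_rate_init(void)
{

	example_rate.cr_rate = counter_u64_alloc(M_WAITOK);
}

static void
example_event(void)
{
	int64_t overflow;

	overflow = counter_ratecheck(&example_rate, 200);
	if (overflow < 0)
		return;			/* Over the limit, drop the event. */
	if (overflow > 0)
		printf("rate limited: %jd events in the last second\n",
		    (intmax_t)overflow);
	/* Within the limit, handle the event here. */
}
#endif /* COUNTER_EXAMPLE */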