xref: /freebsd/sys/amd64/include/counter.h (revision 95ee2897e98f5d444f26ed2334cc7c439f9c16c6)
14e76af6aSGleb Smirnoff /*-
24d846d26SWarner Losh  * SPDX-License-Identifier: BSD-2-Clause
3c49761ddSPedro F. Giffuni  *
44e76af6aSGleb Smirnoff  * Copyright (c) 2012 Konstantin Belousov <kib@FreeBSD.org>
54e76af6aSGleb Smirnoff  * All rights reserved.
64e76af6aSGleb Smirnoff  *
74e76af6aSGleb Smirnoff  * Redistribution and use in source and binary forms, with or without
84e76af6aSGleb Smirnoff  * modification, are permitted provided that the following conditions
94e76af6aSGleb Smirnoff  * are met:
104e76af6aSGleb Smirnoff  * 1. Redistributions of source code must retain the above copyright
114e76af6aSGleb Smirnoff  *    notice, this list of conditions and the following disclaimer.
124e76af6aSGleb Smirnoff  * 2. Redistributions in binary form must reproduce the above copyright
134e76af6aSGleb Smirnoff  *    notice, this list of conditions and the following disclaimer in the
144e76af6aSGleb Smirnoff  *    documentation and/or other materials provided with the distribution.
154e76af6aSGleb Smirnoff  *
164e76af6aSGleb Smirnoff  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
174e76af6aSGleb Smirnoff  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
184e76af6aSGleb Smirnoff  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
194e76af6aSGleb Smirnoff  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
204e76af6aSGleb Smirnoff  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
214e76af6aSGleb Smirnoff  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
224e76af6aSGleb Smirnoff  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
234e76af6aSGleb Smirnoff  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
244e76af6aSGleb Smirnoff  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
254e76af6aSGleb Smirnoff  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
264e76af6aSGleb Smirnoff  * SUCH DAMAGE.
274e76af6aSGleb Smirnoff  */
284e76af6aSGleb Smirnoff 
298dc3fdfeSBrooks Davis #ifdef __i386__
308dc3fdfeSBrooks Davis #include <i386/counter.h>
318dc3fdfeSBrooks Davis #else /* !__i386__ */
328dc3fdfeSBrooks Davis 
334e76af6aSGleb Smirnoff #ifndef __MACHINE_COUNTER_H__
344e76af6aSGleb Smirnoff #define __MACHINE_COUNTER_H__
354e76af6aSGleb Smirnoff 
364e76af6aSGleb Smirnoff #include <sys/pcpu.h>
37*208fcb55SKristof Provost #include <sys/kassert.h>
384e76af6aSGleb Smirnoff 
39fb886947SMateusz Guzik #define	EARLY_COUNTER	(void *)__offsetof(struct pcpu, pc_early_dummy_counter)
404e76af6aSGleb Smirnoff 
414e76af6aSGleb Smirnoff #define	counter_enter()	do {} while (0)
424e76af6aSGleb Smirnoff #define	counter_exit()	do {} while (0)
434e76af6aSGleb Smirnoff 
4470a7dd5dSKonstantin Belousov #ifdef IN_SUBR_COUNTER_C
4570a7dd5dSKonstantin Belousov static inline uint64_t
counter_u64_read_one(counter_u64_t c,int cpu)46e2b81f51SMateusz Guzik counter_u64_read_one(counter_u64_t c, int cpu)
4770a7dd5dSKonstantin Belousov {
4870a7dd5dSKonstantin Belousov 
49fb886947SMateusz Guzik 	MPASS(c != EARLY_COUNTER);
50e2b81f51SMateusz Guzik 	return (*zpcpu_get_cpu(c, cpu));
5170a7dd5dSKonstantin Belousov }
5270a7dd5dSKonstantin Belousov 
5370a7dd5dSKonstantin Belousov static inline uint64_t
counter_u64_fetch_inline(uint64_t * c)54e2b81f51SMateusz Guzik counter_u64_fetch_inline(uint64_t *c)
5570a7dd5dSKonstantin Belousov {
5670a7dd5dSKonstantin Belousov 	uint64_t r;
57e2b81f51SMateusz Guzik 	int cpu;
5870a7dd5dSKonstantin Belousov 
5970a7dd5dSKonstantin Belousov 	r = 0;
60e2b81f51SMateusz Guzik 	CPU_FOREACH(cpu)
61e2b81f51SMateusz Guzik 		r += counter_u64_read_one(c, cpu);
6270a7dd5dSKonstantin Belousov 
6370a7dd5dSKonstantin Belousov 	return (r);
6470a7dd5dSKonstantin Belousov }
6570a7dd5dSKonstantin Belousov 
6670a7dd5dSKonstantin Belousov static void
counter_u64_zero_one_cpu(void * arg)6770a7dd5dSKonstantin Belousov counter_u64_zero_one_cpu(void *arg)
6870a7dd5dSKonstantin Belousov {
69e2b81f51SMateusz Guzik 	counter_u64_t c;
7070a7dd5dSKonstantin Belousov 
71e2b81f51SMateusz Guzik 	c = arg;
72fb886947SMateusz Guzik 	MPASS(c != EARLY_COUNTER);
73e2b81f51SMateusz Guzik 	*(zpcpu_get(c)) = 0;
7470a7dd5dSKonstantin Belousov }
7570a7dd5dSKonstantin Belousov 
/*
 * Zero a counter by rendezvousing all CPUs: each CPU runs
 * counter_u64_zero_one_cpu() against its own per-CPU slot, so no
 * cross-CPU stores or atomics are required.  No setup/teardown work is
 * needed, hence the no-op barrier functions.
 */
static inline void
counter_u64_zero_inline(counter_u64_t c)
{

	smp_rendezvous(smp_no_rendezvous_barrier, counter_u64_zero_one_cpu,
	    smp_no_rendezvous_barrier, c);
}
8370a7dd5dSKonstantin Belousov #endif
8470a7dd5dSKonstantin Belousov 
854e76af6aSGleb Smirnoff #define	counter_u64_add_protected(c, i)	counter_u64_add(c, i)
864e76af6aSGleb Smirnoff 
/*
 * Add 'inc' (which may be negative) to this CPU's slot of counter 'c'.
 *
 * EARLY_COUNTER is a shared dummy slot used before per-CPU counters are
 * allocated; only the BSP may touch it, so assert that an AP never
 * reaches here with the placeholder still in place.
 */
static inline void
counter_u64_add(counter_u64_t c, int64_t inc)
{

	KASSERT(IS_BSP() || c != EARLY_COUNTER, ("EARLY_COUNTER used on AP"));
	/* zpcpu_add() updates the current CPU's slot only. */
	zpcpu_add(c, inc);
}
944e76af6aSGleb Smirnoff 
954e76af6aSGleb Smirnoff #endif	/* ! __MACHINE_COUNTER_H__ */
968dc3fdfeSBrooks Davis 
978dc3fdfeSBrooks Davis #endif /* __i386__ */
98