xref: /freebsd/sys/arm64/include/counter.h (revision e5acd89c78481f4d339bc7d74661604163fda433)
/*-
 * Copyright (c) 2012 Konstantin Belousov <kib@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _MACHINE_COUNTER_H_
#define	_MACHINE_COUNTER_H_

#include <sys/pcpu.h>
#ifdef INVARIANTS
#include <sys/proc.h>
#endif

#define	counter_enter()	critical_enter()
#define	counter_exit()	critical_exit()
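
/*
 * Counter updates are guarded by a critical section rather than an
 * atomic instruction: counter_enter() prevents preemption (and thus
 * migration to another CPU), so the read-modify-write in
 * counter_u64_add_protected() below always lands on the current CPU's
 * slice of the counter.
 */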

#ifdef IN_SUBR_COUNTER_C
static inline uint64_t
counter_u64_read_one(uint64_t *p, int cpu)
{

	return (*(uint64_t *)((char *)p + sizeof(struct pcpu) * cpu));
}
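
/*
 * counter_u64_read_one() is the arbitrary-CPU analogue of zpcpu_get():
 * per-CPU slices are laid out at a stride of sizeof(struct pcpu), so
 * CPU "cpu"'s partial sum of a counter based at p lives at
 * (char *)p + sizeof(struct pcpu) * cpu.
 */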

static inline uint64_t
counter_u64_fetch_inline(uint64_t *p)
{
	uint64_t r;
	int i;

	r = 0;
	for (i = 0; i < mp_ncpus; i++)
		r += counter_u64_read_one((uint64_t *)p, i);

	return (r);
}
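
/*
 * A fetch is a plain sum of every CPU's slice, read without
 * synchronization; the result is only a snapshot, and increments that
 * race with the loop may or may not be included:
 *
 *	total = slice[0] + slice[1] + ... + slice[mp_ncpus - 1]
 */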

/* XXXKIB might interrupt increment */
static void
counter_u64_zero_one_cpu(void *arg)
{

	*((uint64_t *)((char *)arg + sizeof(struct pcpu) *
	    PCPU_GET(cpuid))) = 0;
}

static inline void
counter_u64_zero_inline(counter_u64_t c)
{

	smp_rendezvous(smp_no_rendevous_barrier, counter_u64_zero_one_cpu,
	    smp_no_rendevous_barrier, c);
}
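
/*
 * Zeroing rendezvouses with every CPU so that each one clears only its
 * own slice (indexed by PCPU_GET(cpuid)).  As the XXXKIB note above
 * warns, the rendezvous IPI can interrupt an increment that is already
 * in flight on that CPU, in which case the interrupted update may
 * store its stale sum back over the freshly zeroed slice.
 */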
#endif

#define	counter_u64_add_protected(c, inc)	do {	\
	CRITICAL_ASSERT(curthread);			\
	*(uint64_t *)zpcpu_get(c) += (inc);		\
} while (0)

static inline void
counter_u64_add(counter_u64_t c, int64_t inc)
{

	counter_enter();
	counter_u64_add_protected(c, inc);
	counter_exit();
}
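
/*
 * Example use of the counter(9) KPI that these primitives back (a
 * sketch with a hypothetical "pkts" counter; counter_u64_alloc(),
 * counter_u64_fetch() and counter_u64_free() are declared in
 * <sys/counter.h> and implemented in kern/subr_counter.c):
 *
 *	static counter_u64_t pkts;
 *
 *	pkts = counter_u64_alloc(M_WAITOK);
 *	counter_u64_add(pkts, 1);
 *	printf("packets seen: %ju\n", (uintmax_t)counter_u64_fetch(pkts));
 *	counter_u64_free(pkts);
 */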

#endif	/* ! _MACHINE_COUNTER_H_ */