/*-
 * Copyright (c) 2012 Konstantin Belousov <kib@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _MACHINE_COUNTER_H_
#define	_MACHINE_COUNTER_H_

#include <sys/pcpu.h>
#include <machine/atomic.h>

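/*
 * counter_enter() and counter_exit() are no-ops on arm64: counter_u64_add()
 * below updates the per-CPU slot with a 64-bit atomic, so counter updates
 * do not need to be wrapped in a critical section.
 */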
#define	counter_enter()	do {} while (0)
#define	counter_exit()	do {} while (0)

#ifdef IN_SUBR_COUNTER_C
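/*
 * The helpers below are visible only to kern/subr_counter.c, which defines
 * IN_SUBR_COUNTER_C and builds the MI counter(9) fetch and zero operations
 * on top of them.
 */

/*
 * Read the slot of a single CPU.  Per-CPU copies of a counter are laid out
 * sizeof(struct pcpu) bytes apart, so the slot for a given CPU is found by
 * offsetting the base pointer accordingly.
 */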
static inline uint64_t
counter_u64_read_one(uint64_t *p, int cpu)
{

	return (*(uint64_t *)((char *)p + sizeof(struct pcpu) * cpu));
}

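/*
 * Sum the per-CPU slots.  The result is a snapshot: slots are read one CPU
 * at a time while other CPUs may keep updating their own copies, so the
 * total is not guaranteed to be consistent at any single instant.
 */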
static inline uint64_t
counter_u64_fetch_inline(uint64_t *p)
{
	uint64_t r;
	int i;

	r = 0;
	CPU_FOREACH(i)
		r += counter_u64_read_one((uint64_t *)p, i);

	return (r);
}

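/*
 * smp_rendezvous() callback: zero the slot that belongs to the CPU the
 * callback is running on.
 */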
static void
counter_u64_zero_one_cpu(void *arg)
{

	*((uint64_t *)((char *)arg + sizeof(struct pcpu) *
	    PCPU_GET(cpuid))) = 0;
}

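/*
 * Zero a counter by running counter_u64_zero_one_cpu() on every CPU via
 * smp_rendezvous().
 */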
static inline void
counter_u64_zero_inline(counter_u64_t c)
{

	smp_rendezvous(smp_no_rendevous_barrier, counter_u64_zero_one_cpu,
	    smp_no_rendevous_barrier, c);
}
#endif

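/*
 * counter_u64_add() performs a 64-bit atomic add on this CPU's slot, so no
 * critical section is required and counter_u64_add_protected() can simply
 * alias it.  If the thread migrates between zpcpu_get() and the add, the
 * increment merely lands in another CPU's slot, which does not change the
 * fetched total.
 */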
#define	counter_u64_add_protected(c, inc)	counter_u64_add(c, inc)

static inline void
counter_u64_add(counter_u64_t c, int64_t inc)
{

	atomic_add_64((uint64_t *)zpcpu_get(c), inc);
}

#endif	/* ! _MACHINE_COUNTER_H_ */
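/*
 * A minimal usage sketch of the MI counter(9) interface that this MD header
 * backs.  Illustrative only: the entry points below come from
 * <sys/counter.h>, and the M_WAITOK allocation must be done from a
 * sleepable context.
 *
 *	#include <sys/param.h>
 *	#include <sys/systm.h>
 *	#include <sys/malloc.h>
 *	#include <sys/counter.h>
 *
 *	static counter_u64_t pkts;
 *
 *	pkts = counter_u64_alloc(M_WAITOK);
 *	counter_u64_add(pkts, 1);
 *	printf("%ju packets\n", (uintmax_t)counter_u64_fetch(pkts));
 *	counter_u64_zero(pkts);
 *	counter_u64_free(pkts);
 */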