xref: /freebsd/sys/powerpc/include/counter.h (revision 71e3c3083b47ad0f04322c5a1173377433c05a6e)
14e76af6aSGleb Smirnoff /*-
2*71e3c308SPedro F. Giffuni  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3*71e3c308SPedro F. Giffuni  *
44e76af6aSGleb Smirnoff  * Copyright (c) 2012, 2013 Konstantin Belousov <kib@FreeBSD.org>
54e76af6aSGleb Smirnoff  * All rights reserved.
64e76af6aSGleb Smirnoff  *
74e76af6aSGleb Smirnoff  * Redistribution and use in source and binary forms, with or without
84e76af6aSGleb Smirnoff  * modification, are permitted provided that the following conditions
94e76af6aSGleb Smirnoff  * are met:
104e76af6aSGleb Smirnoff  * 1. Redistributions of source code must retain the above copyright
114e76af6aSGleb Smirnoff  *    notice, this list of conditions and the following disclaimer.
124e76af6aSGleb Smirnoff  * 2. Redistributions in binary form must reproduce the above copyright
134e76af6aSGleb Smirnoff  *    notice, this list of conditions and the following disclaimer in the
144e76af6aSGleb Smirnoff  *    documentation and/or other materials provided with the distribution.
154e76af6aSGleb Smirnoff  *
164e76af6aSGleb Smirnoff  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
174e76af6aSGleb Smirnoff  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
184e76af6aSGleb Smirnoff  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
194e76af6aSGleb Smirnoff  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
204e76af6aSGleb Smirnoff  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
214e76af6aSGleb Smirnoff  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
224e76af6aSGleb Smirnoff  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
234e76af6aSGleb Smirnoff  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
244e76af6aSGleb Smirnoff  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
254e76af6aSGleb Smirnoff  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
264e76af6aSGleb Smirnoff  * SUCH DAMAGE.
274e76af6aSGleb Smirnoff  *
284e76af6aSGleb Smirnoff  * $FreeBSD$
294e76af6aSGleb Smirnoff  */
304e76af6aSGleb Smirnoff 
314e76af6aSGleb Smirnoff #ifndef __MACHINE_COUNTER_H__
324e76af6aSGleb Smirnoff #define __MACHINE_COUNTER_H__
334e76af6aSGleb Smirnoff 
344e76af6aSGleb Smirnoff #include <sys/pcpu.h>
354e76af6aSGleb Smirnoff #ifdef INVARIANTS
364e76af6aSGleb Smirnoff #include <sys/proc.h>
374e76af6aSGleb Smirnoff #endif
384e76af6aSGleb Smirnoff 
/*
 * The early dummy counter lives inside the statically allocated BSP pcpu
 * region; EARLY_COUNTER gives counter(9) consumers a valid target before
 * the real per-CPU counter storage is set up.
 */
extern struct pcpu __pcpu[];

#define	EARLY_COUNTER	&__pcpu[0].pc_early_dummy_counter
4283c9dea1SGleb Smirnoff 
#ifdef __powerpc64__

/*
 * On powerpc64 counter_u64_add() is a single atomic ldarx/stdcx. sequence
 * (see below), so no critical section is required around updates.
 */
#define	counter_enter()	do {} while (0)
#define	counter_exit()	do {} while (0)
474e76af6aSGleb Smirnoff 
4870a7dd5dSKonstantin Belousov #ifdef IN_SUBR_COUNTER_C
4970a7dd5dSKonstantin Belousov static inline uint64_t
5070a7dd5dSKonstantin Belousov counter_u64_read_one(uint64_t *p, int cpu)
5170a7dd5dSKonstantin Belousov {
5270a7dd5dSKonstantin Belousov 
5370a7dd5dSKonstantin Belousov 	return (*(uint64_t *)((char *)p + sizeof(struct pcpu) * cpu));
5470a7dd5dSKonstantin Belousov }
5570a7dd5dSKonstantin Belousov 
5670a7dd5dSKonstantin Belousov static inline uint64_t
5770a7dd5dSKonstantin Belousov counter_u64_fetch_inline(uint64_t *p)
5870a7dd5dSKonstantin Belousov {
5970a7dd5dSKonstantin Belousov 	uint64_t r;
6070a7dd5dSKonstantin Belousov 	int i;
6170a7dd5dSKonstantin Belousov 
6270a7dd5dSKonstantin Belousov 	r = 0;
6396c85efbSNathan Whitehorn 	CPU_FOREACH(i)
6470a7dd5dSKonstantin Belousov 		r += counter_u64_read_one((uint64_t *)p, i);
6570a7dd5dSKonstantin Belousov 
6670a7dd5dSKonstantin Belousov 	return (r);
6770a7dd5dSKonstantin Belousov }
6870a7dd5dSKonstantin Belousov 
6970a7dd5dSKonstantin Belousov static void
7070a7dd5dSKonstantin Belousov counter_u64_zero_one_cpu(void *arg)
7170a7dd5dSKonstantin Belousov {
7270a7dd5dSKonstantin Belousov 
7370a7dd5dSKonstantin Belousov 	*((uint64_t *)((char *)arg + sizeof(struct pcpu) *
7470a7dd5dSKonstantin Belousov 	    PCPU_GET(cpuid))) = 0;
7570a7dd5dSKonstantin Belousov }
7670a7dd5dSKonstantin Belousov 
static inline void
counter_u64_zero_inline(counter_u64_t c)
{

	/*
	 * Zero each CPU's copy of the counter on that CPU itself via a
	 * rendezvous, with no setup or teardown barrier.
	 */
	smp_rendezvous(smp_no_rendezvous_barrier, counter_u64_zero_one_cpu,
	    smp_no_rendezvous_barrier, c);
}
8470a7dd5dSKonstantin Belousov #endif
8570a7dd5dSKonstantin Belousov 
/* The add is already atomic on powerpc64; the protected form is the same. */
#define	counter_u64_add_protected(c, i)	counter_u64_add(c, i)
874e76af6aSGleb Smirnoff 
/*
 * Atomically add 'inc' to the current CPU's copy of counter 'c'.
 *
 * mfsprg loads the per-CPU data base pointer from SPRG0; 'c' is an
 * address within the dummy pcpu region, so (c - &__pcpu[0]) is the
 * counter's offset within struct pcpu.  The ldarx/stdcx. pair retries
 * (bne- 1b) until the store succeeds without losing the reservation.
 */
static inline void
counter_u64_add(counter_u64_t c, int64_t inc)
{
	uint64_t ccpu, old;

	__asm __volatile("\n"
      "1:\n\t"
	    "mfsprg	%0, 0\n\t"
	    "ldarx	%1, %0, %2\n\t"
	    "add	%1, %1, %3\n\t"
	    "stdcx.	%1, %0, %2\n\t"
	    "bne-	1b"
	    : "=&b" (ccpu), "=&r" (old)
	    : "r" ((char *)c - (char *)&__pcpu[0]), "r" (inc)
	    : "cr0", "memory");
}
1044e76af6aSGleb Smirnoff 
#else	/* !64bit */

/*
 * 32-bit powerpc cannot update a 64-bit counter with a single atomic
 * sequence, so updates are guarded by a critical section.
 */
#define	counter_enter()	critical_enter()
#define	counter_exit()	critical_exit()
11070a7dd5dSKonstantin Belousov #ifdef IN_SUBR_COUNTER_C
11170a7dd5dSKonstantin Belousov /* XXXKIB non-atomic 64bit read */
11270a7dd5dSKonstantin Belousov static inline uint64_t
11370a7dd5dSKonstantin Belousov counter_u64_read_one(uint64_t *p, int cpu)
11470a7dd5dSKonstantin Belousov {
11570a7dd5dSKonstantin Belousov 
11670a7dd5dSKonstantin Belousov 	return (*(uint64_t *)((char *)p + sizeof(struct pcpu) * cpu));
11770a7dd5dSKonstantin Belousov }
11870a7dd5dSKonstantin Belousov 
11970a7dd5dSKonstantin Belousov static inline uint64_t
12070a7dd5dSKonstantin Belousov counter_u64_fetch_inline(uint64_t *p)
12170a7dd5dSKonstantin Belousov {
12270a7dd5dSKonstantin Belousov 	uint64_t r;
12370a7dd5dSKonstantin Belousov 	int i;
12470a7dd5dSKonstantin Belousov 
12570a7dd5dSKonstantin Belousov 	r = 0;
12670a7dd5dSKonstantin Belousov 	for (i = 0; i < mp_ncpus; i++)
12770a7dd5dSKonstantin Belousov 		r += counter_u64_read_one((uint64_t *)p, i);
12870a7dd5dSKonstantin Belousov 
12970a7dd5dSKonstantin Belousov 	return (r);
13070a7dd5dSKonstantin Belousov }
13170a7dd5dSKonstantin Belousov 
/* XXXKIB non-atomic 64bit store, might interrupt increment */
static void
counter_u64_zero_one_cpu(void *arg)
{

	/*
	 * Rendezvous action: zero the calling CPU's copy of the counter
	 * whose address is passed in 'arg'.
	 */
	*((uint64_t *)((char *)arg + sizeof(struct pcpu) *
	    PCPU_GET(cpuid))) = 0;
}
14070a7dd5dSKonstantin Belousov 
static inline void
counter_u64_zero_inline(counter_u64_t c)
{

	/*
	 * Zero each CPU's copy of the counter on that CPU itself via a
	 * rendezvous, with no setup or teardown barrier.
	 */
	smp_rendezvous(smp_no_rendezvous_barrier, counter_u64_zero_one_cpu,
	    smp_no_rendezvous_barrier, c);
}
14870a7dd5dSKonstantin Belousov #endif
14970a7dd5dSKonstantin Belousov 
/*
 * Add 'inc' to the current CPU's copy of the counter; the caller must
 * already be inside a critical section (asserted below), since the
 * 64-bit read-modify-write is not atomic on 32-bit powerpc.
 */
#define	counter_u64_add_protected(c, inc)	do {	\
	CRITICAL_ASSERT(curthread);			\
	*(uint64_t *)zpcpu_get(c) += (inc);		\
} while (0)
1544e76af6aSGleb Smirnoff 
/* Add 'inc' to counter 'c', wrapping the update in a critical section. */
static inline void
counter_u64_add(counter_u64_t c, int64_t inc)
{

	counter_enter();
	counter_u64_add_protected(c, inc);
	counter_exit();
}
1634e76af6aSGleb Smirnoff 
164b8b46489SNathan Whitehorn #endif	/* 64bit */
1654e76af6aSGleb Smirnoff 
1664e76af6aSGleb Smirnoff #endif	/* ! __MACHINE_COUNTER_H__ */
167