14e76af6aSGleb Smirnoff /*-
24d846d26SWarner Losh * SPDX-License-Identifier: BSD-2-Clause
371e3c308SPedro F. Giffuni *
44e76af6aSGleb Smirnoff * Copyright (c) 2012, 2013 Konstantin Belousov <kib@FreeBSD.org>
54e76af6aSGleb Smirnoff * All rights reserved.
64e76af6aSGleb Smirnoff *
74e76af6aSGleb Smirnoff * Redistribution and use in source and binary forms, with or without
84e76af6aSGleb Smirnoff * modification, are permitted provided that the following conditions
94e76af6aSGleb Smirnoff * are met:
104e76af6aSGleb Smirnoff * 1. Redistributions of source code must retain the above copyright
114e76af6aSGleb Smirnoff * notice, this list of conditions and the following disclaimer.
124e76af6aSGleb Smirnoff * 2. Redistributions in binary form must reproduce the above copyright
134e76af6aSGleb Smirnoff * notice, this list of conditions and the following disclaimer in the
144e76af6aSGleb Smirnoff * documentation and/or other materials provided with the distribution.
154e76af6aSGleb Smirnoff *
164e76af6aSGleb Smirnoff * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
174e76af6aSGleb Smirnoff * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
184e76af6aSGleb Smirnoff * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
194e76af6aSGleb Smirnoff * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
204e76af6aSGleb Smirnoff * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
214e76af6aSGleb Smirnoff * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
224e76af6aSGleb Smirnoff * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
234e76af6aSGleb Smirnoff * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
244e76af6aSGleb Smirnoff * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
254e76af6aSGleb Smirnoff * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
264e76af6aSGleb Smirnoff * SUCH DAMAGE.
274e76af6aSGleb Smirnoff */
284e76af6aSGleb Smirnoff
294e76af6aSGleb Smirnoff #ifndef __MACHINE_COUNTER_H__
304e76af6aSGleb Smirnoff #define __MACHINE_COUNTER_H__
314e76af6aSGleb Smirnoff
324e76af6aSGleb Smirnoff #include <sys/pcpu.h>
334e76af6aSGleb Smirnoff #ifdef INVARIANTS
344e76af6aSGleb Smirnoff #include <sys/proc.h>
354e76af6aSGleb Smirnoff #endif
364e76af6aSGleb Smirnoff
/*
 * Placeholder counter address used before the per-CPU counter zone is
 * available — name suggests early-boot use; backed by a dummy field in
 * CPU 0's pcpu region.
 */
#define	EARLY_COUNTER	&__pcpu[0].pc_early_dummy_counter
3883c9dea1SGleb Smirnoff
39b8b46489SNathan Whitehorn #ifdef __powerpc64__
404e76af6aSGleb Smirnoff
/*
 * On powerpc64 counter updates are lock-free (see counter_u64_add()
 * below), so no critical section is needed around them.
 */
#define	counter_enter()	do {} while (0)
#define	counter_exit()	do {} while (0)
434e76af6aSGleb Smirnoff
4470a7dd5dSKonstantin Belousov #ifdef IN_SUBR_COUNTER_C
4570a7dd5dSKonstantin Belousov static inline uint64_t
counter_u64_read_one(uint64_t * p,int cpu)4670a7dd5dSKonstantin Belousov counter_u64_read_one(uint64_t *p, int cpu)
4770a7dd5dSKonstantin Belousov {
4870a7dd5dSKonstantin Belousov
49ab3059a8SMatt Macy return (*(uint64_t *)((char *)p + UMA_PCPU_ALLOC_SIZE * cpu));
5070a7dd5dSKonstantin Belousov }
5170a7dd5dSKonstantin Belousov
5270a7dd5dSKonstantin Belousov static inline uint64_t
counter_u64_fetch_inline(uint64_t * p)5370a7dd5dSKonstantin Belousov counter_u64_fetch_inline(uint64_t *p)
5470a7dd5dSKonstantin Belousov {
5570a7dd5dSKonstantin Belousov uint64_t r;
5670a7dd5dSKonstantin Belousov int i;
5770a7dd5dSKonstantin Belousov
5870a7dd5dSKonstantin Belousov r = 0;
5996c85efbSNathan Whitehorn CPU_FOREACH(i)
6070a7dd5dSKonstantin Belousov r += counter_u64_read_one((uint64_t *)p, i);
6170a7dd5dSKonstantin Belousov
6270a7dd5dSKonstantin Belousov return (r);
6370a7dd5dSKonstantin Belousov }
6470a7dd5dSKonstantin Belousov
6570a7dd5dSKonstantin Belousov static void
counter_u64_zero_one_cpu(void * arg)6670a7dd5dSKonstantin Belousov counter_u64_zero_one_cpu(void *arg)
6770a7dd5dSKonstantin Belousov {
6870a7dd5dSKonstantin Belousov
69ab3059a8SMatt Macy *((uint64_t *)((char *)arg + UMA_PCPU_ALLOC_SIZE *
7070a7dd5dSKonstantin Belousov PCPU_GET(cpuid))) = 0;
7170a7dd5dSKonstantin Belousov }
7270a7dd5dSKonstantin Belousov
7370a7dd5dSKonstantin Belousov static inline void
counter_u64_zero_inline(counter_u64_t c)7470a7dd5dSKonstantin Belousov counter_u64_zero_inline(counter_u64_t c)
7570a7dd5dSKonstantin Belousov {
7670a7dd5dSKonstantin Belousov
7767d955aaSPatrick Kelsey smp_rendezvous(smp_no_rendezvous_barrier, counter_u64_zero_one_cpu,
7867d955aaSPatrick Kelsey smp_no_rendezvous_barrier, c);
7970a7dd5dSKonstantin Belousov }
8070a7dd5dSKonstantin Belousov #endif
8170a7dd5dSKonstantin Belousov
/*
 * The lock-free 64-bit counter_u64_add() needs no critical section, so
 * the "protected" variant is identical to the plain one.
 */
#define	counter_u64_add_protected(c, i)	counter_u64_add(c, i)
834e76af6aSGleb Smirnoff
844e76af6aSGleb Smirnoff static inline void
counter_u64_add(counter_u64_t c,int64_t inc)854e76af6aSGleb Smirnoff counter_u64_add(counter_u64_t c, int64_t inc)
864e76af6aSGleb Smirnoff {
874e76af6aSGleb Smirnoff uint64_t ccpu, old;
884e76af6aSGleb Smirnoff
894e76af6aSGleb Smirnoff __asm __volatile("\n"
904e76af6aSGleb Smirnoff "1:\n\t"
914e76af6aSGleb Smirnoff "mfsprg %0, 0\n\t"
924e76af6aSGleb Smirnoff "ldarx %1, %0, %2\n\t"
934e76af6aSGleb Smirnoff "add %1, %1, %3\n\t"
944e76af6aSGleb Smirnoff "stdcx. %1, %0, %2\n\t"
954e76af6aSGleb Smirnoff "bne- 1b"
964e76af6aSGleb Smirnoff : "=&b" (ccpu), "=&r" (old)
974e76af6aSGleb Smirnoff : "r" ((char *)c - (char *)&__pcpu[0]), "r" (inc)
98181ca73bSJustin Hibbits : "cr0", "memory");
994e76af6aSGleb Smirnoff }
1004e76af6aSGleb Smirnoff
101b8b46489SNathan Whitehorn #else /* !64bit */
1024e76af6aSGleb Smirnoff
103*d14c38ceSMark Johnston #include <sys/systm.h>
104*d14c38ceSMark Johnston
/*
 * Without 64-bit atomic operations, counter updates are protected by a
 * critical section that keeps the thread on its CPU during the update.
 */
#define	counter_enter()	critical_enter()
#define	counter_exit()	critical_exit()
1074e76af6aSGleb Smirnoff
10870a7dd5dSKonstantin Belousov #ifdef IN_SUBR_COUNTER_C
10970a7dd5dSKonstantin Belousov /* XXXKIB non-atomic 64bit read */
11070a7dd5dSKonstantin Belousov static inline uint64_t
counter_u64_read_one(uint64_t * p,int cpu)11170a7dd5dSKonstantin Belousov counter_u64_read_one(uint64_t *p, int cpu)
11270a7dd5dSKonstantin Belousov {
11370a7dd5dSKonstantin Belousov
114ab3059a8SMatt Macy return (*(uint64_t *)((char *)p + UMA_PCPU_ALLOC_SIZE * cpu));
11570a7dd5dSKonstantin Belousov }
11670a7dd5dSKonstantin Belousov
11770a7dd5dSKonstantin Belousov static inline uint64_t
counter_u64_fetch_inline(uint64_t * p)11870a7dd5dSKonstantin Belousov counter_u64_fetch_inline(uint64_t *p)
11970a7dd5dSKonstantin Belousov {
12070a7dd5dSKonstantin Belousov uint64_t r;
12170a7dd5dSKonstantin Belousov int i;
12270a7dd5dSKonstantin Belousov
12370a7dd5dSKonstantin Belousov r = 0;
12470a7dd5dSKonstantin Belousov for (i = 0; i < mp_ncpus; i++)
12570a7dd5dSKonstantin Belousov r += counter_u64_read_one((uint64_t *)p, i);
12670a7dd5dSKonstantin Belousov
12770a7dd5dSKonstantin Belousov return (r);
12870a7dd5dSKonstantin Belousov }
12970a7dd5dSKonstantin Belousov
13070a7dd5dSKonstantin Belousov /* XXXKIB non-atomic 64bit store, might interrupt increment */
13170a7dd5dSKonstantin Belousov static void
counter_u64_zero_one_cpu(void * arg)13270a7dd5dSKonstantin Belousov counter_u64_zero_one_cpu(void *arg)
13370a7dd5dSKonstantin Belousov {
13470a7dd5dSKonstantin Belousov
135ab3059a8SMatt Macy *((uint64_t *)((char *)arg + UMA_PCPU_ALLOC_SIZE *
13670a7dd5dSKonstantin Belousov PCPU_GET(cpuid))) = 0;
13770a7dd5dSKonstantin Belousov }
13870a7dd5dSKonstantin Belousov
13970a7dd5dSKonstantin Belousov static inline void
counter_u64_zero_inline(counter_u64_t c)14070a7dd5dSKonstantin Belousov counter_u64_zero_inline(counter_u64_t c)
14170a7dd5dSKonstantin Belousov {
14270a7dd5dSKonstantin Belousov
14367d955aaSPatrick Kelsey smp_rendezvous(smp_no_rendezvous_barrier, counter_u64_zero_one_cpu,
14467d955aaSPatrick Kelsey smp_no_rendezvous_barrier, c);
14570a7dd5dSKonstantin Belousov }
14670a7dd5dSKonstantin Belousov #endif
14770a7dd5dSKonstantin Belousov
/*
 * Add "inc" to the current CPU's slot of counter "c".  The caller must
 * already hold a critical section (counter_enter()), asserted below,
 * which makes the plain 64-bit read-modify-write safe on this CPU.
 */
#define	counter_u64_add_protected(c, inc)	do {	\
	CRITICAL_ASSERT(curthread);			\
	*(uint64_t *)zpcpu_get(c) += (inc);		\
} while (0)
1524e76af6aSGleb Smirnoff
1534e76af6aSGleb Smirnoff static inline void
counter_u64_add(counter_u64_t c,int64_t inc)1544e76af6aSGleb Smirnoff counter_u64_add(counter_u64_t c, int64_t inc)
1554e76af6aSGleb Smirnoff {
1564e76af6aSGleb Smirnoff
1574e76af6aSGleb Smirnoff counter_enter();
1584e76af6aSGleb Smirnoff counter_u64_add_protected(c, inc);
1594e76af6aSGleb Smirnoff counter_exit();
1604e76af6aSGleb Smirnoff }
1614e76af6aSGleb Smirnoff
162b8b46489SNathan Whitehorn #endif /* 64bit */
1634e76af6aSGleb Smirnoff
1644e76af6aSGleb Smirnoff #endif /* ! __MACHINE_COUNTER_H__ */
165