/*-
 * Copyright (c) 2012, 2013 Konstantin Belousov <kib@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef __MACHINE_COUNTER_H__
#define	__MACHINE_COUNTER_H__

#include <sys/pcpu.h>
#ifdef INVARIANTS
#include <sys/proc.h>
#endif

#if defined(AIM) && defined(__powerpc64__)

#define	counter_enter()	do {} while (0)
#define	counter_exit()	do {} while (0)

#ifdef IN_SUBR_COUNTER_C
/* Return the value of the given CPU's slot of the counter. */
static inline uint64_t
counter_u64_read_one(uint64_t *p, int cpu)
{

	return (*(uint64_t *)((char *)p + sizeof(struct pcpu) * cpu));
}

/* Sum all per-CPU slots; aligned 64-bit loads are atomic on powerpc64. */
static inline uint64_t
counter_u64_fetch_inline(uint64_t *p)
{
	uint64_t r;
	int i;

	r = 0;
	for (i = 0; i < mp_ncpus; i++)
		r += counter_u64_read_one((uint64_t *)p, i);

	return (r);
}

/* Zero the current CPU's slot; run on every CPU via smp_rendezvous(). */
static void
counter_u64_zero_one_cpu(void *arg)
{

	*((uint64_t *)((char *)arg + sizeof(struct pcpu) *
	    PCPU_GET(cpuid))) = 0;
}

static inline void
counter_u64_zero_inline(counter_u64_t c)
{

	smp_rendezvous(smp_no_rendevous_barrier, counter_u64_zero_one_cpu,
	    smp_no_rendevous_barrier, c);
}
#endif

#define	counter_u64_add_protected(c, i)	counter_u64_add(c, i)

extern struct pcpu __pcpu[MAXCPU];
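/*
 * Increment the current CPU's slot of the counter without disabling
 * interrupts or preemption.  SPRG0 holds the pcpu pointer of the CPU
 * executing the code, and the counter is passed as its offset from
 * &__pcpu[0], so base + offset addresses this CPU's 64-bit slot.  The
 * ldarx/stdcx. pair makes the read-modify-write atomic: if the
 * reservation is lost, e.g. across a context switch, stdcx. fails and
 * the sequence restarts at 1:, re-reading SPRG0.
 */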
static inline void
counter_u64_add(counter_u64_t c, int64_t inc)
{
	uint64_t ccpu, old;

	__asm __volatile("\n"
      "1:\n\t"
	    "mfsprg	%0, 0\n\t"
	    "ldarx	%1, %0, %2\n\t"
	    "add	%1, %1, %3\n\t"
	    "stdcx.	%1, %0, %2\n\t"
	    "bne-	1b"
	    : "=&b" (ccpu), "=&r" (old)
	    : "r" ((char *)c - (char *)&__pcpu[0]), "r" (inc)
	    : "cc", "memory");
}

#else	/* !AIM || !64bit */

#define	counter_enter()	critical_enter()
#define	counter_exit()	critical_exit()

#ifdef IN_SUBR_COUNTER_C
/* XXXKIB non-atomic 64bit read */
static inline uint64_t
counter_u64_read_one(uint64_t *p, int cpu)
{

	return (*(uint64_t *)((char *)p + sizeof(struct pcpu) * cpu));
}

static inline uint64_t
counter_u64_fetch_inline(uint64_t *p)
{
	uint64_t r;
	int i;

	r = 0;
	for (i = 0; i < mp_ncpus; i++)
		r += counter_u64_read_one((uint64_t *)p, i);

	return (r);
}

/* XXXKIB non-atomic 64bit store, might interrupt increment */
static void
counter_u64_zero_one_cpu(void *arg)
{

	*((uint64_t *)((char *)arg + sizeof(struct pcpu) *
	    PCPU_GET(cpuid))) = 0;
}

static inline void
counter_u64_zero_inline(counter_u64_t c)
{

	smp_rendezvous(smp_no_rendevous_barrier, counter_u64_zero_one_cpu,
	    smp_no_rendevous_barrier, c);
}
#endif

#define	counter_u64_add_protected(c, inc)	do {	\
	CRITICAL_ASSERT(curthread);			\
	*(uint64_t *)zpcpu_get(c) += (inc);		\
} while (0)

/* Fallback: perform the non-atomic update inside a critical section. */
static inline void
counter_u64_add(counter_u64_t c, int64_t inc)
{

	counter_enter();
	counter_u64_add_protected(c, inc);
	counter_exit();
}

#endif	/* AIM 64bit */

#endif	/* ! __MACHINE_COUNTER_H__ */
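/*
 * Rough usage sketch, for illustration only: kernel code normally
 * consumes these counters through the MI counter(9) KPI declared in
 * <sys/counter.h> rather than the MD helpers above.  The variable name
 * below is arbitrary.
 *
 *	counter_u64_t pkts;
 *
 *	pkts = counter_u64_alloc(M_WAITOK);
 *	counter_u64_add(pkts, 1);
 *	printf("packets: %ju\n", (uintmax_t)counter_u64_fetch(pkts));
 *	counter_u64_free(pkts);
 */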