/*-
 * Copyright (c) 2012, 2013 Konstantin Belousov <kib@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef __MACHINE_COUNTER_H__
#define	__MACHINE_COUNTER_H__

#include <sys/pcpu.h>
#ifdef INVARIANTS
#include <sys/proc.h>
#endif

#ifdef __powerpc64__

/*
 * On powerpc64 the increment is a single atomic ldarx/stdcx. sequence
 * (see counter_u64_add() below), so no critical section is needed
 * around updates.
 */
#define	counter_enter()	do {} while (0)
#define	counter_exit()	do {} while (0)

#ifdef IN_SUBR_COUNTER_C
/*
 * The per-CPU slots of a counter are spaced sizeof(struct pcpu) bytes
 * apart; 'p' points at the slot inside CPU 0's per-CPU area.
 */
static inline uint64_t
counter_u64_read_one(uint64_t *p, int cpu)
{

	return (*(uint64_t *)((char *)p + sizeof(struct pcpu) * cpu));
}

static inline uint64_t
counter_u64_fetch_inline(uint64_t *p)
{
	uint64_t r;
	int i;

	r = 0;
	for (i = 0; i < mp_ncpus; i++)
		r += counter_u64_read_one((uint64_t *)p, i);

	return (r);
}

static void
counter_u64_zero_one_cpu(void *arg)
{

	*((uint64_t *)((char *)arg + sizeof(struct pcpu) *
	    PCPU_GET(cpuid))) = 0;
}

static inline void
counter_u64_zero_inline(counter_u64_t c)
{

	smp_rendezvous(smp_no_rendevous_barrier, counter_u64_zero_one_cpu,
	    smp_no_rendevous_barrier, c);
}
#endif

#define	counter_u64_add_protected(c, i)	counter_u64_add(c, i)

extern struct pcpu __pcpu[MAXCPU];

static inline void
counter_u64_add(counter_u64_t c, int64_t inc)
{
	uint64_t ccpu, old;

	/*
	 * SPRG0 holds the current CPU's pcpu pointer.  Adding the
	 * counter's offset from &__pcpu[0] to it yields the address of
	 * this CPU's slot.  If the ldarx reservation is lost (interrupt,
	 * preemption, CPU migration), stdcx. fails and the sequence
	 * restarts from the label, re-reading SPRG0.
	 */
	__asm __volatile("\n"
	    "1:\n\t"
	    "mfsprg	%0, 0\n\t"
	    "ldarx	%1, %0, %2\n\t"
	    "add	%1, %1, %3\n\t"
	    "stdcx.	%1, %0, %2\n\t"
	    "bne-	1b"
	    : "=&b" (ccpu), "=&r" (old)
	    : "r" ((char *)c - (char *)&__pcpu[0]), "r" (inc)
	    : "cr0", "memory");
}
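
/*
 * For reference only: the logical effect of the asm above, expressed
 * as plain (non-atomic) C.  This is a sketch, not compilable kernel
 * code; "pcpu_base" is a hypothetical name for the value read from
 * SPRG0 (the current CPU's struct pcpu):
 *
 *	char *pcpu_base;
 *	uint64_t *slot;
 *
 *	slot = (uint64_t *)(pcpu_base + ((char *)c - (char *)&__pcpu[0]));
 *	*slot += inc;
 *
 * The real sequence must be a single ldarx/stdcx. loop so that a lost
 * reservation restarts the whole read-modify-write.
 */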

#else /* !64bit */

/*
 * 32-bit powerpc cannot update a 64-bit counter with a single atomic
 * instruction, so updates are protected by a critical section instead.
 */
#define	counter_enter()	critical_enter()
#define	counter_exit()	critical_exit()

#ifdef IN_SUBR_COUNTER_C
/* XXXKIB non-atomic 64bit read */
static inline uint64_t
counter_u64_read_one(uint64_t *p, int cpu)
{

	return (*(uint64_t *)((char *)p + sizeof(struct pcpu) * cpu));
}

static inline uint64_t
counter_u64_fetch_inline(uint64_t *p)
{
	uint64_t r;
	int i;

	r = 0;
	for (i = 0; i < mp_ncpus; i++)
		r += counter_u64_read_one((uint64_t *)p, i);

	return (r);
}

/* XXXKIB non-atomic 64bit store, might interrupt increment */
static void
counter_u64_zero_one_cpu(void *arg)
{

	*((uint64_t *)((char *)arg + sizeof(struct pcpu) *
	    PCPU_GET(cpuid))) = 0;
}

static inline void
counter_u64_zero_inline(counter_u64_t c)
{

	smp_rendezvous(smp_no_rendevous_barrier, counter_u64_zero_one_cpu,
	    smp_no_rendevous_barrier, c);
}
#endif

#define	counter_u64_add_protected(c, inc)	do {	\
	CRITICAL_ASSERT(curthread);			\
	*(uint64_t *)zpcpu_get(c) += (inc);		\
} while (0)

static inline void
counter_u64_add(counter_u64_t c, int64_t inc)
{

	counter_enter();
	counter_u64_add_protected(c, inc);
	counter_exit();
}

#endif /* 64bit */

#endif /* ! __MACHINE_COUNTER_H__ */
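
/*
 * Example use of the counter(9) KPI that this MD header backs (a
 * sketch; counter_u64_alloc(), counter_u64_fetch() and
 * counter_u64_free() are MI functions declared in <sys/counter.h>,
 * not here):
 *
 *	counter_u64_t pkts;
 *
 *	pkts = counter_u64_alloc(M_WAITOK);
 *	counter_u64_add(pkts, 1);
 *	printf("%ju packets\n", (uintmax_t)counter_u64_fetch(pkts));
 *	counter_u64_free(pkts);
 */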