/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2012, 2013 Konstantin Belousov <kib@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef __MACHINE_COUNTER_H__
#define __MACHINE_COUNTER_H__

#include <sys/pcpu.h>
#ifdef INVARIANTS
#include <sys/proc.h>
#endif

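/* Dummy counter slot usable before the per-CPU counter areas exist. */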
#define	EARLY_COUNTER	&__pcpu[0].pc_early_dummy_counter

#ifdef __powerpc64__

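/*
 * counter_u64_add() below is a per-CPU atomic, so updates need no
 * critical section; counter_enter()/counter_exit() are no-ops.
 */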
#define	counter_enter()	do {} while (0)
#define	counter_exit()	do {} while (0)

#ifdef IN_SUBR_COUNTER_C
static inline uint64_t
counter_u64_read_one(uint64_t *p, int cpu)
{

	return (*(uint64_t *)((char *)p + UMA_PCPU_ALLOC_SIZE * cpu));
}

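/*
 * Sum all per-CPU slots.  The result is not a consistent snapshot:
 * other CPUs may keep updating their slots while the loop runs, but
 * each individual 64-bit load is atomic on powerpc64.
 */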
static inline uint64_t
counter_u64_fetch_inline(uint64_t *p)
{
	uint64_t r;
	int i;

	r = 0;
	CPU_FOREACH(i)
		r += counter_u64_read_one((uint64_t *)p, i);

	return (r);
}

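/*
 * Zero the current CPU's slot; counter_u64_zero_inline() runs this
 * on every CPU via smp_rendezvous().
 */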
static void
counter_u64_zero_one_cpu(void *arg)
{

	*((uint64_t *)((char *)arg + UMA_PCPU_ALLOC_SIZE *
	    PCPU_GET(cpuid))) = 0;
}

static inline void
counter_u64_zero_inline(counter_u64_t c)
{

	smp_rendezvous(smp_no_rendezvous_barrier, counter_u64_zero_one_cpu,
	    smp_no_rendezvous_barrier, c);
}
#endif

#define	counter_u64_add_protected(c, i)	counter_u64_add(c, i)

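/*
 * Atomically add to the current CPU's slot of the counter.  SPRG0 holds
 * the per-CPU data pointer; the counter address is expressed as an
 * offset from &__pcpu[0], so adding that offset to the running CPU's
 * per-CPU pointer yields this CPU's slot.  If the thread is switched
 * out between the ldarx and the stdcx., the reservation is invalidated,
 * the stdcx. fails, and the loop restarts from the mfsprg on whichever
 * CPU the thread resumes on.
 */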
static inline void
counter_u64_add(counter_u64_t c, int64_t inc)
{
	uint64_t ccpu, old;

	__asm __volatile("\n"
	    "1:\n\t"
	    "mfsprg	%0, 0\n\t"
	    "ldarx	%1, %0, %2\n\t"
	    "add	%1, %1, %3\n\t"
	    "stdcx.	%1, %0, %2\n\t"
	    "bne-	1b"
	    : "=&b" (ccpu), "=&r" (old)
	    : "r" ((char *)c - (char *)&__pcpu[0]), "r" (inc)
	    : "cr0", "memory");
}

#else	/* !64bit */

#include <sys/systm.h>

#define	counter_enter()	critical_enter()
#define	counter_exit()	critical_exit()
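
/*
 * A 64-bit counter slot cannot be read or written atomically on 32-bit
 * powerpc, so updates run inside a critical section: the thread cannot
 * be preempted or migrated in the middle of the read-modify-write.
 */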

#ifdef IN_SUBR_COUNTER_C
/* XXXKIB non-atomic 64bit read */
static inline uint64_t
counter_u64_read_one(uint64_t *p, int cpu)
{

	return (*(uint64_t *)((char *)p + UMA_PCPU_ALLOC_SIZE * cpu));
}

static inline uint64_t
counter_u64_fetch_inline(uint64_t *p)
{
	uint64_t r;
	int i;

	r = 0;
	CPU_FOREACH(i)
		r += counter_u64_read_one((uint64_t *)p, i);

	return (r);
}

/* XXXKIB non-atomic 64bit store, might interrupt increment */
static void
counter_u64_zero_one_cpu(void *arg)
{

	*((uint64_t *)((char *)arg + UMA_PCPU_ALLOC_SIZE *
	    PCPU_GET(cpuid))) = 0;
}

static inline void
counter_u64_zero_inline(counter_u64_t c)
{

	smp_rendezvous(smp_no_rendezvous_barrier, counter_u64_zero_one_cpu,
	    smp_no_rendezvous_barrier, c);
}
#endif

#define	counter_u64_add_protected(c, inc)	do {	\
	CRITICAL_ASSERT(curthread);			\
	*(uint64_t *)zpcpu_get(c) += (inc);		\
} while (0)

static inline void
counter_u64_add(counter_u64_t c, int64_t inc)
{

	counter_enter();
	counter_u64_add_protected(c, inc);
	counter_exit();
}

#endif	/* 64bit */
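
/*
 * Illustrative sketch only, not part of this header: consumers normally
 * go through the machine-independent counter(9) KPI rather than using
 * these inlines directly, e.g.:
 *
 *	counter_u64_t cnt;
 *
 *	cnt = counter_u64_alloc(M_WAITOK);
 *	counter_u64_add(cnt, 1);
 *	printf("events: %ju\n", (uintmax_t)counter_u64_fetch(cnt));
 *	counter_u64_free(cnt);
 */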

#endif /* ! __MACHINE_COUNTER_H__ */