/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2012 Konstantin Belousov <kib@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef __MACHINE_COUNTER_H__
#define	__MACHINE_COUNTER_H__

#include <sys/pcpu.h>
#ifdef INVARIANTS
#include <sys/proc.h>
#endif
#include <sys/systm.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>

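/*
 * Placeholder backing store, apparently for counters that are touched
 * before their per-CPU storage has been allocated: it points at the dummy
 * slot reserved in CPU 0's pcpu area.
 */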
#define	EARLY_COUNTER	&__pcpu[0].pc_early_dummy_counter

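/*
 * Without cmpxchg8b (CPUID_CX8) a 64-bit per-CPU slot cannot be updated by
 * a single atomic instruction, so the update is protected by a critical
 * section instead.  counter_enter()/counter_exit() supply that protection
 * for the *_protected() update path; when cmpxchg8b is present they reduce
 * to the feature test alone.
 */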
#define	counter_enter()	do {				\
	if ((cpu_feature & CPUID_CX8) == 0)		\
		critical_enter();			\
} while (0)

#define	counter_exit()	do {				\
	if ((cpu_feature & CPUID_CX8) == 0)		\
		critical_exit();			\
} while (0)

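/*
 * Add "inc" to the current CPU's 64-bit slot of counter "p".  The "S"
 * operand is the counter's offset within the pcpu region, so the
 * %fs-relative accesses below always hit the local CPU's copy.  The
 * cmpxchg8b loop retries until the read-modify-write completes without
 * interference, so the addition stays correct even if an interrupt or a
 * migration to another CPU happens mid-sequence.
 */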
static inline void
counter_64_inc_8b(uint64_t *p, int64_t inc)
{

	__asm __volatile(
	"movl %%fs:(%%esi),%%eax\n\t"
	"movl %%fs:4(%%esi),%%edx\n"
"1:\n\t"
	"movl %%eax,%%ebx\n\t"
	"movl %%edx,%%ecx\n\t"
	"addl (%%edi),%%ebx\n\t"
	"adcl 4(%%edi),%%ecx\n\t"
	"cmpxchg8b %%fs:(%%esi)\n\t"
	"jnz 1b"
	:
	: "S" ((char *)p - (char *)&__pcpu[0]), "D" (&inc)
	: "memory", "cc", "eax", "edx", "ebx", "ecx");
}

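/*
 * The fetch and zero helpers below are only meant to be compiled into the
 * counter(9) implementation; kern/subr_counter.c is expected to define
 * IN_SUBR_COUNTER_C before including this header.
 */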
#ifdef IN_SUBR_COUNTER_C
struct counter_u64_fetch_cx8_arg {
	uint64_t res;
	uint64_t *p;
};

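/*
 * Atomically read one CPU's 64-bit slot.  cmpxchg8b always leaves the
 * current memory value in %edx:%eax, and a successful comparison writes
 * back that same value, so a single instruction yields a consistent
 * 64-bit snapshot without taking a lock.
 */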
static uint64_t
counter_u64_read_one_8b(uint64_t *p)
{
	uint32_t res_lo, res_high;

	__asm __volatile(
	"movl %%eax,%%ebx\n\t"
	"movl %%edx,%%ecx\n\t"
	"cmpxchg8b (%2)"
	: "=a" (res_lo), "=d" (res_high)
	: "SD" (p)
	: "cc", "ebx", "ecx");
	return (res_lo + ((uint64_t)res_high << 32));
}

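/*
 * smp_rendezvous() callback: read the local CPU's slot of the counter and
 * fold it into the shared result with a 64-bit atomic add.
 */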
static void
counter_u64_fetch_cx8_one(void *arg1)
{
	struct counter_u64_fetch_cx8_arg *arg;
	uint64_t val;

	arg = arg1;
	val = counter_u64_read_one_8b((uint64_t *)((char *)arg->p +
	    UMA_PCPU_ALLOC_SIZE * PCPU_GET(cpuid)));
	atomic_add_64(&arg->res, val);
}

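/*
 * Sum all per-CPU slots.  With cmpxchg8b available, a rendezvous makes
 * every CPU read its own slot atomically; without it the machine is
 * uniprocessor and a critical section suffices (see below).
 */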
static inline uint64_t
counter_u64_fetch_inline(uint64_t *p)
{
	struct counter_u64_fetch_cx8_arg arg;
	uint64_t res;
	int i;

	res = 0;
	if ((cpu_feature & CPUID_CX8) == 0) {
		/*
		 * Machines without cmpxchg8b are not SMP.  Disabling
		 * preemption provides atomicity of the counter reading,
		 * since updates are done in a critical section as well.
		 */
		critical_enter();
		CPU_FOREACH(i) {
			res += *(uint64_t *)((char *)p +
			    UMA_PCPU_ALLOC_SIZE * i);
		}
		critical_exit();
	} else {
		arg.p = p;
		arg.res = 0;
		smp_rendezvous(NULL, counter_u64_fetch_cx8_one, NULL, &arg);
		res = arg.res;
	}
	return (res);
}

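/*
 * Atomically replace one CPU's 64-bit slot with zero, retrying the
 * cmpxchg8b until it wins against any concurrent update.
 */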
static inline void
counter_u64_zero_one_8b(uint64_t *p)
{

	__asm __volatile(
	"movl (%0),%%eax\n\t"
	"movl 4(%0),%%edx\n"
	"xorl %%ebx,%%ebx\n\t"
	"xorl %%ecx,%%ecx\n\t"
"1:\n\t"
	"cmpxchg8b (%0)\n\t"
	"jnz 1b"
	:
	: "SD" (p)
	: "memory", "cc", "eax", "edx", "ebx", "ecx");
}

static void
counter_u64_zero_one_cpu(void *arg)
{
	uint64_t *p;

	p = (uint64_t *)((char *)arg + UMA_PCPU_ALLOC_SIZE * PCPU_GET(cpuid));
	counter_u64_zero_one_8b(p);
}

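/*
 * Zero every per-CPU slot, mirroring the fetch path: rendezvous on all
 * CPUs when cmpxchg8b is available, otherwise clear the slots directly
 * from within a critical section.
 */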
static inline void
counter_u64_zero_inline(counter_u64_t c)
{
	int i;

	if ((cpu_feature & CPUID_CX8) == 0) {
		critical_enter();
		CPU_FOREACH(i)
			*(uint64_t *)((char *)c + UMA_PCPU_ALLOC_SIZE * i) = 0;
		critical_exit();
	} else {
		smp_rendezvous(smp_no_rendezvous_barrier,
		    counter_u64_zero_one_cpu, smp_no_rendezvous_barrier, c);
	}
}
#endif

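/*
 * Update variant for callers already running between counter_enter() and
 * counter_exit(): on the non-cmpxchg8b path it only asserts the critical
 * section instead of entering one.
 */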
#define	counter_u64_add_protected(c, inc)	do {	\
	if ((cpu_feature & CPUID_CX8) == 0) {		\
		CRITICAL_ASSERT(curthread);		\
		*(uint64_t *)zpcpu_get(c) += (inc);	\
	} else						\
		counter_64_inc_8b((c), (inc));		\
} while (0)

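/*
 * General-purpose increment.  A minimal usage sketch, assuming the usual
 * counter(9) KPI from <sys/counter.h>:
 *
 *	counter_u64_t cnt;
 *
 *	cnt = counter_u64_alloc(M_WAITOK);
 *	counter_u64_add(cnt, 1);
 *	printf("count: %ju\n", (uintmax_t)counter_u64_fetch(cnt));
 *	counter_u64_free(cnt);
 */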
static inline void
counter_u64_add(counter_u64_t c, int64_t inc)
{

	if ((cpu_feature & CPUID_CX8) == 0) {
		critical_enter();
		*(uint64_t *)zpcpu_get(c) += inc;
		critical_exit();
	} else {
		counter_64_inc_8b(c, inc);
	}
}

#endif	/* ! __MACHINE_COUNTER_H__ */