/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2012 Konstantin Belousov <kib@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef __MACHINE_COUNTER_H__
#define	__MACHINE_COUNTER_H__

#include <sys/pcpu.h>
#ifdef INVARIANTS
#include <sys/proc.h>
#endif
#include <machine/md_var.h>
#include <machine/specialreg.h>

extern struct pcpu __pcpu[];

#define	EARLY_COUNTER	&__pcpu[0].pc_early_dummy_counter

/*
 * CPUs lacking cmpxchg8b (CPUID_CX8) cannot update a 64-bit counter
 * atomically, so on such machines updates are protected by a critical
 * section instead.
 */
#define	counter_enter()	do {				\
	if ((cpu_feature & CPUID_CX8) == 0)		\
		critical_enter();			\
} while (0)

#define	counter_exit()	do {				\
	if ((cpu_feature & CPUID_CX8) == 0)		\
		critical_exit();			\
} while (0)

/*
 * Add "inc" to the current CPU's slot of counter "p".  %esi holds the
 * counter's offset from the pcpu base, so addressing it through %fs
 * (which points at the current CPU's pcpu region) always hits the
 * local slot.  The cmpxchg8b loop retries until the full 64-bit
 * read-modify-write completes atomically.
 */
static inline void
counter_64_inc_8b(uint64_t *p, int64_t inc)
{

	__asm __volatile(
	"movl	%%fs:(%%esi),%%eax\n\t"
	"movl	%%fs:4(%%esi),%%edx\n"
"1:\n\t"
	"movl	%%eax,%%ebx\n\t"
	"movl	%%edx,%%ecx\n\t"
	"addl	(%%edi),%%ebx\n\t"
	"adcl	4(%%edi),%%ecx\n\t"
	"cmpxchg8b %%fs:(%%esi)\n\t"
	"jnz	1b"
	:
	: "S" ((char *)p - (char *)&__pcpu[0]), "D" (&inc)
	: "memory", "cc", "eax", "edx", "ebx", "ecx");
}

#ifdef IN_SUBR_COUNTER_C
/*
 * Atomically read one CPU's 64-bit slot.  With %ecx:%ebx copied from
 * %edx:%eax, cmpxchg8b either stores back the value already present
 * (compare succeeded) or loads the current value into %edx:%eax
 * (compare failed); either way %edx:%eax ends up holding an atomic
 * snapshot of the slot.
 */
static inline uint64_t
counter_u64_read_one_8b(uint64_t *p)
{
	uint32_t res_lo, res_high;

	__asm __volatile(
	"movl	%%eax,%%ebx\n\t"
	"movl	%%edx,%%ecx\n\t"
	"cmpxchg8b	(%2)"
	: "=a" (res_lo), "=d" (res_high)
	: "SD" (p)
	: "cc", "ebx", "ecx");
	return (res_lo + ((uint64_t)res_high << 32));
}

static inline uint64_t
counter_u64_fetch_inline(uint64_t *p)
{
	uint64_t res;
	int i;

	res = 0;
	if ((cpu_feature & CPUID_CX8) == 0) {
		/*
		 * Machines without cmpxchg8b are not SMP.
		 * Disabling preemption provides atomicity of the
		 * counter read, since the update is done in a
		 * critical section as well.
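		 *
		 * On CX8-capable machines, which may be SMP, each
		 * per-CPU slot is instead read with a single atomic
		 * cmpxchg8b; see counter_u64_read_one_8b() above.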
		 */
		critical_enter();
		CPU_FOREACH(i) {
			res += *(uint64_t *)((char *)p +
			    UMA_PCPU_ALLOC_SIZE * i);
		}
		critical_exit();
	} else {
		CPU_FOREACH(i)
			res += counter_u64_read_one_8b((uint64_t *)((char *)p +
			    UMA_PCPU_ALLOC_SIZE * i));
	}
	return (res);
}

/*
 * Atomically zero one CPU's 64-bit slot: load the current value into
 * %edx:%eax, then retry cmpxchg8b with %ecx:%ebx == 0 until the
 * exchange succeeds.
 */
static inline void
counter_u64_zero_one_8b(uint64_t *p)
{

	__asm __volatile(
	"movl	(%0),%%eax\n\t"
	"movl	4(%0),%%edx\n"
	"xorl	%%ebx,%%ebx\n\t"
	"xorl	%%ecx,%%ecx\n\t"
"1:\n\t"
	"cmpxchg8b	(%0)\n\t"
	"jnz	1b"
	:
	: "SD" (p)
	: "memory", "cc", "eax", "edx", "ebx", "ecx");
}

/*
 * smp_rendezvous() callback: zero the calling CPU's slot of the
 * counter passed in "arg".
 */
static void
counter_u64_zero_one_cpu(void *arg)
{
	uint64_t *p;

	p = (uint64_t *)((char *)arg + UMA_PCPU_ALLOC_SIZE * PCPU_GET(cpuid));
	counter_u64_zero_one_8b(p);
}

static inline void
counter_u64_zero_inline(counter_u64_t c)
{
	int i;

	if ((cpu_feature & CPUID_CX8) == 0) {
		/* Non-SMP machine; a critical section suffices. */
		critical_enter();
		CPU_FOREACH(i)
			*(uint64_t *)((char *)c + UMA_PCPU_ALLOC_SIZE * i) = 0;
		critical_exit();
	} else {
		/* Have each CPU zero its own slot atomically. */
		smp_rendezvous(smp_no_rendezvous_barrier,
		    counter_u64_zero_one_cpu, smp_no_rendezvous_barrier, c);
	}
}
#endif

#define	counter_u64_add_protected(c, inc)	do {	\
	if ((cpu_feature & CPUID_CX8) == 0) {		\
		CRITICAL_ASSERT(curthread);		\
		*(uint64_t *)zpcpu_get(c) += (inc);	\
	} else						\
		counter_64_inc_8b((c), (inc));		\
} while (0)

static inline void
counter_u64_add(counter_u64_t c, int64_t inc)
{

	if ((cpu_feature & CPUID_CX8) == 0) {
		critical_enter();
		*(uint64_t *)zpcpu_get(c) += inc;
		critical_exit();
	} else {
		counter_64_inc_8b(c, inc);
	}
}

#endif	/* ! __MACHINE_COUNTER_H__ */
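/*
 * Illustrative usage sketch, not part of this header: the helpers
 * above back the machine-independent counter(9) API declared in
 * <sys/counter.h>.  A typical consumer allocates a counter, bumps it
 * on hot paths with the cheap per-CPU add, and aggregates on demand:
 *
 *	counter_u64_t pkts;
 *
 *	pkts = counter_u64_alloc(M_WAITOK);
 *	counter_u64_add(pkts, 1);
 *	printf("%ju\n", (uintmax_t)counter_u64_fetch(pkts));
 *	counter_u64_free(pkts);
 */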