/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2012 Konstantin Belousov <kib@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 204e76af6aSGleb Smirnoff * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 214e76af6aSGleb Smirnoff * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 224e76af6aSGleb Smirnoff * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 234e76af6aSGleb Smirnoff * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 244e76af6aSGleb Smirnoff * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 254e76af6aSGleb Smirnoff * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 264e76af6aSGleb Smirnoff * SUCH DAMAGE. 274e76af6aSGleb Smirnoff * 284e76af6aSGleb Smirnoff * $FreeBSD$ 294e76af6aSGleb Smirnoff */ 304e76af6aSGleb Smirnoff 314e76af6aSGleb Smirnoff #ifndef __MACHINE_COUNTER_H__ 324e76af6aSGleb Smirnoff #define __MACHINE_COUNTER_H__ 334e76af6aSGleb Smirnoff 344e76af6aSGleb Smirnoff #include <sys/pcpu.h> 354e76af6aSGleb Smirnoff #ifdef INVARIANTS 364e76af6aSGleb Smirnoff #include <sys/proc.h> 374e76af6aSGleb Smirnoff #endif 38*f53b28c6SKristof Provost #include <sys/systm.h> 394e76af6aSGleb Smirnoff #include <machine/md_var.h> 404e76af6aSGleb Smirnoff #include <machine/specialreg.h> 414e76af6aSGleb Smirnoff 4283c9dea1SGleb Smirnoff #define EARLY_COUNTER &__pcpu[0].pc_early_dummy_counter 4383c9dea1SGleb Smirnoff 444e76af6aSGleb Smirnoff #define counter_enter() do { \ 454e76af6aSGleb Smirnoff if ((cpu_feature & CPUID_CX8) == 0) \ 464e76af6aSGleb Smirnoff critical_enter(); \ 474e76af6aSGleb Smirnoff } while (0) 484e76af6aSGleb Smirnoff 494e76af6aSGleb Smirnoff #define counter_exit() do { \ 504e76af6aSGleb Smirnoff if ((cpu_feature & CPUID_CX8) == 0) \ 514e76af6aSGleb Smirnoff critical_exit(); \ 524e76af6aSGleb Smirnoff } while (0) 534e76af6aSGleb Smirnoff 544e76af6aSGleb Smirnoff static inline void 554e76af6aSGleb Smirnoff counter_64_inc_8b(uint64_t *p, int64_t inc) 564e76af6aSGleb Smirnoff { 
574e76af6aSGleb Smirnoff 584e76af6aSGleb Smirnoff __asm __volatile( 594e76af6aSGleb Smirnoff "movl %%fs:(%%esi),%%eax\n\t" 604e76af6aSGleb Smirnoff "movl %%fs:4(%%esi),%%edx\n" 614e76af6aSGleb Smirnoff "1:\n\t" 624e76af6aSGleb Smirnoff "movl %%eax,%%ebx\n\t" 634e76af6aSGleb Smirnoff "movl %%edx,%%ecx\n\t" 644e76af6aSGleb Smirnoff "addl (%%edi),%%ebx\n\t" 654e76af6aSGleb Smirnoff "adcl 4(%%edi),%%ecx\n\t" 664e76af6aSGleb Smirnoff "cmpxchg8b %%fs:(%%esi)\n\t" 674e76af6aSGleb Smirnoff "jnz 1b" 684e76af6aSGleb Smirnoff : 69706c56e4SKonstantin Belousov : "S" ((char *)p - (char *)&__pcpu[0]), "D" (&inc) 704e76af6aSGleb Smirnoff : "memory", "cc", "eax", "edx", "ebx", "ecx"); 714e76af6aSGleb Smirnoff } 724e76af6aSGleb Smirnoff 7370a7dd5dSKonstantin Belousov #ifdef IN_SUBR_COUNTER_C 74cbb65b7eSKonstantin Belousov struct counter_u64_fetch_cx8_arg { 75cbb65b7eSKonstantin Belousov uint64_t res; 76cbb65b7eSKonstantin Belousov uint64_t *p; 77cbb65b7eSKonstantin Belousov }; 78cbb65b7eSKonstantin Belousov 79cbb65b7eSKonstantin Belousov static uint64_t 8070a7dd5dSKonstantin Belousov counter_u64_read_one_8b(uint64_t *p) 8170a7dd5dSKonstantin Belousov { 8270a7dd5dSKonstantin Belousov uint32_t res_lo, res_high; 8370a7dd5dSKonstantin Belousov 8470a7dd5dSKonstantin Belousov __asm __volatile( 8570a7dd5dSKonstantin Belousov "movl %%eax,%%ebx\n\t" 8670a7dd5dSKonstantin Belousov "movl %%edx,%%ecx\n\t" 8770a7dd5dSKonstantin Belousov "cmpxchg8b (%2)" 8870a7dd5dSKonstantin Belousov : "=a" (res_lo), "=d"(res_high) 8970a7dd5dSKonstantin Belousov : "SD" (p) 9070a7dd5dSKonstantin Belousov : "cc", "ebx", "ecx"); 9170a7dd5dSKonstantin Belousov return (res_lo + ((uint64_t)res_high << 32)); 9270a7dd5dSKonstantin Belousov } 9370a7dd5dSKonstantin Belousov 94cbb65b7eSKonstantin Belousov static void 95cbb65b7eSKonstantin Belousov counter_u64_fetch_cx8_one(void *arg1) 96cbb65b7eSKonstantin Belousov { 97cbb65b7eSKonstantin Belousov struct counter_u64_fetch_cx8_arg *arg; 98cbb65b7eSKonstantin Belousov 
uint64_t val; 99cbb65b7eSKonstantin Belousov 100cbb65b7eSKonstantin Belousov arg = arg1; 101cbb65b7eSKonstantin Belousov val = counter_u64_read_one_8b((uint64_t *)((char *)arg->p + 102cbb65b7eSKonstantin Belousov UMA_PCPU_ALLOC_SIZE * PCPU_GET(cpuid))); 103cbb65b7eSKonstantin Belousov atomic_add_64(&arg->res, val); 104cbb65b7eSKonstantin Belousov } 105cbb65b7eSKonstantin Belousov 10670a7dd5dSKonstantin Belousov static inline uint64_t 10770a7dd5dSKonstantin Belousov counter_u64_fetch_inline(uint64_t *p) 10870a7dd5dSKonstantin Belousov { 109cbb65b7eSKonstantin Belousov struct counter_u64_fetch_cx8_arg arg; 11070a7dd5dSKonstantin Belousov uint64_t res; 11170a7dd5dSKonstantin Belousov int i; 11270a7dd5dSKonstantin Belousov 11370a7dd5dSKonstantin Belousov res = 0; 11470a7dd5dSKonstantin Belousov if ((cpu_feature & CPUID_CX8) == 0) { 11570a7dd5dSKonstantin Belousov /* 11670a7dd5dSKonstantin Belousov * The machines without cmpxchg8b are not SMP. 11770a7dd5dSKonstantin Belousov * Disabling the preemption provides atomicity of the 11870a7dd5dSKonstantin Belousov * counter reading, since update is done in the 11970a7dd5dSKonstantin Belousov * critical section as well. 
12070a7dd5dSKonstantin Belousov */ 12170a7dd5dSKonstantin Belousov critical_enter(); 12296c85efbSNathan Whitehorn CPU_FOREACH(i) { 12370a7dd5dSKonstantin Belousov res += *(uint64_t *)((char *)p + 124ab3059a8SMatt Macy UMA_PCPU_ALLOC_SIZE * i); 12570a7dd5dSKonstantin Belousov } 12670a7dd5dSKonstantin Belousov critical_exit(); 12770a7dd5dSKonstantin Belousov } else { 128cbb65b7eSKonstantin Belousov arg.p = p; 129cbb65b7eSKonstantin Belousov arg.res = 0; 130cbb65b7eSKonstantin Belousov smp_rendezvous(NULL, counter_u64_fetch_cx8_one, NULL, &arg); 131cbb65b7eSKonstantin Belousov res = arg.res; 13270a7dd5dSKonstantin Belousov } 13370a7dd5dSKonstantin Belousov return (res); 13470a7dd5dSKonstantin Belousov } 13570a7dd5dSKonstantin Belousov 13670a7dd5dSKonstantin Belousov static inline void 13770a7dd5dSKonstantin Belousov counter_u64_zero_one_8b(uint64_t *p) 13870a7dd5dSKonstantin Belousov { 13970a7dd5dSKonstantin Belousov 14070a7dd5dSKonstantin Belousov __asm __volatile( 14170a7dd5dSKonstantin Belousov "movl (%0),%%eax\n\t" 14270a7dd5dSKonstantin Belousov "movl 4(%0),%%edx\n" 14370a7dd5dSKonstantin Belousov "xorl %%ebx,%%ebx\n\t" 14470a7dd5dSKonstantin Belousov "xorl %%ecx,%%ecx\n\t" 14570a7dd5dSKonstantin Belousov "1:\n\t" 14670a7dd5dSKonstantin Belousov "cmpxchg8b (%0)\n\t" 14770a7dd5dSKonstantin Belousov "jnz 1b" 14870a7dd5dSKonstantin Belousov : 14970a7dd5dSKonstantin Belousov : "SD" (p) 15070a7dd5dSKonstantin Belousov : "memory", "cc", "eax", "edx", "ebx", "ecx"); 15170a7dd5dSKonstantin Belousov } 15270a7dd5dSKonstantin Belousov 15370a7dd5dSKonstantin Belousov static void 15470a7dd5dSKonstantin Belousov counter_u64_zero_one_cpu(void *arg) 15570a7dd5dSKonstantin Belousov { 15670a7dd5dSKonstantin Belousov uint64_t *p; 15770a7dd5dSKonstantin Belousov 158ab3059a8SMatt Macy p = (uint64_t *)((char *)arg + UMA_PCPU_ALLOC_SIZE * PCPU_GET(cpuid)); 15970a7dd5dSKonstantin Belousov counter_u64_zero_one_8b(p); 16070a7dd5dSKonstantin Belousov } 16170a7dd5dSKonstantin Belousov 
16270a7dd5dSKonstantin Belousov static inline void 16370a7dd5dSKonstantin Belousov counter_u64_zero_inline(counter_u64_t c) 16470a7dd5dSKonstantin Belousov { 16570a7dd5dSKonstantin Belousov int i; 16670a7dd5dSKonstantin Belousov 16770a7dd5dSKonstantin Belousov if ((cpu_feature & CPUID_CX8) == 0) { 16870a7dd5dSKonstantin Belousov critical_enter(); 16996c85efbSNathan Whitehorn CPU_FOREACH(i) 170ab3059a8SMatt Macy *(uint64_t *)((char *)c + UMA_PCPU_ALLOC_SIZE * i) = 0; 17170a7dd5dSKonstantin Belousov critical_exit(); 17270a7dd5dSKonstantin Belousov } else { 17367d955aaSPatrick Kelsey smp_rendezvous(smp_no_rendezvous_barrier, 17467d955aaSPatrick Kelsey counter_u64_zero_one_cpu, smp_no_rendezvous_barrier, c); 17570a7dd5dSKonstantin Belousov } 17670a7dd5dSKonstantin Belousov } 17770a7dd5dSKonstantin Belousov #endif 17870a7dd5dSKonstantin Belousov 1794e76af6aSGleb Smirnoff #define counter_u64_add_protected(c, inc) do { \ 1804e76af6aSGleb Smirnoff if ((cpu_feature & CPUID_CX8) == 0) { \ 1814e76af6aSGleb Smirnoff CRITICAL_ASSERT(curthread); \ 1824e76af6aSGleb Smirnoff *(uint64_t *)zpcpu_get(c) += (inc); \ 1834e76af6aSGleb Smirnoff } else \ 1844e76af6aSGleb Smirnoff counter_64_inc_8b((c), (inc)); \ 1854e76af6aSGleb Smirnoff } while (0) 1864e76af6aSGleb Smirnoff 1874e76af6aSGleb Smirnoff static inline void 1884e76af6aSGleb Smirnoff counter_u64_add(counter_u64_t c, int64_t inc) 1894e76af6aSGleb Smirnoff { 1904e76af6aSGleb Smirnoff 1914e76af6aSGleb Smirnoff if ((cpu_feature & CPUID_CX8) == 0) { 1924e76af6aSGleb Smirnoff critical_enter(); 1934e76af6aSGleb Smirnoff *(uint64_t *)zpcpu_get(c) += inc; 1944e76af6aSGleb Smirnoff critical_exit(); 1954e76af6aSGleb Smirnoff } else { 1964e76af6aSGleb Smirnoff counter_64_inc_8b(c, inc); 1974e76af6aSGleb Smirnoff } 1984e76af6aSGleb Smirnoff } 1994e76af6aSGleb Smirnoff 2004e76af6aSGleb Smirnoff #endif /* ! __MACHINE_COUNTER_H__ */ 201