/*-
 * Copyright (c) 2012 Konstantin Belousov <kib@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef __MACHINE_COUNTER_H__
#define	__MACHINE_COUNTER_H__

#include <sys/pcpu.h>
#ifdef INVARIANTS
#include <sys/proc.h>
#endif
#include <machine/md_var.h>
#include <machine/specialreg.h>
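
/*
 * This implementation picks one of two strategies at run time.  CPUs
 * that implement cmpxchg8b (CPUID_CX8) update their 64-bit per-CPU
 * slot with a lock-free compare-exchange loop.  Older CPUs fall back
 * to a critical section, which is sufficient because machines lacking
 * cmpxchg8b are uniprocessor.  counter_enter() and counter_exit()
 * bracket only that fallback path.
 */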

#define	counter_enter()	do {				\
	if ((cpu_feature & CPUID_CX8) == 0)		\
		critical_enter();			\
} while (0)

#define	counter_exit()	do {				\
	if ((cpu_feature & CPUID_CX8) == 0)		\
		critical_exit();			\
} while (0)

extern struct pcpu __pcpu[MAXCPU];

/*
 * Atomically add inc to the current CPU's slot of counter p.  The "S"
 * operand is p's offset from __pcpu[0]; since %fs is based at the
 * current CPU's struct pcpu, the %fs-relative accesses resolve to this
 * CPU's slot of the counter.
 */
static inline void
counter_64_inc_8b(uint64_t *p, int64_t inc)
{

	__asm __volatile(
	"movl	%%fs:(%%esi),%%eax\n\t"	/* load current counter value */
	"movl	%%fs:4(%%esi),%%edx\n"
"1:\n\t"
	"movl	%%eax,%%ebx\n\t"	/* compute value + inc ... */
	"movl	%%edx,%%ecx\n\t"
	"addl	(%%edi),%%ebx\n\t"	/* ... into %ecx:%ebx */
	"adcl	4(%%edi),%%ecx\n\t"
	"cmpxchg8b %%fs:(%%esi)\n\t"	/* store it if slot is unchanged */
	"jnz	1b"			/* otherwise reload and retry */
	:
	: "S" ((char *)p - (char *)&__pcpu[0]), "D" (&inc)
	: "memory", "cc", "eax", "edx", "ebx", "ecx");
}
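
/*
 * The fetch and zero helpers below are compiled only into
 * kern/subr_counter.c, which defines IN_SUBR_COUNTER_C before
 * including this header; they are not part of the public interface.
 */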
#ifdef IN_SUBR_COUNTER_C
/*
 * Read one CPU's slot atomically.  With the replacement value equal to
 * the comparand, cmpxchg8b leaves memory unchanged and, in either
 * outcome, leaves the current 64-bit contents in %edx:%eax.
 */
static inline uint64_t
counter_u64_read_one_8b(uint64_t *p)
{
	uint32_t res_lo, res_high;

	__asm __volatile(
	"movl	%%eax,%%ebx\n\t"
	"movl	%%edx,%%ecx\n\t"
	"cmpxchg8b	(%2)"
	: "=a" (res_lo), "=d" (res_high)
	: "SD" (p)
	: "cc", "ebx", "ecx");
	return (res_lo + ((uint64_t)res_high << 32));
}

static inline uint64_t
counter_u64_fetch_inline(uint64_t *p)
{
	uint64_t res;
	int i;

	res = 0;
	if ((cpu_feature & CPUID_CX8) == 0) {
		/*
		 * Machines without cmpxchg8b are not SMP.
		 * Disabling preemption makes the read atomic,
		 * since updates are also done inside a critical
		 * section.
		 */
		critical_enter();
		CPU_FOREACH(i) {
			res += *(uint64_t *)((char *)p +
			    sizeof(struct pcpu) * i);
		}
		critical_exit();
	} else {
		CPU_FOREACH(i)
			res += counter_u64_read_one_8b((uint64_t *)((char *)p +
			    sizeof(struct pcpu) * i));
	}
	return (res);
}

/*
 * Atomically replace one CPU's slot with zero (%ecx:%ebx), retrying
 * until the compare-exchange succeeds.
 */
static inline void
counter_u64_zero_one_8b(uint64_t *p)
{

	__asm __volatile(
	"movl	(%0),%%eax\n\t"
	"movl	4(%0),%%edx\n"
	"xorl	%%ebx,%%ebx\n\t"
	"xorl	%%ecx,%%ecx\n\t"
"1:\n\t"
	"cmpxchg8b	(%0)\n\t"
	"jnz	1b"
	:
	: "SD" (p)
	: "memory", "cc", "eax", "edx", "ebx", "ecx");
}

static void
counter_u64_zero_one_cpu(void *arg)
{
	uint64_t *p;

	p = (uint64_t *)((char *)arg + sizeof(struct pcpu) * PCPU_GET(cpuid));
	counter_u64_zero_one_8b(p);
}

static inline void
counter_u64_zero_inline(counter_u64_t c)
{
	int i;

	if ((cpu_feature & CPUID_CX8) == 0) {
		critical_enter();
		CPU_FOREACH(i)
			*(uint64_t *)((char *)c + sizeof(struct pcpu) * i) = 0;
		critical_exit();
	} else {
		smp_rendezvous(smp_no_rendevous_barrier,
		    counter_u64_zero_one_cpu, smp_no_rendevous_barrier, c);
	}
}
#endif
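
/*
 * counter_u64_add_protected() is meant for batching several updates:
 * counter(9) requires callers to bracket it with counter_enter() and
 * counter_exit(), which the CRITICAL_ASSERT() below checks on the
 * non-cmpxchg8b path.
 */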
#define	counter_u64_add_protected(c, inc)	do {	\
	if ((cpu_feature & CPUID_CX8) == 0) {		\
		CRITICAL_ASSERT(curthread);		\
		*(uint64_t *)zpcpu_get(c) += (inc);	\
	} else						\
		counter_64_inc_8b((c), (inc));		\
} while (0)

static inline void
counter_u64_add(counter_u64_t c, int64_t inc)
{

	if ((cpu_feature & CPUID_CX8) == 0) {
		critical_enter();
		*(uint64_t *)zpcpu_get(c) += inc;
		critical_exit();
	} else {
		counter_64_inc_8b(c, inc);
	}
}

#endif	/* ! __MACHINE_COUNTER_H__ */
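
/*
 * Usage sketch (illustrative only, not part of this header): consumers
 * go through the machine-independent wrappers declared in
 * <sys/counter.h> rather than calling the helpers above directly:
 *
 *	counter_u64_t cnt;
 *
 *	cnt = counter_u64_alloc(M_WAITOK);
 *	counter_u64_add(cnt, 1);
 *	printf("events: %ju\n", (uintmax_t)counter_u64_fetch(cnt));
 *	counter_u64_free(cnt);
 */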