/*-
 * Copyright (c) 2012 Konstantin Belousov <kib@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef __MACHINE_COUNTER_H__
#define __MACHINE_COUNTER_H__

#include <sys/pcpu.h>
#ifdef INVARIANTS
#include <sys/proc.h>
#endif
#include <machine/md_var.h>
#include <machine/specialreg.h>

/*
 * i386 machine-dependent primitives for 64-bit per-CPU counters.
 *
 * Two update strategies are selected at run time from cpu_feature:
 *
 *   - CPUs with CMPXCHG8B (CPUID_CX8 set) update the per-CPU slot with
 *     an atomic 8-byte compare-exchange loop, so no critical section is
 *     needed (counter_64_inc_8b() below).
 *
 *   - Older CPUs have no atomic 64-bit primitive, so the update is a
 *     plain 64-bit add performed inside a critical section, which keeps
 *     it atomic with respect to preemption on the local CPU.
 */

/*
 * Enter a critical section only on CPUs that lack CMPXCHG8B; on CX8
 * capable CPUs this is a no-op because the update path is atomic.
 */
#define counter_enter() do { \
	if ((cpu_feature & CPUID_CX8) == 0) \
		critical_enter(); \
} while (0)

/* Matching exit for counter_enter(); no-op on CX8-capable CPUs. */
#define counter_exit() do { \
	if ((cpu_feature & CPUID_CX8) == 0) \
		critical_exit(); \
} while (0)

/*
 * Atomically add 'inc' to the current CPU's 64-bit counter slot.
 *
 * 'p' is accessed with an %fs segment override, which directs the
 * load/store at the current CPU's private copy of the counter
 * (NOTE(review): this relies on %fs mapping the per-CPU area in the
 * i386 kernel — consistent with the zpcpu_get() fallback paths below).
 *
 * The current value is first read with two separate 32-bit loads into
 * %edx:%eax.  That read need not be atomic: if it tears, or another
 * update races in, CMPXCHG8B fails the comparison and the loop retries.
 * The incremented value is built in %ecx:%ebx from the in-memory copy
 * of 'inc' (via %edi), and CMPXCHG8B stores it only if the counter
 * still equals the value previously read.
 */
static inline void
counter_64_inc_8b(uint64_t *p, int64_t inc)
{

	__asm __volatile(
	"movl	%%fs:(%%esi),%%eax\n\t"
	"movl	%%fs:4(%%esi),%%edx\n"
"1:\n\t"
	"movl	%%eax,%%ebx\n\t"
	"movl	%%edx,%%ecx\n\t"
	"addl	(%%edi),%%ebx\n\t"
	"adcl	4(%%edi),%%ecx\n\t"
	"cmpxchg8b %%fs:(%%esi)\n\t"
	"jnz	1b"
	:
	: "S" (p), "D" (&inc)
	: "memory", "cc", "eax", "edx", "ebx", "ecx");
}

/*
 * Add 'inc' to counter 'c'; the caller must already hold the
 * protection established by counter_enter().  On non-CX8 CPUs that
 * means a critical section, which is asserted under INVARIANTS; on
 * CX8 CPUs the cmpxchg8b loop is used and no protection is required.
 */
#define counter_u64_add_protected(c, inc) do { \
	if ((cpu_feature & CPUID_CX8) == 0) { \
		CRITICAL_ASSERT(curthread); \
		*(uint64_t *)zpcpu_get(c) += (inc); \
	} else \
		counter_64_inc_8b((c), (inc)); \
} while (0)

/*
 * Self-contained counter add: same selection as above, but the
 * non-CX8 fallback brackets its plain 64-bit add with its own
 * critical section, so callers need no counter_enter()/counter_exit().
 */
static inline void
counter_u64_add(counter_u64_t c, int64_t inc)
{

	if ((cpu_feature & CPUID_CX8) == 0) {
		critical_enter();
		*(uint64_t *)zpcpu_get(c) += inc;
		critical_exit();
	} else {
		counter_64_inc_8b(c, inc);
	}
}

#endif	/* ! __MACHINE_COUNTER_H__ */