/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Copyright 2018 Joyent, Inc.
 */

#ifndef _VMM_STAT_H_
#define	_VMM_STAT_H_

#include <machine/vmm.h>

struct vm;

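/*
 * Cap on the total number of uint64_t counters that may be registered for
 * a vCPU's statistics buffer.  The non-FreeBSD build adds VM_MAXCPU on top
 * of the historical limit, presumably to leave headroom for array-valued
 * statistics sized by the number of vCPUs.
 */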
#ifdef __FreeBSD__
#define	MAX_VMM_STAT_ELEMS	64			/* arbitrary */
#else
#define	MAX_VMM_STAT_ELEMS	(64 + VM_MAXCPU)	/* arbitrary */
#endif

enum vmm_stat_scope {
	VMM_STAT_SCOPE_ANY,
	VMM_STAT_SCOPE_INTEL,		/* Intel VMX specific statistic */
	VMM_STAT_SCOPE_AMD,		/* AMD SVM specific statistic */
};

struct vmm_stat_type;
typedef void (*vmm_stat_func_t)(struct vm *vm, int vcpu,
    struct vmm_stat_type *stat);

struct vmm_stat_type {
	int	index;			/* position in the stats buffer */
	int	nelems;			/* standalone or array */
	const char *desc;		/* description of statistic */
	vmm_stat_func_t func;
	enum vmm_stat_scope scope;
};

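/*
 * vmm_stat_register() is expected to assign a registered type its slot in
 * the per-vCPU stats buffer by filling in 'index' (left at -1 by
 * VMM_STAT_FDEFINE below); the inline accessors further down quietly skip
 * any type that was never registered.
 */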
void	vmm_stat_register(void *arg);

#define	VMM_STAT_FDEFINE(type, nelems, desc, func, scope)		\
	struct vmm_stat_type type[1] = {				\
		{ -1, nelems, desc, func, scope }			\
	};								\
	SYSINIT(type##_stat, SI_SUB_KLD, SI_ORDER_ANY, vmm_stat_register, type)

#define	VMM_STAT_DEFINE(type, nelems, desc, scope)			\
	VMM_STAT_FDEFINE(type, nelems, desc, NULL, scope)

#define	VMM_STAT_DECLARE(type)						\
	extern struct vmm_stat_type type[1]

#define	VMM_STAT(type, desc)		\
	VMM_STAT_DEFINE(type, 1, desc, VMM_STAT_SCOPE_ANY)
#define	VMM_STAT_INTEL(type, desc)	\
	VMM_STAT_DEFINE(type, 1, desc, VMM_STAT_SCOPE_INTEL)
#define	VMM_STAT_AMD(type, desc)	\
	VMM_STAT_DEFINE(type, 1, desc, VMM_STAT_SCOPE_AMD)

#define	VMM_STAT_FUNC(type, desc, func)	\
	VMM_STAT_FDEFINE(type, 1, desc, func, VMM_STAT_SCOPE_ANY)

#define	VMM_STAT_ARRAY(type, nelems, desc)	\
	VMM_STAT_DEFINE(type, nelems, desc, VMM_STAT_SCOPE_ANY)

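/*
 * Illustrative sketch only; the statistic names below are hypothetical.
 * A backend typically defines a counter in one source file:
 *
 *	VMM_STAT(VMEXIT_FOO, "vm exits due to foo");
 *	VMM_STAT_ARRAY(FOO_PER_VCPU, VM_MAXCPU, "foo delivered to vcpu");
 *
 * declares it in any other file that updates it:
 *
 *	VMM_STAT_DECLARE(VMEXIT_FOO);
 *
 * and bumps it with the inline helpers defined later in this header:
 *
 *	vmm_stat_incr(vm, vcpuid, VMEXIT_FOO, 1);
 *	vmm_stat_array_incr(vm, vcpuid, FOO_PER_VCPU, dest_vcpuid, 1);
 *
 * VMM_STAT_FDEFINE registers the type via SYSINIT at module load; a
 * non-NULL 'func' lets the statistic refresh its value when it is copied
 * out by vmm_stat_copy() below.
 */
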
void	*vmm_stat_alloc(void);
void	vmm_stat_init(void *vp);
void	vmm_stat_free(void *vp);

int	vmm_stat_copy(struct vm *vm, int vcpu, int index, int count,
	    int *num_stats, uint64_t *buf);
int	vmm_stat_desc_copy(int index, char *buf, int buflen);

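/*
 * Hypothetical consumer sketch: snapshot one vCPU's counters and resolve
 * their descriptions.  Buffer sizes and error handling are illustrative
 * only; see vmm_stat.c for the exact copy semantics.
 *
 *	uint64_t buf[MAX_VMM_STAT_ELEMS];
 *	char desc[128];
 *	int i, num = 0;
 *
 *	if (vmm_stat_copy(vm, vcpuid, 0, MAX_VMM_STAT_ELEMS, &num, buf) == 0) {
 *		for (i = 0; i < num; i++)
 *			(void) vmm_stat_desc_copy(i, desc, sizeof (desc));
 *	}
 */
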
static __inline void
vmm_stat_array_incr(struct vm *vm, int vcpu, struct vmm_stat_type *vst,
    int statidx, uint64_t x)
{
#ifdef VMM_KEEP_STATS
	uint64_t *stats;

	stats = vcpu_stats(vm, vcpu);

	if (vst->index >= 0 && statidx < vst->nelems)
		stats[vst->index + statidx] += x;
#endif
}

static __inline void
vmm_stat_array_set(struct vm *vm, int vcpu, struct vmm_stat_type *vst,
    int statidx, uint64_t val)
{
#ifdef VMM_KEEP_STATS
	uint64_t *stats;

	stats = vcpu_stats(vm, vcpu);

	if (vst->index >= 0 && statidx < vst->nelems)
		stats[vst->index + statidx] = val;
#endif
}

static __inline void
vmm_stat_incr(struct vm *vm, int vcpu, struct vmm_stat_type *vst, uint64_t x)
{
#ifdef VMM_KEEP_STATS
	vmm_stat_array_incr(vm, vcpu, vst, 0, x);
#endif
}

static __inline void
vmm_stat_set(struct vm *vm, int vcpu, struct vmm_stat_type *vst, uint64_t val)
{
#ifdef VMM_KEEP_STATS
	vmm_stat_array_set(vm, vcpu, vst, 0, val);
#endif
}

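/*
 * Hypothetical call site: a VM-exit handler bumping the counters declared
 * below.  When VMM_KEEP_STATS is not defined the inline helpers above have
 * empty bodies, so callers need no conditional compilation of their own.
 *
 *	vmm_stat_incr(vm, vcpuid, VMEXIT_COUNT, 1);
 *	if (exit_was_hlt)
 *		vmm_stat_incr(vm, vcpuid, VMEXIT_HLT, 1);
 */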
VMM_STAT_DECLARE(VCPU_MIGRATIONS);
VMM_STAT_DECLARE(VMEXIT_COUNT);
VMM_STAT_DECLARE(VMEXIT_EXTINT);
VMM_STAT_DECLARE(VMEXIT_HLT);
VMM_STAT_DECLARE(VMEXIT_CR_ACCESS);
VMM_STAT_DECLARE(VMEXIT_RDMSR);
VMM_STAT_DECLARE(VMEXIT_WRMSR);
VMM_STAT_DECLARE(VMEXIT_MTRAP);
VMM_STAT_DECLARE(VMEXIT_PAUSE);
VMM_STAT_DECLARE(VMEXIT_INTR_WINDOW);
VMM_STAT_DECLARE(VMEXIT_NMI_WINDOW);
VMM_STAT_DECLARE(VMEXIT_INOUT);
VMM_STAT_DECLARE(VMEXIT_CPUID);
VMM_STAT_DECLARE(VMEXIT_NESTED_FAULT);
VMM_STAT_DECLARE(VMEXIT_MMIO_EMUL);
VMM_STAT_DECLARE(VMEXIT_UNKNOWN);
VMM_STAT_DECLARE(VMEXIT_ASTPENDING);
VMM_STAT_DECLARE(VMEXIT_EXCEPTION);
VMM_STAT_DECLARE(VMEXIT_REQIDLE);
VMM_STAT_DECLARE(VMEXIT_RUN_STATE);
#endif
168