xref: /freebsd/sys/amd64/vmm/vmm_stat.h (revision 4543ef516683042d46f3bd3bb8a4f3f746e00499)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 2011 NetApp, Inc.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. Neither the name of the University nor the names of its contributors
16  *    may be used to endorse or promote products derived from this software
17  *    without specific prior written permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29  * SUCH DAMAGE.
30  */
31 
32 #ifndef _VMM_STAT_H_
33 #define	_VMM_STAT_H_
34 
struct vm;
/*
 * Forward-declare struct vcpu as well: the callback typedef and the
 * inline helpers below all take a struct vcpu pointer.  Without this
 * declaration the tag would get prototype-local scope on first use,
 * drawing a -Wvisibility style warning and making the header depend on
 * the includer having declared it first.
 */
struct vcpu;

#define	MAX_VMM_STAT_ELEMS	64		/* arbitrary */

/*
 * A statistic either applies to any hypervisor backend or is specific
 * to one vendor's virtualization extension.
 */
enum vmm_stat_scope {
	VMM_STAT_SCOPE_ANY,		/* valid on every backend */
	VMM_STAT_SCOPE_INTEL,		/* Intel VMX specific statistic */
	VMM_STAT_SCOPE_AMD,		/* AMD SVM specific statistic */
};
44 
struct vmm_stat_type;
/*
 * Optional per-stat callback; presumably invoked to refresh the value
 * before it is read out (see vmm_stat_copy() in vmm_stat.c — confirm).
 */
typedef void (*vmm_stat_func_t)(struct vcpu *vcpu,
    struct vmm_stat_type *stat);

/*
 * Descriptor for one statistic (or a contiguous array of statistics).
 * The counter values themselves live in a per-vcpu buffer; 'index' is
 * -1 until vmm_stat_register() assigns the stat a slot in that buffer.
 */
struct vmm_stat_type {
	int	index;			/* position in the stats buffer */
	int	nelems;			/* standalone or array */
	const char *desc;		/* description of statistic */
	vmm_stat_func_t func;		/* NULL, or callback (see above) */
	enum vmm_stat_scope scope;	/* backend the stat applies to */
};
56 
/*
 * Registers the struct vmm_stat_type passed in 'arg'; presumably this
 * assigns its 'index' slot (initialized to -1 below) in the per-vcpu
 * stats buffer.  Scheduled via SYSINIT by VMM_STAT_FDEFINE() so it runs
 * automatically at module load.
 */
void	vmm_stat_register(void *arg);

/*
 * Define (and auto-register) a statistic.  The object is a one-element
 * array so that the bare name decays to a struct vmm_stat_type *, which
 * lets VMM_STAT_DECLARE()d names be passed directly to the inline
 * helpers below.
 */
#define	VMM_STAT_FDEFINE(type, nelems, desc, func, scope)		\
	struct vmm_stat_type type[1] = {				\
		{ -1, nelems, desc, func, scope }			\
	};								\
	SYSINIT(type##_stat, SI_SUB_KLD, SI_ORDER_ANY, vmm_stat_register, type)

/* As above, without a callback function. */
#define VMM_STAT_DEFINE(type, nelems, desc, scope) 			\
	VMM_STAT_FDEFINE(type, nelems, desc, NULL, scope)

/* Reference a statistic defined in another compilation unit. */
#define	VMM_STAT_DECLARE(type)						\
	extern struct vmm_stat_type type[1]
70 
/*
 * Convenience wrappers: a scalar statistic (nelems == 1), either valid
 * on any backend or restricted to Intel VMX / AMD SVM.
 */
#define	VMM_STAT(type, desc)		\
	VMM_STAT_DEFINE(type, 1, desc, VMM_STAT_SCOPE_ANY)
#define	VMM_STAT_INTEL(type, desc)	\
	VMM_STAT_DEFINE(type, 1, desc, VMM_STAT_SCOPE_INTEL)
#define	VMM_STAT_AMD(type, desc)	\
	VMM_STAT_DEFINE(type, 1, desc, VMM_STAT_SCOPE_AMD)

/* Scalar statistic with a callback (see vmm_stat_func_t). */
#define	VMM_STAT_FUNC(type, desc, func)	\
	VMM_STAT_FDEFINE(type, 1, desc, func, VMM_STAT_SCOPE_ANY)

/* Multi-element (array) statistic. */
#define	VMM_STAT_ARRAY(type, nelems, desc)	\
	VMM_STAT_DEFINE(type, nelems, desc, VMM_STAT_SCOPE_ANY)
83 
/* Allocate / initialize / release a per-vcpu statistics buffer. */
void	*vmm_stat_alloc(void);
void	vmm_stat_init(void *vp);
void 	vmm_stat_free(void *vp);

/*
 * Copy statistic values out of 'vcpu' into 'buf' — presumably 'count'
 * values starting at 'index', with the number copied returned through
 * 'num_stats'; see vmm_stat.c for the exact contract.  Returns 0 on
 * success or an errno value.
 */
int	vmm_stat_copy(struct vcpu *vcpu, int index, int count,
	    int *num_stats, uint64_t *buf);
/* Copy the text description of statistic 'index' into 'buf'. */
int	vmm_stat_desc_copy(int index, char *buf, int buflen);
91 
/*
 * Add 'x' to element 'statidx' of the array statistic 'vst' on this
 * vcpu.  Quietly does nothing if the stat was never registered
 * (index == -1) or 'statidx' is beyond the end of the array.  Compiles
 * away entirely unless VMM_KEEP_STATS is defined.
 */
static __inline void
vmm_stat_array_incr(struct vcpu *vcpu, struct vmm_stat_type *vst, int statidx,
    uint64_t x)
{
#ifdef VMM_KEEP_STATS
	uint64_t *counters = vcpu_stats(vcpu);

	if (vst->index < 0 || statidx >= vst->nelems)
		return;
	counters[vst->index + statidx] += x;
#endif
}
105 
/*
 * Store 'val' into element 'statidx' of the array statistic 'vst' on
 * this vcpu.  Quietly does nothing if the stat was never registered
 * (index == -1) or 'statidx' is beyond the end of the array.  Compiles
 * away entirely unless VMM_KEEP_STATS is defined.
 */
static __inline void
vmm_stat_array_set(struct vcpu *vcpu, struct vmm_stat_type *vst, int statidx,
    uint64_t val)
{
#ifdef VMM_KEEP_STATS
	uint64_t *counters = vcpu_stats(vcpu);

	if (vst->index < 0 || statidx >= vst->nelems)
		return;
	counters[vst->index + statidx] = val;
#endif
}
119 
/*
 * Increment a scalar statistic by 'x'.  Scalars are one-element arrays
 * (see VMM_STAT_FDEFINE), so this is simply slot 0 of the array form.
 */
static __inline void
vmm_stat_incr(struct vcpu *vcpu, struct vmm_stat_type *vst, uint64_t x)
{
#ifdef VMM_KEEP_STATS
	vmm_stat_array_incr(vcpu, vst, 0, x);
#endif
}
128 
/*
 * Set a scalar statistic to 'val'.  Scalars are one-element arrays
 * (see VMM_STAT_FDEFINE), so this is simply slot 0 of the array form.
 */
static __inline void
vmm_stat_set(struct vcpu *vcpu, struct vmm_stat_type *vst, uint64_t val)
{
#ifdef VMM_KEEP_STATS
	vmm_stat_array_set(vcpu, vst, 0, val);
#endif
}
137 
/*
 * Statistics maintained by the generic vmm code.  These are extern
 * declarations only; each has a matching VMM_STAT()/VMM_STAT_DEFINE()
 * instantiation in another compilation unit.
 */
VMM_STAT_DECLARE(VCPU_MIGRATIONS);
VMM_STAT_DECLARE(VMEXIT_COUNT);
VMM_STAT_DECLARE(VMEXIT_EXTINT);
VMM_STAT_DECLARE(VMEXIT_HLT);
VMM_STAT_DECLARE(VMEXIT_CR_ACCESS);
VMM_STAT_DECLARE(VMEXIT_RDMSR);
VMM_STAT_DECLARE(VMEXIT_WRMSR);
VMM_STAT_DECLARE(VMEXIT_MTRAP);
VMM_STAT_DECLARE(VMEXIT_PAUSE);
VMM_STAT_DECLARE(VMEXIT_INTR_WINDOW);
VMM_STAT_DECLARE(VMEXIT_NMI_WINDOW);
VMM_STAT_DECLARE(VMEXIT_INOUT);
VMM_STAT_DECLARE(VMEXIT_CPUID);
VMM_STAT_DECLARE(VMEXIT_NESTED_FAULT);
VMM_STAT_DECLARE(VMEXIT_INST_EMUL);
VMM_STAT_DECLARE(VMEXIT_UNKNOWN);
VMM_STAT_DECLARE(VMEXIT_ASTPENDING);
VMM_STAT_DECLARE(VMEXIT_USERSPACE);
VMM_STAT_DECLARE(VMEXIT_RENDEZVOUS);
VMM_STAT_DECLARE(VMEXIT_EXCEPTION);
VMM_STAT_DECLARE(VMEXIT_REQIDLE);
#endif	/* _VMM_STAT_H_ */
160