xref: /freebsd/sys/amd64/vmm/vmm_stat.h (revision e32fecd0c2c3ee37c47ee100f169e7eb0282a873)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 2011 NetApp, Inc.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. Neither the name of the University nor the names of its contributors
16  *    may be used to endorse or promote products derived from this software
17  *    without specific prior written permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29  * SUCH DAMAGE.
30  *
31  * $FreeBSD$
32  */
33 
34 #ifndef _VMM_STAT_H_
35 #define	_VMM_STAT_H_
36 
/*
 * Forward declarations: this header only ever uses pointers to these.
 * 'struct vcpu' was previously left undeclared, so its first mention in
 * the vmm_stat_func_t prototype below declared it at prototype scope,
 * which draws -Wvisibility style warnings.
 */
struct vm;
struct vcpu;

#define	MAX_VMM_STAT_ELEMS	64		/* arbitrary */
40 
/*
 * Identifies which hypervisor backend a statistic applies to.
 */
enum vmm_stat_scope {
	VMM_STAT_SCOPE_ANY,		/* applicable to any backend */
	VMM_STAT_SCOPE_INTEL,		/* Intel VMX specific statistic */
	VMM_STAT_SCOPE_AMD,		/* AMD SVM specific statistic */
};
46 
struct vmm_stat_type;
/*
 * Optional per-stat callback; presumably invoked to refresh the value
 * before it is copied out via vmm_stat_copy() -- TODO confirm against
 * vmm_stat.c.  May be NULL (VMM_STAT_DEFINE passes NULL).
 */
typedef void (*vmm_stat_func_t)(struct vcpu *vcpu,
    struct vmm_stat_type *stat);

struct vmm_stat_type {
	int	index;			/* position in the stats buffer; -1 until registered */
	int	nelems;			/* standalone (1) or array (>1) */
	const char *desc;		/* description of statistic */
	vmm_stat_func_t func;		/* optional callback, may be NULL */
	enum vmm_stat_scope scope;	/* backend the stat applies to */
};
58 
/*
 * SYSINIT hook run at module load for each VMM_STAT_*DEFINE'd stat;
 * 'arg' is the vmm_stat_type.  Presumably assigns 'index' (defined as
 * -1 above) -- see vmm_stat.c.
 */
void	vmm_stat_register(void *arg);
60 
/*
 * Define a statistic with an associated callback and arrange for it to
 * be registered at module load via SYSINIT.  The object is a
 * one-element array so that the bare name decays to a pointer, matching
 * VMM_STAT_DECLARE below.  'index' starts at -1 (unregistered); the
 * inline accessors below check 'index >= 0' before touching the buffer.
 */
#define	VMM_STAT_FDEFINE(type, nelems, desc, func, scope)		\
	struct vmm_stat_type type[1] = {				\
		{ -1, nelems, desc, func, scope }			\
	};								\
	SYSINIT(type##_stat, SI_SUB_KLD, SI_ORDER_ANY, vmm_stat_register, type)
66 
/*
 * Define and register a statistic with no callback.  (Tab after
 * '#define' and no trailing blanks, matching the file's other macros.)
 */
#define	VMM_STAT_DEFINE(type, nelems, desc, scope)			\
	VMM_STAT_FDEFINE(type, nelems, desc, NULL, scope)
69 
/* Declare a stat defined in another translation unit (array-of-one,
 * mirroring VMM_STAT_FDEFINE's definition). */
#define	VMM_STAT_DECLARE(type)						\
	extern struct vmm_stat_type type[1]
72 
/* Convenience wrappers: scalar stats for each scope, a scalar stat with
 * a callback, and an array stat. */
#define	VMM_STAT(type, desc)		\
	VMM_STAT_DEFINE(type, 1, desc, VMM_STAT_SCOPE_ANY)
#define	VMM_STAT_INTEL(type, desc)	\
	VMM_STAT_DEFINE(type, 1, desc, VMM_STAT_SCOPE_INTEL)
#define	VMM_STAT_AMD(type, desc)	\
	VMM_STAT_DEFINE(type, 1, desc, VMM_STAT_SCOPE_AMD)

#define	VMM_STAT_FUNC(type, desc, func)	\
	VMM_STAT_FDEFINE(type, 1, desc, func, VMM_STAT_SCOPE_ANY)

#define	VMM_STAT_ARRAY(type, nelems, desc)	\
	VMM_STAT_DEFINE(type, nelems, desc, VMM_STAT_SCOPE_ANY)
85 
/* Lifecycle of a stats buffer (per vcpu, given vcpu_stats() below --
 * TODO confirm ownership in vmm_stat.c). */
void	*vmm_stat_alloc(void);
void	vmm_stat_init(void *vp);
void 	vmm_stat_free(void *vp);

/* Copy up to 'count' stat values starting at 'index' into 'buf';
 * '*num_stats' reports how many were copied -- presumably, verify
 * against vmm_stat.c. */
int	vmm_stat_copy(struct vcpu *vcpu, int index, int count,
	    int *num_stats, uint64_t *buf);
/* Copy the description string for the stat at 'index' into 'buf'. */
int	vmm_stat_desc_copy(int index, char *buf, int buflen);
93 
/*
 * Add 'x' to element 'statidx' of array statistic 'vst' for 'vcpu'.
 * A no-op when stats are compiled out (VMM_KEEP_STATS undefined), when
 * the stat has not been registered yet (index == -1), or when 'statidx'
 * is out of range.
 */
static void __inline
vmm_stat_array_incr(struct vcpu *vcpu, struct vmm_stat_type *vst, int statidx,
    uint64_t x)
{
#ifdef VMM_KEEP_STATS
	uint64_t *stats;

	stats = vcpu_stats(vcpu);

	/*
	 * Check both bounds: the previous test only rejected
	 * statidx >= nelems, so a negative statidx would write before
	 * the start of the stats buffer.
	 */
	if (vst->index >= 0 && statidx >= 0 && statidx < vst->nelems)
		stats[vst->index + statidx] += x;
#endif
}
107 
/*
 * Set element 'statidx' of array statistic 'vst' for 'vcpu' to 'val'.
 * A no-op when stats are compiled out (VMM_KEEP_STATS undefined), when
 * the stat has not been registered yet (index == -1), or when 'statidx'
 * is out of range.
 */
static void __inline
vmm_stat_array_set(struct vcpu *vcpu, struct vmm_stat_type *vst, int statidx,
    uint64_t val)
{
#ifdef VMM_KEEP_STATS
	uint64_t *stats;

	stats = vcpu_stats(vcpu);

	/*
	 * Check both bounds: the previous test only rejected
	 * statidx >= nelems, so a negative statidx would write before
	 * the start of the stats buffer.
	 */
	if (vst->index >= 0 && statidx >= 0 && statidx < vst->nelems)
		stats[vst->index + statidx] = val;
#endif
}
121 
/*
 * Increment scalar statistic 'vst' by 'x'.  Wrapper around the array
 * form with statidx 0; compiles to nothing unless VMM_KEEP_STATS is
 * defined (the #ifdef here is belt-and-suspenders -- the callee is
 * already empty in that case).
 */
static void __inline
vmm_stat_incr(struct vcpu *vcpu, struct vmm_stat_type *vst, uint64_t x)
{

#ifdef VMM_KEEP_STATS
	vmm_stat_array_incr(vcpu, vst, 0, x);
#endif
}
130 
/*
 * Set scalar statistic 'vst' to 'val'.  Wrapper around the array form
 * with statidx 0; compiles to nothing unless VMM_KEEP_STATS is defined
 * (the #ifdef here is belt-and-suspenders -- the callee is already
 * empty in that case).
 */
static void __inline
vmm_stat_set(struct vcpu *vcpu, struct vmm_stat_type *vst, uint64_t val)
{

#ifdef VMM_KEEP_STATS
	vmm_stat_array_set(vcpu, vst, 0, val);
#endif
}
139 
/*
 * Statistics maintained by the generic vmm code -- defined elsewhere
 * (presumably vmm_stat.c / the backend exit handlers; TODO confirm).
 */
VMM_STAT_DECLARE(VCPU_MIGRATIONS);
VMM_STAT_DECLARE(VMEXIT_COUNT);
VMM_STAT_DECLARE(VMEXIT_EXTINT);
VMM_STAT_DECLARE(VMEXIT_HLT);
VMM_STAT_DECLARE(VMEXIT_CR_ACCESS);
VMM_STAT_DECLARE(VMEXIT_RDMSR);
VMM_STAT_DECLARE(VMEXIT_WRMSR);
VMM_STAT_DECLARE(VMEXIT_MTRAP);
VMM_STAT_DECLARE(VMEXIT_PAUSE);
VMM_STAT_DECLARE(VMEXIT_INTR_WINDOW);
VMM_STAT_DECLARE(VMEXIT_NMI_WINDOW);
VMM_STAT_DECLARE(VMEXIT_INOUT);
VMM_STAT_DECLARE(VMEXIT_CPUID);
VMM_STAT_DECLARE(VMEXIT_NESTED_FAULT);
VMM_STAT_DECLARE(VMEXIT_INST_EMUL);
VMM_STAT_DECLARE(VMEXIT_UNKNOWN);
VMM_STAT_DECLARE(VMEXIT_ASTPENDING);
VMM_STAT_DECLARE(VMEXIT_USERSPACE);
VMM_STAT_DECLARE(VMEXIT_RENDEZVOUS);
VMM_STAT_DECLARE(VMEXIT_EXCEPTION);
VMM_STAT_DECLARE(VMEXIT_REQIDLE);
161 #endif
162