xref: /freebsd/sys/amd64/vmm/vmm_stat.h (revision 657729a89dd578d8cfc70d6616f5c65a48a8b33a)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 2011 NetApp, Inc.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. Neither the name of the University nor the names of its contributors
16  *    may be used to endorse or promote products derived from this software
17  *    without specific prior written permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29  * SUCH DAMAGE.
30  *
31  * $FreeBSD$
32  */
33 
34 #ifndef _VMM_STAT_H_
35 #define	_VMM_STAT_H_
36 
struct vm;

/* Upper bound on slots in the per-vcpu stats buffer (counting array elems). */
#define	MAX_VMM_STAT_ELEMS	64		/* arbitrary */
40 
/*
 * Classifies a statistic as generic or specific to one hypervisor
 * implementation (Intel VMX or AMD SVM).
 */
enum vmm_stat_scope {
	VMM_STAT_SCOPE_ANY,		/* meaningful on any hypervisor */
	VMM_STAT_SCOPE_INTEL,		/* Intel VMX specific statistic */
	VMM_STAT_SCOPE_AMD,		/* AMD SVM specific statistic */
};
46 
struct vmm_stat_type;

/*
 * Optional per-stat callback.  NOTE(review): presumably invoked to
 * compute/refresh the stat's value on demand — confirm in vmm_stat.c.
 */
typedef void (*vmm_stat_func_t)(struct vcpu *vcpu,
    struct vmm_stat_type *stat);

/*
 * Descriptor for a single statistic (or a contiguous array of them).
 * 'index' is -1 until the stat is registered (see VMM_STAT_FDEFINE).
 */
struct vmm_stat_type {
	int	index;			/* position in the stats buffer */
	int	nelems;			/* standalone or array */
	const char *desc;		/* description of statistic */
	vmm_stat_func_t func;		/* optional callback, may be NULL */
	enum vmm_stat_scope scope;	/* ANY, INTEL or AMD */
};
58 
/* SYSINIT hook that registers a 'struct vmm_stat_type *' passed as 'arg'. */
void	vmm_stat_register(void *arg);

/*
 * Sentinel for 'nelems' meaning "one element per vcpu".
 * NOTE(review): presumably resolved to the real vcpu count at
 * registration time — confirm against vmm_stat.c.
 */
#define	VMM_STAT_NELEMS_VCPU	(-1)

/*
 * Define a statistic and register it at module load via SYSINIT.
 * 'index' starts at -1 ("not yet registered"; the inline accessors
 * below check index >= 0 before touching the stats buffer).
 * Declaring 'type' as a one-element array lets it decay to a
 * 'struct vmm_stat_type *' wherever it is used.
 */
#define	VMM_STAT_FDEFINE(type, nelems, desc, func, scope)		\
	struct vmm_stat_type type[1] = {				\
		{ -1, nelems, desc, func, scope }			\
	};								\
	SYSINIT(type##_stat, SI_SUB_KLD, SI_ORDER_ANY, vmm_stat_register, type)

/* As VMM_STAT_FDEFINE() but with no per-stat callback. */
#define VMM_STAT_DEFINE(type, nelems, desc, scope) 			\
	VMM_STAT_FDEFINE(type, nelems, desc, NULL, scope)

/* Header-side counterpart of VMM_STAT_DEFINE()/VMM_STAT_FDEFINE(). */
#define	VMM_STAT_DECLARE(type)						\
	extern struct vmm_stat_type type[1]
74 
/* Single-element statistic, applicable to any hypervisor. */
#define	VMM_STAT(type, desc)		\
	VMM_STAT_DEFINE(type, 1, desc, VMM_STAT_SCOPE_ANY)
/* Single-element statistic specific to Intel VMX. */
#define	VMM_STAT_INTEL(type, desc)	\
	VMM_STAT_DEFINE(type, 1, desc, VMM_STAT_SCOPE_INTEL)
/* Single-element statistic specific to AMD SVM. */
#define	VMM_STAT_AMD(type, desc)	\
	VMM_STAT_DEFINE(type, 1, desc, VMM_STAT_SCOPE_AMD)

/* Single-element statistic with a per-stat callback. */
#define	VMM_STAT_FUNC(type, desc, func)	\
	VMM_STAT_FDEFINE(type, 1, desc, func, VMM_STAT_SCOPE_ANY)

/* Array statistic with 'nelems' elements. */
#define	VMM_STAT_ARRAY(type, nelems, desc)	\
	VMM_STAT_DEFINE(type, nelems, desc, VMM_STAT_SCOPE_ANY)
87 
/*
 * Per-vcpu stats buffer lifecycle: allocate, (re)initialize, free.
 * NOTE(review): exact init/alloc semantics live in vmm_stat.c.
 */
void	*vmm_stat_alloc(void);
void	vmm_stat_init(void *vp);
void 	vmm_stat_free(void *vp);

/*
 * Copy up to 'count' stat values starting at 'index' from 'vcpu' into
 * 'buf', reporting the number copied through 'num_stats'.
 * NOTE(review): return convention (0 / errno per usual kernel style)
 * — confirm in vmm_stat.c.
 */
int	vmm_stat_copy(struct vcpu *vcpu, int index, int count,
	    int *num_stats, uint64_t *buf);
/* Copy the description string of statistic 'index' into 'buf'. */
int	vmm_stat_desc_copy(int index, char *buf, int buflen);
95 
/*
 * Add 'x' to element 'statidx' of array statistic 'vst' for 'vcpu'.
 *
 * Silently does nothing if the stat has not been registered yet
 * (vst->index < 0) or 'statidx' is out of range; compiles to a no-op
 * when the kernel is built without VMM_KEEP_STATS.
 */
static void __inline
vmm_stat_array_incr(struct vcpu *vcpu, struct vmm_stat_type *vst, int statidx,
    uint64_t x)
{
#ifdef VMM_KEEP_STATS
	uint64_t *stats;

	stats = vcpu_stats(vcpu);

	/*
	 * Also reject a negative 'statidx': the previous check only
	 * bounded it from above, so a negative index would update
	 * memory before the start of the per-vcpu stats buffer.
	 */
	if (vst->index >= 0 && statidx >= 0 && statidx < vst->nelems)
		stats[vst->index + statidx] += x;
#endif
}
109 
/*
 * Set element 'statidx' of array statistic 'vst' for 'vcpu' to 'val'.
 *
 * Silently does nothing if the stat has not been registered yet
 * (vst->index < 0) or 'statidx' is out of range; compiles to a no-op
 * when the kernel is built without VMM_KEEP_STATS.
 */
static void __inline
vmm_stat_array_set(struct vcpu *vcpu, struct vmm_stat_type *vst, int statidx,
    uint64_t val)
{
#ifdef VMM_KEEP_STATS
	uint64_t *stats;

	stats = vcpu_stats(vcpu);

	/*
	 * Also reject a negative 'statidx': the previous check only
	 * bounded it from above, so a negative index would write
	 * before the start of the per-vcpu stats buffer.
	 */
	if (vst->index >= 0 && statidx >= 0 && statidx < vst->nelems)
		stats[vst->index + statidx] = val;
#endif
}
123 
/*
 * Bump scalar statistic 'vst' for 'vcpu' by 'x'.  A scalar stat is
 * just a one-element array, so delegate to the array variant.
 */
static __inline void
vmm_stat_incr(struct vcpu *vcpu, struct vmm_stat_type *vst, uint64_t x)
{
#ifdef VMM_KEEP_STATS
	vmm_stat_array_incr(vcpu, vst, 0, x);
#endif
}
132 
/*
 * Set scalar statistic 'vst' for 'vcpu' to 'val'.  A scalar stat is
 * just a one-element array, so delegate to the array variant.
 */
static __inline void
vmm_stat_set(struct vcpu *vcpu, struct vmm_stat_type *vst, uint64_t val)
{
#ifdef VMM_KEEP_STATS
	vmm_stat_array_set(vcpu, vst, 0, val);
#endif
}
141 
/* Statistics maintained by the hypervisor-independent vmm code. */
VMM_STAT_DECLARE(VCPU_MIGRATIONS);
/* VM-exit counters, broken down by exit reason. */
VMM_STAT_DECLARE(VMEXIT_COUNT);
VMM_STAT_DECLARE(VMEXIT_EXTINT);
VMM_STAT_DECLARE(VMEXIT_HLT);
VMM_STAT_DECLARE(VMEXIT_CR_ACCESS);
VMM_STAT_DECLARE(VMEXIT_RDMSR);
VMM_STAT_DECLARE(VMEXIT_WRMSR);
VMM_STAT_DECLARE(VMEXIT_MTRAP);
VMM_STAT_DECLARE(VMEXIT_PAUSE);
VMM_STAT_DECLARE(VMEXIT_INTR_WINDOW);
VMM_STAT_DECLARE(VMEXIT_NMI_WINDOW);
VMM_STAT_DECLARE(VMEXIT_INOUT);
VMM_STAT_DECLARE(VMEXIT_CPUID);
VMM_STAT_DECLARE(VMEXIT_NESTED_FAULT);
VMM_STAT_DECLARE(VMEXIT_INST_EMUL);
VMM_STAT_DECLARE(VMEXIT_UNKNOWN);
VMM_STAT_DECLARE(VMEXIT_ASTPENDING);
VMM_STAT_DECLARE(VMEXIT_USERSPACE);
VMM_STAT_DECLARE(VMEXIT_RENDEZVOUS);
VMM_STAT_DECLARE(VMEXIT_EXCEPTION);
VMM_STAT_DECLARE(VMEXIT_REQIDLE);
#endif	/* _VMM_STAT_H_ */
164