/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _VMM_STAT_H_
#define	_VMM_STAT_H_

struct vm;

#define	MAX_VMM_STAT_ELEMS	64		/* arbitrary */

enum vmm_stat_scope {
	VMM_STAT_SCOPE_ANY,
	VMM_STAT_SCOPE_INTEL,		/* Intel VMX specific statistic */
	VMM_STAT_SCOPE_AMD,		/* AMD SVM specific statistic */
};

struct vmm_stat_type;
typedef void (*vmm_stat_func_t)(struct vm *vm, int vcpu,
    struct vmm_stat_type *stat);

struct vmm_stat_type {
	int	index;			/* position in the stats buffer */
	int	nelems;			/* standalone or array */
	const char *desc;		/* description of statistic */
	vmm_stat_func_t func;
	enum vmm_stat_scope scope;
};

void	vmm_stat_register(void *arg);

#define	VMM_STAT_FDEFINE(type, nelems, desc, func, scope)		\
	struct vmm_stat_type type[1] = {				\
		{ -1, nelems, desc, func, scope }			\
	};								\
	SYSINIT(type##_stat, SI_SUB_KLD, SI_ORDER_ANY, vmm_stat_register, type)

#define	VMM_STAT_DEFINE(type, nelems, desc, scope)			\
	VMM_STAT_FDEFINE(type, nelems, desc, NULL, scope)

#define	VMM_STAT_DECLARE(type)						\
	extern struct vmm_stat_type type[1]

#define	VMM_STAT(type, desc)		\
	VMM_STAT_DEFINE(type, 1, desc, VMM_STAT_SCOPE_ANY)
#define	VMM_STAT_INTEL(type, desc)	\
	VMM_STAT_DEFINE(type, 1, desc, VMM_STAT_SCOPE_INTEL)
#define	VMM_STAT_AMD(type, desc)	\
	VMM_STAT_DEFINE(type, 1, desc, VMM_STAT_SCOPE_AMD)

#define	VMM_STAT_FUNC(type, desc, func)	\
	VMM_STAT_FDEFINE(type, 1, desc, func, VMM_STAT_SCOPE_ANY)

#define	VMM_STAT_ARRAY(type, nelems, desc)	\
	VMM_STAT_DEFINE(type, nelems, desc, VMM_STAT_SCOPE_ANY)
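
/*
 * Illustrative sketch of how the macros above are typically used; the
 * statistic names and descriptions below are hypothetical and not part of
 * this interface.
 *
 * In a vmm .c file, define the statistic.  The SYSINIT emitted by
 * VMM_STAT_FDEFINE registers it with vmm_stat_register() at module load,
 * which assigns its 'index' into the per-vcpu stats buffer:
 *
 *	VMM_STAT(VMEXIT_EXAMPLE, "vm exits due to an example condition");
 *	VMM_STAT_INTEL(VMEXIT_EXAMPLE_VMX, "example VMX-only exit");
 *	VMM_STAT_ARRAY(EXAMPLE_PER_VCPU, 16, "example per-destination array");
 *
 * In a shared header, make the statistic visible to other vmm files:
 *
 *	VMM_STAT_DECLARE(VMEXIT_EXAMPLE);
 */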

void	*vmm_stat_alloc(void);
void	vmm_stat_init(void *vp);
void	vmm_stat_free(void *vp);

/*
 * 'buf' must be large enough to hold 'MAX_VMM_STAT_ELEMS' entries.
 */
int	vmm_stat_copy(struct vm *vm, int vcpu, int *num_stats, uint64_t *buf);
int	vmm_stat_desc_copy(int index, char *buf, int buflen);
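
/*
 * A minimal sketch (not part of this interface) of how a consumer might use
 * the copy routines above to dump the statistics of one vcpu; the buffer
 * sizing, error handling and printf formatting are illustrative assumptions.
 *
 *	uint64_t buf[MAX_VMM_STAT_ELEMS];
 *	char desc[128];
 *	int i, num_stats;
 *
 *	if (vmm_stat_copy(vm, vcpuid, &num_stats, buf) == 0) {
 *		for (i = 0; i < num_stats; i++) {
 *			if (vmm_stat_desc_copy(i, desc, sizeof(desc)) == 0)
 *				printf("%s: %ju\n", desc, (uintmax_t)buf[i]);
 *		}
 *	}
 */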

static void __inline
vmm_stat_array_incr(struct vm *vm, int vcpu, struct vmm_stat_type *vst,
		    int statidx, uint64_t x)
{
#ifdef VMM_KEEP_STATS
	uint64_t *stats;

	stats = vcpu_stats(vm, vcpu);

	if (vst->index >= 0 && statidx < vst->nelems)
		stats[vst->index + statidx] += x;
#endif
}

static void __inline
vmm_stat_array_set(struct vm *vm, int vcpu, struct vmm_stat_type *vst,
		   int statidx, uint64_t val)
{
#ifdef VMM_KEEP_STATS
	uint64_t *stats;

	stats = vcpu_stats(vm, vcpu);

	if (vst->index >= 0 && statidx < vst->nelems)
		stats[vst->index + statidx] = val;
#endif
}

static void __inline
vmm_stat_incr(struct vm *vm, int vcpu, struct vmm_stat_type *vst, uint64_t x)
{

#ifdef VMM_KEEP_STATS
	vmm_stat_array_incr(vm, vcpu, vst, 0, x);
#endif
}

static void __inline
vmm_stat_set(struct vm *vm, int vcpu, struct vmm_stat_type *vst, uint64_t val)
{

#ifdef VMM_KEEP_STATS
	vmm_stat_array_set(vm, vcpu, vst, 0, val);
#endif
}

VMM_STAT_DECLARE(VCPU_MIGRATIONS);
VMM_STAT_DECLARE(VMEXIT_COUNT);
VMM_STAT_DECLARE(VMEXIT_EXTINT);
VMM_STAT_DECLARE(VMEXIT_HLT);
VMM_STAT_DECLARE(VMEXIT_CR_ACCESS);
VMM_STAT_DECLARE(VMEXIT_RDMSR);
VMM_STAT_DECLARE(VMEXIT_WRMSR);
VMM_STAT_DECLARE(VMEXIT_MTRAP);
VMM_STAT_DECLARE(VMEXIT_PAUSE);
VMM_STAT_DECLARE(VMEXIT_INTR_WINDOW);
VMM_STAT_DECLARE(VMEXIT_NMI_WINDOW);
VMM_STAT_DECLARE(VMEXIT_INOUT);
VMM_STAT_DECLARE(VMEXIT_CPUID);
VMM_STAT_DECLARE(VMEXIT_NESTED_FAULT);
VMM_STAT_DECLARE(VMEXIT_INST_EMUL);
VMM_STAT_DECLARE(VMEXIT_UNKNOWN);
VMM_STAT_DECLARE(VMEXIT_ASTPENDING);
VMM_STAT_DECLARE(VMEXIT_USERSPACE);
VMM_STAT_DECLARE(VMEXIT_RENDEZVOUS);
VMM_STAT_DECLARE(VMEXIT_EXCEPTION);
VMM_STAT_DECLARE(VMEXIT_REQIDLE);
#endif