/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2013 Anish Gupta (akgupt3@gmail.com)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef	_SVM_SOFTC_H_
#define	_SVM_SOFTC_H_

#define	SVM_IO_BITMAP_SIZE	(3 * PAGE_SIZE)
#define	SVM_MSR_BITMAP_SIZE	(2 * PAGE_SIZE)

#include <sys/hma.h>

#include "vmcb.h"
#include "svm_pmu.h"

/* This must match HOST_MSR_NUM in svm_msr.c (where it is CTASSERTed) */
#define	SVM_HOST_MSR_NUM	4
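
/*
 * For illustration only: the compile-time cross-check referred to above
 * presumably takes a form along these lines in svm_msr.c (a sketch, not
 * a quote from that file):
 *
 *	CTASSERT(SVM_HOST_MSR_NUM == HOST_MSR_NUM);
 */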

/*
 * XXX separate out 'struct vmcb' from 'svm_vcpu' to avoid wasting space
 * due to VMCB alignment requirements.
 */
struct svm_vcpu {
	struct vmcb	vmcb;	 /* hardware saved vcpu context */
	struct svm_regctx swctx; /* software saved vcpu context */
	uint64_t	vmcb_pa; /* VMCB physical address */
	uint64_t	nextrip; /* next instruction to be executed by guest */
	int		lastcpu; /* host cpu that the vcpu last ran on */
	uint32_t	dirty;	 /* state cache bits that must be cleared */
	uint64_t	nptgen;	 /* page table gen when the vcpu last ran */
	hma_svm_asid_t	hma_asid;
	boolean_t	loaded;
	struct svm_pmu_vcpu pmu;
} __aligned(PAGE_SIZE);
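
/*
 * Illustration of the waste noted in the XXX comment above, assuming a
 * 4K PAGE_SIZE: the VMCB occupies a full page, so the software state
 * following it pushes the structure up to a second page, most of which
 * is padding.  The assertions below are a sketch of the arithmetic,
 * not copied from the source:
 *
 *	CTASSERT(sizeof (struct vmcb) == PAGE_SIZE);
 *	CTASSERT(sizeof (struct svm_vcpu) == 2 * PAGE_SIZE);
 */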

/*
 * SVM softc, one per virtual machine.
 */
struct svm_softc {
	uint8_t apic_page[VM_MAXCPU][PAGE_SIZE];
	struct svm_vcpu vcpu[VM_MAXCPU];
	uint64_t	nptp;		/* nested page table (host PA) */
	uint8_t		*iopm_bitmap;	/* shared by all vcpus */
	uint8_t		*msr_bitmap;	/* shared by all vcpus */
	struct vm	*vm;
	uint64_t	host_msrs[VM_MAXCPU][SVM_HOST_MSR_NUM];
	svm_pmu_flavor_t pmu_flavor;
};

/*
 * The VMCB must be page-aligned, and it is the first member of struct
 * svm_vcpu, which is itself declared page-aligned.  This is a
 * belt-and-suspenders check that those alignment directives are
 * actually being honored.
 */
CTASSERT((offsetof(struct svm_softc, nptp) & PAGE_MASK) == 0);
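
/*
 * A hypothetical, stricter variant (not in the original) would assert
 * the alignment of the vcpu array itself:
 *
 *	CTASSERT((offsetof(struct svm_softc, vcpu) & PAGE_MASK) == 0);
 */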

static __inline struct svm_vcpu *
svm_get_vcpu(struct svm_softc *sc, int vcpu)
{
	return (&(sc->vcpu[vcpu]));
}

static __inline struct vmcb *
svm_get_vmcb(struct svm_softc *sc, int vcpu)
{
	return (&(sc->vcpu[vcpu].vmcb));
}

static __inline struct vmcb_state *
svm_get_vmcb_state(struct svm_softc *sc, int vcpu)
{
	return (&(sc->vcpu[vcpu].vmcb.state));
}

static __inline struct vmcb_ctrl *
svm_get_vmcb_ctrl(struct svm_softc *sc, int vcpu)
{
	return (&(sc->vcpu[vcpu].vmcb.ctrl));
}

static __inline struct svm_regctx *
svm_get_guest_regctx(struct svm_softc *sc, int vcpu)
{
	return (&(sc->vcpu[vcpu].swctx));
}

static __inline struct svm_pmu_vcpu *
svm_get_pmu(struct svm_softc *sc, int vcpu)
{
	return (&(sc->vcpu[vcpu].pmu));
}

static __inline void
svm_set_dirty(struct svm_softc *sc, int vcpu, uint32_t dirtybits)
{
	struct svm_vcpu *vcpustate = svm_get_vcpu(sc, vcpu);

	vcpustate->dirty |= dirtybits;
}

static __inline void
svm_apply_dirty(struct svm_softc *sc, int vcpu)
{
	struct svm_vcpu *vcpustate = svm_get_vcpu(sc, vcpu);
	struct vmcb_ctrl *ctrl = svm_get_vmcb_ctrl(sc, vcpu);

	ctrl->vmcb_clean = ~vcpustate->dirty;
	vcpustate->dirty = 0;
}
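
/*
 * Typical flow, for illustration (the call sequence is a sketch, not a
 * quote from svm.c; the field and flag names are assumed from vmcb.h):
 * a caller that rewrites the nested page table pointer marks that state
 * dirty, and the accumulated bits are folded into vmcb_clean just
 * before VMRUN so the CPU reloads only the fields that changed:
 *
 *	ctrl->n_cr3 = new_nptp;
 *	svm_set_dirty(sc, vcpu, VMCB_CACHE_NP);
 *	...
 *	svm_apply_dirty(sc, vcpu);	(immediately before VMRUN)
 */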

int svm_get_intercept(struct svm_softc *, int, int, uint32_t);
void svm_set_intercept(struct svm_softc *, int, int, uint32_t, int);

static __inline void
svm_disable_intercept(struct svm_softc *sc, int vcpu, int off, uint32_t bitmask)
{
	svm_set_intercept(sc, vcpu, off, bitmask, 0);
}

static __inline void
svm_enable_intercept(struct svm_softc *sc, int vcpu, int off, uint32_t bitmask)
{
	svm_set_intercept(sc, vcpu, off, bitmask, 1);
}
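
/*
 * Usage sketch (illustrative; the offset and bit names are assumed
 * from vmcb.h): enabling the HLT intercept sets the corresponding bit
 * in the VMCB, and svm_set_intercept() is expected to flag the
 * intercept state dirty when the bit actually changes:
 *
 *	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_HLT);
 */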

#endif /* _SVM_SOFTC_H_ */