/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/smp.h>

#include <x86/specialreg.h>
#include <x86/apicreg.h>

#include <dev/vmm/vmm_ktr.h>

#include <machine/vmm.h>
#include "vmm_lapic.h"
#include "vlapic.h"

/*
 * Some MSI message definitions
 */
#define	MSI_X86_ADDR_MASK	0xfff00000
#define	MSI_X86_ADDR_BASE	0xfee00000
#define	MSI_X86_ADDR_RH		0x00000008	/* Redirection Hint */
#define	MSI_X86_ADDR_LOG	0x00000004	/* Destination Mode */

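/*
 * Pend 'vector' in the vCPU's virtual local APIC; 'level' selects
 * level-triggered semantics for the interrupt.  If the vlapic accepts the
 * interrupt, the vCPU is notified so it re-evaluates its pending state.
 */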
int
lapic_set_intr(struct vcpu *vcpu, int vector, bool level)
{
	struct vlapic *vlapic;

	/*
	 * According to the section "Maskable Hardware Interrupts" in the
	 * Intel SDM, vectors 16 through 255 can be delivered through the
	 * local APIC.
	 */
	if (vector < 16 || vector > 255)
		return (EINVAL);

	vlapic = vm_lapic(vcpu);
	if (vlapic_set_intr_ready(vlapic, vector, level))
		vcpu_notify_event(vcpu, true);
	return (0);
}

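/*
 * Trigger the local vector table (LVT) entry identified by 'vector' on a
 * single vCPU, or on every active vCPU in the VM when 'vcpu' is NULL.  The
 * broadcast case stops at the first vCPU that reports an error.
 */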
int
lapic_set_local_intr(struct vm *vm, struct vcpu *vcpu, int vector)
{
	struct vlapic *vlapic;
	cpuset_t dmask;
	int cpu, error;

	if (vcpu == NULL) {
		error = 0;
		dmask = vm_active_cpus(vm);
		CPU_FOREACH_ISSET(cpu, &dmask) {
			vlapic = vm_lapic(vm_vcpu(vm, cpu));
			error = vlapic_trigger_lvt(vlapic, vector);
			if (error)
				break;
		}
	} else {
		vlapic = vm_lapic(vcpu);
		error = vlapic_trigger_lvt(vlapic, vector);
	}

	return (error);
}

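/*
 * Summary of the MSI address/data fields decoded below (Intel SDM, Vol. 3):
 *
 *	addr[31:20]	0xfee (fixed MSI address window)
 *	addr[19:12]	destination APIC ID, bits 7:0
 *	addr[11:5]	destination APIC ID, bits 14:8 (Extended Destination ID)
 *	addr[3]		Redirection Hint
 *	addr[2]		Destination Mode (logical when set)
 *	msg[10:8]	delivery mode
 *	msg[7:0]	vector
 */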
int
lapic_intr_msi(struct vm *vm, uint64_t addr, uint64_t msg)
{
	int delmode, vec;
	uint32_t dest;
	bool phys;

	VM_CTR2(vm, "lapic MSI addr: %#lx msg: %#lx", addr, msg);

	if ((addr & MSI_X86_ADDR_MASK) != MSI_X86_ADDR_BASE) {
		VM_CTR1(vm, "lapic MSI invalid addr %#lx", addr);
		return (-1);
	}

	/*
	 * Extract the x86-specific fields from the MSI addr/msg params
	 * according to the Intel Architecture spec, Vol. 3, Ch. 10.
	 *
	 * The PCI specification does not support level-triggered MSI/MSI-X,
	 * so the trigger level in 'msg' is ignored.
	 *
	 * 'dest' is interpreted as a logical APIC ID if both the Redirection
	 * Hint and Destination Mode are '1', and as a physical APIC ID
	 * otherwise.
	 */
	dest = (addr >> 12) & 0xff;
	/*
	 * Extended Destination ID support uses bits 5-11 of the address:
	 * http://david.woodhou.se/ExtDestId.pdf
	 */
	dest |= ((addr >> 5) & 0x7f) << 8;
	phys = ((addr & (MSI_X86_ADDR_RH | MSI_X86_ADDR_LOG)) !=
	    (MSI_X86_ADDR_RH | MSI_X86_ADDR_LOG));
	delmode = msg & APIC_DELMODE_MASK;
	vec = msg & 0xff;

	VM_CTR3(vm, "lapic MSI %s dest %#x, vec %d",
	    phys ? "physical" : "logical", dest, vec);

	vlapic_deliver_intr(vm, LAPIC_TRIG_EDGE, dest, phys, delmode, vec);
	return (0);
}

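/*
 * Return true if 'msr' lies in the MSR range used for x2APIC register
 * access; the check below accepts MSRs 0x800 through 0xBFF.
 */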
static bool
x2apic_msr(u_int msr)
{
	return (msr >= 0x800 && msr <= 0xBFF);
}

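/*
 * Convert an x2APIC MSR number to the offset of the corresponding register
 * in the xAPIC MMIO page: MSR 0x800 + n maps to offset n * 16.  For example,
 * the APIC ID register (MSR 0x802) maps to offset 0x20.
 */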
static u_int
x2apic_msr_to_regoff(u_int msr)
{

	return ((msr - 0x800) << 4);
}

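/*
 * Return true if 'msr' is emulated by the local APIC code, i.e. it is either
 * IA32_APIC_BASE or an x2APIC register MSR.
 */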
bool
lapic_msr(u_int msr)
{

	return (x2apic_msr(msr) || msr == MSR_APICBASE);
}

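/*
 * Emulate a RDMSR of a local APIC MSR.  Reads of IA32_APIC_BASE return the
 * vlapic's APIC-base value; x2APIC MSRs are translated to a register offset
 * and handled by vlapic_read().  'retu' is passed through so the vlapic code
 * can flag accesses that must be completed in userspace.
 */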
int
lapic_rdmsr(struct vcpu *vcpu, u_int msr, uint64_t *rval, bool *retu)
{
	int error;
	u_int offset;
	struct vlapic *vlapic;

	vlapic = vm_lapic(vcpu);

	if (msr == MSR_APICBASE) {
		*rval = vlapic_get_apicbase(vlapic);
		error = 0;
	} else {
		offset = x2apic_msr_to_regoff(msr);
		error = vlapic_read(vlapic, 0, offset, rval, retu);
	}

	return (error);
}

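/*
 * Emulate a WRMSR of a local APIC MSR.  Writes to IA32_APIC_BASE are handled
 * by vlapic_set_apicbase(); x2APIC MSRs are translated to a register offset
 * and handled by vlapic_write(), mirroring lapic_rdmsr() above.
 */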
int
lapic_wrmsr(struct vcpu *vcpu, u_int msr, uint64_t val, bool *retu)
{
	int error;
	u_int offset;
	struct vlapic *vlapic;

	vlapic = vm_lapic(vcpu);

	if (msr == MSR_APICBASE) {
		error = vlapic_set_apicbase(vlapic, val);
	} else {
		offset = x2apic_msr_to_regoff(msr);
		error = vlapic_write(vlapic, 0, offset, val, retu);
	}

	return (error);
}

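/*
 * Emulate a write to the memory-mapped (xAPIC) local APIC page.  'gpa' is
 * converted to a register offset relative to DEFAULT_APIC_BASE before being
 * handed to vlapic_write().
 */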
int
lapic_mmio_write(struct vcpu *vcpu, uint64_t gpa, uint64_t wval, int size,
    void *arg)
{
	int error;
	uint64_t off;
	struct vlapic *vlapic;

	off = gpa - DEFAULT_APIC_BASE;

	/*
	 * Memory mapped local apic accesses must be 4 bytes wide and
	 * aligned on a 16-byte boundary.
	 */
	if (size != 4 || off & 0xf)
		return (EINVAL);

	vlapic = vm_lapic(vcpu);
	error = vlapic_write(vlapic, 1, off, wval, arg);
	return (error);
}

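/*
 * Emulate a read from the memory-mapped (xAPIC) local APIC page.  Unlike the
 * write path, narrower-than-4-byte reads are tolerated by rounding the
 * offset down to a 4-byte boundary before checking register alignment.
 */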
int
lapic_mmio_read(struct vcpu *vcpu, uint64_t gpa, uint64_t *rval, int size,
    void *arg)
{
	int error;
	uint64_t off;
	struct vlapic *vlapic;

	off = gpa - DEFAULT_APIC_BASE;

	/*
	 * Memory mapped local apic accesses should be aligned on a
	 * 16-byte boundary.  They are also expected to be 4 bytes wide,
	 * but not all OSes follow that expectation, so round the offset
	 * down to a 4-byte boundary instead of rejecting the access.
	 */
	off &= ~3;
	if (off & 0xf)
		return (EINVAL);

	vlapic = vm_lapic(vcpu);
	error = vlapic_read(vlapic, 1, off, rval, arg);
	return (error);
}