/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/smp.h>

#include <x86/specialreg.h>
#include <x86/apicreg.h>

#include <dev/vmm/vmm_ktr.h>

#include <machine/vmm.h>
#include "vmm_lapic.h"
#include "vlapic.h"

/*
 * Some MSI message definitions
 */
#define	MSI_X86_ADDR_MASK	0xfff00000
#define	MSI_X86_ADDR_BASE	0xfee00000
#define	MSI_X86_ADDR_RH		0x00000008	/* Redirection Hint */
#define	MSI_X86_ADDR_LOG	0x00000004	/* Destination Mode */
int
lapic_set_intr(struct vcpu *vcpu, int vector, bool level)
{
	struct vlapic *vlapic;

	/*
	 * According to the "Maskable Hardware Interrupts" section of the
	 * Intel SDM, vectors 16 through 255 can be delivered through the
	 * local APIC.
	 */
	if (vector < 16 || vector > 255)
		return (EINVAL);

	vlapic = vm_lapic(vcpu);
	if (vlapic_set_intr_ready(vlapic, vector, level))
		vcpu_notify_event(vcpu, true);
	return (0);
}

int
lapic_set_local_intr(struct vm *vm, struct vcpu *vcpu, int vector)
{
	struct vlapic *vlapic;
	cpuset_t dmask;
	int cpu, error;

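	/*
	 * A NULL vcpu means the LVT interrupt is triggered on every
	 * active vcpu in the VM.
	 */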
	if (vcpu == NULL) {
		error = 0;
		dmask = vm_active_cpus(vm);
		CPU_FOREACH_ISSET(cpu, &dmask) {
			vlapic = vm_lapic(vm_vcpu(vm, cpu));
			error = vlapic_trigger_lvt(vlapic, vector);
			if (error)
				break;
		}
	} else {
		vlapic = vm_lapic(vcpu);
		error = vlapic_trigger_lvt(vlapic, vector);
	}

	return (error);
}

int
lapic_intr_msi(struct vm *vm, uint64_t addr, uint64_t msg)
{
	int delmode, vec;
	uint32_t dest;
	bool phys;

	VM_CTR2(vm, "lapic MSI addr: %#lx msg: %#lx", addr, msg);

	if ((addr & MSI_X86_ADDR_MASK) != MSI_X86_ADDR_BASE) {
		VM_CTR1(vm, "lapic MSI invalid addr %#lx", addr);
		return (-1);
	}

	/*
	 * Extract the x86-specific fields from the MSI addr/msg params
	 * according to the Intel Arch spec, Vol. 3, Ch. 10.
	 *
	 * The PCI specification does not support level-triggered MSI/MSI-X,
	 * so the trigger level in 'msg' is ignored.
	 *
	 * The 'dest' is interpreted as a logical APIC ID if both the
	 * Redirection Hint and Destination Mode bits are '1', and as a
	 * physical APIC ID otherwise.
	 */
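	/*
	 * For example, addr 0xfee01000 (RH = 0, DM = 0) selects physical
	 * destination APIC ID 1, and msg 0x0030 requests fixed delivery
	 * of vector 0x30.
	 */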
	dest = (addr >> 12) & 0xff;
	phys = ((addr & (MSI_X86_ADDR_RH | MSI_X86_ADDR_LOG)) !=
	    (MSI_X86_ADDR_RH | MSI_X86_ADDR_LOG));
	delmode = msg & APIC_DELMODE_MASK;
	vec = msg & 0xff;

	VM_CTR3(vm, "lapic MSI %s dest %#x, vec %d",
	    phys ? "physical" : "logical", dest, vec);

	vlapic_deliver_intr(vm, LAPIC_TRIG_EDGE, dest, phys, delmode, vec);
	return (0);
}

static bool
x2apic_msr(u_int msr)
{
	return (msr >= 0x800 && msr <= 0xBFF);
}

static u_int
x2apic_msr_to_regoff(u_int msr)
{

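	/*
	 * Each x2APIC MSR maps to a 16-byte-spaced xAPIC register offset;
	 * e.g. MSR 0x802 (x2APIC ID) maps to register offset 0x20.
	 */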
	return ((msr - 0x800) << 4);
}

bool
lapic_msr(u_int msr)
{

	return (x2apic_msr(msr) || msr == MSR_APICBASE);
}

int
lapic_rdmsr(struct vcpu *vcpu, u_int msr, uint64_t *rval, bool *retu)
{
	int error;
	u_int offset;
	struct vlapic *vlapic;

	vlapic = vm_lapic(vcpu);

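	/*
	 * MSR_APICBASE is emulated directly; any other lapic MSR is
	 * treated as an x2APIC register read.
	 */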
	if (msr == MSR_APICBASE) {
		*rval = vlapic_get_apicbase(vlapic);
		error = 0;
	} else {
		offset = x2apic_msr_to_regoff(msr);
		error = vlapic_read(vlapic, 0, offset, rval, retu);
	}

	return (error);
}

int
lapic_wrmsr(struct vcpu *vcpu, u_int msr, uint64_t val, bool *retu)
{
	int error;
	u_int offset;
	struct vlapic *vlapic;

	vlapic = vm_lapic(vcpu);

	if (msr == MSR_APICBASE) {
		error = vlapic_set_apicbase(vlapic, val);
	} else {
		offset = x2apic_msr_to_regoff(msr);
		error = vlapic_write(vlapic, 0, offset, val, retu);
	}

	return (error);
}

int
lapic_mmio_write(struct vcpu *vcpu, uint64_t gpa, uint64_t wval, int size,
    void *arg)
{
	int error;
	uint64_t off;
	struct vlapic *vlapic;

	off = gpa - DEFAULT_APIC_BASE;

	/*
	 * Memory-mapped local APIC accesses must be 4 bytes wide and
	 * aligned on a 16-byte boundary.
	 */
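	/*
	 * For example, a 4-byte write to gpa 0xfee00300 (register offset
	 * 0x300, ICR low) is accepted; a wider or misaligned write is
	 * rejected below.
	 */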
	if (size != 4 || off & 0xf)
		return (EINVAL);

	vlapic = vm_lapic(vcpu);
	error = vlapic_write(vlapic, 1, off, wval, arg);
	return (error);
}

int
lapic_mmio_read(struct vcpu *vcpu, uint64_t gpa, uint64_t *rval, int size,
    void *arg)
{
	int error;
	uint64_t off;
	struct vlapic *vlapic;

	off = gpa - DEFAULT_APIC_BASE;

	/*
	 * Memory-mapped local APIC accesses should be aligned on a 16-byte
	 * boundary.  They are also expected to be 4 bytes wide, but not all
	 * guest OSes follow that suggestion.
	 */
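	/*
	 * For example, a byte read at offset 0x31 is rounded down to the
	 * aligned offset 0x30 (the Version register), while a read at
	 * offset 0x34 fails the alignment check below.
	 */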
	off &= ~3;
	if (off & 0xf)
		return (EINVAL);

	vlapic = vm_lapic(vcpu);
	error = vlapic_read(vlapic, 1, off, rval, arg);
	return (error);
}