xref: /linux/arch/x86/kvm/svm/avic.c (revision a3d14d1602ca11429d242d230c31af8f822f614f)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Kernel-based Virtual Machine driver for Linux
4  *
5  * AMD SVM support
6  *
7  * Copyright (C) 2006 Qumranet, Inc.
8  * Copyright 2010 Red Hat, Inc. and/or its affiliates.
9  *
10  * Authors:
11  *   Yaniv Kamay  <yaniv@qumranet.com>
12  *   Avi Kivity   <avi@qumranet.com>
13  */
14 
15 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
16 
17 #include <linux/kvm_types.h>
18 #include <linux/hashtable.h>
19 #include <linux/amd-iommu.h>
20 #include <linux/kvm_host.h>
21 
22 #include <asm/irq_remapping.h>
23 #include <asm/msr.h>
24 
25 #include "trace.h"
26 #include "lapic.h"
27 #include "x86.h"
28 #include "irq.h"
29 #include "svm.h"
30 
31 /*
32  * Encode the arbitrary VM ID and the vCPU's default APIC ID, i.e. the vCPU ID,
33  * into the GATag so that KVM can retrieve the correct vCPU from a GALog entry
34  * if an interrupt can't be delivered, e.g. because the vCPU isn't running.
35  *
36  * For the vCPU ID, use however many bits are currently allowed for the max
37  * guest physical APIC ID (limited by the size of the physical ID table), and
38  * use whatever bits remain to assign arbitrary AVIC IDs to VMs.  Note, the
39  * size of the GATag is defined by hardware (32 bits), but is an opaque value
40  * as far as hardware is concerned.
41  */
42 #define AVIC_VCPU_ID_MASK		AVIC_PHYSICAL_MAX_INDEX_MASK
43 
44 #define AVIC_VM_ID_SHIFT		HWEIGHT32(AVIC_PHYSICAL_MAX_INDEX_MASK)
45 #define AVIC_VM_ID_MASK			(GENMASK(31, AVIC_VM_ID_SHIFT) >> AVIC_VM_ID_SHIFT)
46 
47 #define AVIC_GATAG_TO_VMID(x)		((x >> AVIC_VM_ID_SHIFT) & AVIC_VM_ID_MASK)
48 #define AVIC_GATAG_TO_VCPUID(x)		(x & AVIC_VCPU_ID_MASK)
49 
50 #define __AVIC_GATAG(vm_id, vcpu_id)	((((vm_id) & AVIC_VM_ID_MASK) << AVIC_VM_ID_SHIFT) | \
51 					 ((vcpu_id) & AVIC_VCPU_ID_MASK))
52 #define AVIC_GATAG(vm_id, vcpu_id)					\
53 ({									\
54 	u32 ga_tag = __AVIC_GATAG(vm_id, vcpu_id);			\
55 									\
56 	WARN_ON_ONCE(AVIC_GATAG_TO_VCPUID(ga_tag) != (vcpu_id));	\
57 	WARN_ON_ONCE(AVIC_GATAG_TO_VMID(ga_tag) != (vm_id));		\
58 	ga_tag;								\
59 })
60 
61 static_assert(__AVIC_GATAG(AVIC_VM_ID_MASK, AVIC_VCPU_ID_MASK) == -1u);
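/*
 * Worked example (assuming AVIC_PHYSICAL_MAX_INDEX_MASK is 0x1ff, i.e. the
 * vCPU ID occupies bits 8:0 and the VM ID occupies bits 31:9): vm_id = 0x2
 * and vcpu_id = 0x5 encode to ga_tag = (0x2 << 9) | 0x5 = 0x405, which
 * decodes back via AVIC_GATAG_TO_VMID() and AVIC_GATAG_TO_VCPUID().
 */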
62 
63 static bool force_avic;
64 module_param_unsafe(force_avic, bool, 0444);
65 
66 /* Note:
67  * This hash table is used to map a VM ID to a struct kvm_svm
68  * when handling an AMD IOMMU GALog notification to schedule in
69  * a particular vCPU.
70  */
71 #define SVM_VM_DATA_HASH_BITS	8
72 static DEFINE_HASHTABLE(svm_vm_data_hash, SVM_VM_DATA_HASH_BITS);
73 static u32 next_vm_id = 0;
74 static bool next_vm_id_wrapped = 0;
75 static DEFINE_SPINLOCK(svm_vm_data_hash_lock);
76 bool x2avic_enabled;
77 
78 /*
79  * This is a wrapper around struct amd_ir_data.
80  */
81 struct amd_svm_iommu_ir {
82 	struct list_head node;	/* Used by SVM for per-vcpu ir_list */
83 	void *data;		/* Storing pointer to struct amd_ir_data */
84 };
85 
86 static void avic_activate_vmcb(struct vcpu_svm *svm)
87 {
88 	struct vmcb *vmcb = svm->vmcb01.ptr;
89 
90 	vmcb->control.int_ctl &= ~(AVIC_ENABLE_MASK | X2APIC_MODE_MASK);
91 	vmcb->control.avic_physical_id &= ~AVIC_PHYSICAL_MAX_INDEX_MASK;
92 
93 	vmcb->control.int_ctl |= AVIC_ENABLE_MASK;
94 
95 	/*
96 	 * Note: KVM supports hybrid-AVIC mode, where KVM emulates x2APIC MSR
97 	 * accesses, while interrupt injection to a running vCPU can be
98 	 * achieved using AVIC doorbell.  KVM disables the APIC access page
99 	 * (deletes the memslot) if any vCPU has x2APIC enabled, thus enabling
100 	 * AVIC in hybrid mode activates only the doorbell mechanism.
101 	 */
102 	if (x2avic_enabled && apic_x2apic_mode(svm->vcpu.arch.apic)) {
103 		vmcb->control.int_ctl |= X2APIC_MODE_MASK;
104 		vmcb->control.avic_physical_id |= X2AVIC_MAX_PHYSICAL_ID;
105 		/* Disabling MSR intercept for x2APIC registers */
106 		svm_set_x2apic_msr_interception(svm, false);
107 	} else {
108 		/*
109 		 * Flush the TLB, the guest may have inserted a non-APIC
110 		 * mapping into the TLB while AVIC was disabled.
111 		 */
112 		kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, &svm->vcpu);
113 
114 		/* For xAVIC and hybrid-xAVIC modes */
115 		vmcb->control.avic_physical_id |= AVIC_MAX_PHYSICAL_ID;
116 		/* Enabling MSR intercept for x2APIC registers */
117 		svm_set_x2apic_msr_interception(svm, true);
118 	}
119 }
120 
121 static void avic_deactivate_vmcb(struct vcpu_svm *svm)
122 {
123 	struct vmcb *vmcb = svm->vmcb01.ptr;
124 
125 	vmcb->control.int_ctl &= ~(AVIC_ENABLE_MASK | X2APIC_MODE_MASK);
126 	vmcb->control.avic_physical_id &= ~AVIC_PHYSICAL_MAX_INDEX_MASK;
127 
128 	/*
129 	 * If running nested and the guest uses its own MSR bitmap, there
130 	 * is no need to update L0's MSR bitmap.
131 	 */
132 	if (is_guest_mode(&svm->vcpu) &&
133 	    vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT))
134 		return;
135 
136 	/* Enabling MSR intercept for x2APIC registers */
137 	svm_set_x2apic_msr_interception(svm, true);
138 }
139 
140 /* Note:
141  * This function is called from the IOMMU driver to notify
142  * SVM to schedule in a particular vCPU of a particular VM.
143  */
144 int avic_ga_log_notifier(u32 ga_tag)
145 {
146 	unsigned long flags;
147 	struct kvm_svm *kvm_svm;
148 	struct kvm_vcpu *vcpu = NULL;
149 	u32 vm_id = AVIC_GATAG_TO_VMID(ga_tag);
150 	u32 vcpu_id = AVIC_GATAG_TO_VCPUID(ga_tag);
151 
152 	pr_debug("SVM: %s: vm_id=%#x, vcpu_id=%#x\n", __func__, vm_id, vcpu_id);
153 	trace_kvm_avic_ga_log(vm_id, vcpu_id);
154 
155 	spin_lock_irqsave(&svm_vm_data_hash_lock, flags);
156 	hash_for_each_possible(svm_vm_data_hash, kvm_svm, hnode, vm_id) {
157 		if (kvm_svm->avic_vm_id != vm_id)
158 			continue;
159 		vcpu = kvm_get_vcpu_by_id(&kvm_svm->kvm, vcpu_id);
160 		break;
161 	}
162 	spin_unlock_irqrestore(&svm_vm_data_hash_lock, flags);
163 
164 	/* Note:
165 	 * At this point, the IOMMU should have already set the pending
166 	 * bit in the vAPIC backing page. So, we just need to schedule
167 	 * in the vcpu.
168 	 */
169 	if (vcpu)
170 		kvm_vcpu_wake_up(vcpu);
171 
172 	return 0;
173 }
174 
175 void avic_vm_destroy(struct kvm *kvm)
176 {
177 	unsigned long flags;
178 	struct kvm_svm *kvm_svm = to_kvm_svm(kvm);
179 
180 	if (!enable_apicv)
181 		return;
182 
183 	if (kvm_svm->avic_logical_id_table_page)
184 		__free_page(kvm_svm->avic_logical_id_table_page);
185 	if (kvm_svm->avic_physical_id_table_page)
186 		__free_page(kvm_svm->avic_physical_id_table_page);
187 
188 	spin_lock_irqsave(&svm_vm_data_hash_lock, flags);
189 	hash_del(&kvm_svm->hnode);
190 	spin_unlock_irqrestore(&svm_vm_data_hash_lock, flags);
191 }
192 
193 int avic_vm_init(struct kvm *kvm)
194 {
195 	unsigned long flags;
196 	int err = -ENOMEM;
197 	struct kvm_svm *kvm_svm = to_kvm_svm(kvm);
198 	struct kvm_svm *k2;
199 	struct page *p_page;
200 	struct page *l_page;
201 	u32 vm_id;
202 
203 	if (!enable_apicv)
204 		return 0;
205 
206 	/* Allocating physical APIC ID table (4KB) */
207 	p_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
208 	if (!p_page)
209 		goto free_avic;
210 
211 	kvm_svm->avic_physical_id_table_page = p_page;
212 
213 	/* Allocating logical APIC ID table (4KB) */
214 	l_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
215 	if (!l_page)
216 		goto free_avic;
217 
218 	kvm_svm->avic_logical_id_table_page = l_page;
219 
220 	spin_lock_irqsave(&svm_vm_data_hash_lock, flags);
221  again:
222 	vm_id = next_vm_id = (next_vm_id + 1) & AVIC_VM_ID_MASK;
223 	if (vm_id == 0) { /* id is 1-based, zero is not okay */
224 		next_vm_id_wrapped = 1;
225 		goto again;
226 	}
227 	/* Is it still in use? Only possible if wrapped at least once */
228 	if (next_vm_id_wrapped) {
229 		hash_for_each_possible(svm_vm_data_hash, k2, hnode, vm_id) {
230 			if (k2->avic_vm_id == vm_id)
231 				goto again;
232 		}
233 	}
234 	kvm_svm->avic_vm_id = vm_id;
235 	hash_add(svm_vm_data_hash, &kvm_svm->hnode, kvm_svm->avic_vm_id);
236 	spin_unlock_irqrestore(&svm_vm_data_hash_lock, flags);
237 
238 	return 0;
239 
240 free_avic:
241 	avic_vm_destroy(kvm);
242 	return err;
243 }
244 
245 void avic_init_vmcb(struct vcpu_svm *svm, struct vmcb *vmcb)
246 {
247 	struct kvm_svm *kvm_svm = to_kvm_svm(svm->vcpu.kvm);
248 	phys_addr_t bpa = __sme_set(page_to_phys(svm->avic_backing_page));
249 	phys_addr_t lpa = __sme_set(page_to_phys(kvm_svm->avic_logical_id_table_page));
250 	phys_addr_t ppa = __sme_set(page_to_phys(kvm_svm->avic_physical_id_table_page));
251 
252 	vmcb->control.avic_backing_page = bpa & AVIC_HPA_MASK;
253 	vmcb->control.avic_logical_id = lpa & AVIC_HPA_MASK;
254 	vmcb->control.avic_physical_id = ppa & AVIC_HPA_MASK;
255 	vmcb->control.avic_vapic_bar = APIC_DEFAULT_PHYS_BASE & VMCB_AVIC_APIC_BAR_MASK;
256 
257 	if (kvm_apicv_activated(svm->vcpu.kvm))
258 		avic_activate_vmcb(svm);
259 	else
260 		avic_deactivate_vmcb(svm);
261 }
262 
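/*
 * Return a pointer to the vCPU's entry in the per-VM physical APIC ID table,
 * or NULL if the index exceeds the limit for the current mode
 * (AVIC_MAX_PHYSICAL_ID without x2AVIC, X2AVIC_MAX_PHYSICAL_ID with it).
 */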
263 static u64 *avic_get_physical_id_entry(struct kvm_vcpu *vcpu,
264 				       unsigned int index)
265 {
266 	u64 *avic_physical_id_table;
267 	struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm);
268 
269 	if ((!x2avic_enabled && index > AVIC_MAX_PHYSICAL_ID) ||
270 	    (index > X2AVIC_MAX_PHYSICAL_ID))
271 		return NULL;
272 
273 	avic_physical_id_table = page_address(kvm_svm->avic_physical_id_table_page);
274 
275 	return &avic_physical_id_table[index];
276 }
277 
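/*
 * Point the vCPU's physical APIC ID table entry at its vAPIC backing page
 * (the in-kernel APIC register page) and mark the entry valid.
 */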
278 static int avic_init_backing_page(struct kvm_vcpu *vcpu)
279 {
280 	u64 *entry, new_entry;
281 	int id = vcpu->vcpu_id;
282 	struct vcpu_svm *svm = to_svm(vcpu);
283 
284 	if ((!x2avic_enabled && id > AVIC_MAX_PHYSICAL_ID) ||
285 	    (id > X2AVIC_MAX_PHYSICAL_ID))
286 		return -EINVAL;
287 
288 	if (!vcpu->arch.apic->regs)
289 		return -EINVAL;
290 
291 	if (kvm_apicv_activated(vcpu->kvm)) {
292 		int ret;
293 
294 		/*
295 		 * Note, AVIC hardware walks the nested page table to check
296 		 * permissions, but does not use the SPA address specified in
297 		 * the leaf SPTE since it uses the address in the AVIC_BACKING_PAGE
298 		 * pointer field of the VMCB.
299 		 */
300 		ret = kvm_alloc_apic_access_page(vcpu->kvm);
301 		if (ret)
302 			return ret;
303 	}
304 
305 	svm->avic_backing_page = virt_to_page(vcpu->arch.apic->regs);
306 
307 	/* Setting AVIC backing page address in the phy APIC ID table */
308 	entry = avic_get_physical_id_entry(vcpu, id);
309 	if (!entry)
310 		return -EINVAL;
311 
312 	new_entry = __sme_set((page_to_phys(svm->avic_backing_page) &
313 			      AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK) |
314 			      AVIC_PHYSICAL_ID_ENTRY_VALID_MASK);
315 	WRITE_ONCE(*entry, new_entry);
316 
317 	svm->avic_physical_id_cache = entry;
318 
319 	return 0;
320 }
321 
322 void avic_ring_doorbell(struct kvm_vcpu *vcpu)
323 {
324 	/*
325 	 * Note, the vCPU could get migrated to a different pCPU at any point,
326 	 * which could result in signalling the wrong/previous pCPU.  But if
327 	 * that happens the vCPU is guaranteed to do a VMRUN (after being
328 	 * migrated) and thus will process pending interrupts, i.e. a doorbell
329 	 * is not needed (and the spurious one is harmless).
330 	 */
331 	int cpu = READ_ONCE(vcpu->cpu);
332 
333 	if (cpu != get_cpu()) {
334 		wrmsrq(MSR_AMD64_SVM_AVIC_DOORBELL, kvm_cpu_get_apicid(cpu));
335 		trace_kvm_avic_doorbell(vcpu->vcpu_id, kvm_cpu_get_apicid(cpu));
336 	}
337 	put_cpu();
338 }
339 
340 
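/*
 * Kick a vCPU whose vIRR bit has already been set by AVIC hardware: record
 * that the vIRR is non-empty (irr_pending) and let the common SVM code
 * complete delivery, e.g. by ringing the doorbell or waking the vCPU.
 */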
341 static void avic_kick_vcpu(struct kvm_vcpu *vcpu, u32 icrl)
342 {
343 	vcpu->arch.apic->irr_pending = true;
344 	svm_complete_interrupt_delivery(vcpu,
345 					icrl & APIC_MODE_MASK,
346 					icrl & APIC_INT_LEVELTRIG,
347 					icrl & APIC_VECTOR_MASK);
348 }
349 
350 static void avic_kick_vcpu_by_physical_id(struct kvm *kvm, u32 physical_id,
351 					  u32 icrl)
352 {
353 	/*
354 	 * KVM inhibits AVIC if any vCPU ID diverges from the vCPU's APIC ID,
355 	 * i.e. APIC ID == vCPU ID.
356 	 */
357 	struct kvm_vcpu *target_vcpu = kvm_get_vcpu_by_id(kvm, physical_id);
358 
359 	/* Once again, nothing to do if the target vCPU doesn't exist. */
360 	if (unlikely(!target_vcpu))
361 		return;
362 
363 	avic_kick_vcpu(target_vcpu, icrl);
364 }
365 
366 static void avic_kick_vcpu_by_logical_id(struct kvm *kvm, u32 *avic_logical_id_table,
367 					 u32 logid_index, u32 icrl)
368 {
369 	u32 physical_id;
370 
371 	if (avic_logical_id_table) {
372 		u32 logid_entry = avic_logical_id_table[logid_index];
373 
374 		/* Nothing to do if the logical destination is invalid. */
375 		if (unlikely(!(logid_entry & AVIC_LOGICAL_ID_ENTRY_VALID_MASK)))
376 			return;
377 
378 		physical_id = logid_entry &
379 			      AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK;
380 	} else {
381 		/*
382 		 * For x2APIC, the logical APIC ID is a read-only value that is
383 		 * derived from the x2APIC ID, thus the x2APIC ID can be found
384 		 * by reversing the calculation (stored in logid_index).  Note,
385 		 * bits 31:20 of the x2APIC ID aren't propagated to the logical
386 		 * ID, but KVM limits the x2APIC ID to KVM_MAX_VCPU_IDS.
387 		 */
388 		physical_id = logid_index;
389 	}
390 
391 	avic_kick_vcpu_by_physical_id(kvm, physical_id, icrl);
392 }
393 
394 /*
395  * A fast-path version of avic_kick_target_vcpus(), which attempts to match
396  * the destination APIC ID to a vCPU without looping through all vCPUs.
397  */
398 static int avic_kick_target_vcpus_fast(struct kvm *kvm, struct kvm_lapic *source,
399 				       u32 icrl, u32 icrh, u32 index)
400 {
401 	int dest_mode = icrl & APIC_DEST_MASK;
402 	int shorthand = icrl & APIC_SHORT_MASK;
403 	struct kvm_svm *kvm_svm = to_kvm_svm(kvm);
404 	u32 dest;
405 
406 	if (shorthand != APIC_DEST_NOSHORT)
407 		return -EINVAL;
408 
409 	if (apic_x2apic_mode(source))
410 		dest = icrh;
411 	else
412 		dest = GET_XAPIC_DEST_FIELD(icrh);
413 
414 	if (dest_mode == APIC_DEST_PHYSICAL) {
415 		/* broadcast destination, use slow path */
416 		if (apic_x2apic_mode(source) && dest == X2APIC_BROADCAST)
417 			return -EINVAL;
418 		if (!apic_x2apic_mode(source) && dest == APIC_BROADCAST)
419 			return -EINVAL;
420 
421 		if (WARN_ON_ONCE(dest != index))
422 			return -EINVAL;
423 
424 		avic_kick_vcpu_by_physical_id(kvm, dest, icrl);
425 	} else {
426 		u32 *avic_logical_id_table;
427 		unsigned long bitmap, i;
428 		u32 cluster;
429 
430 		if (apic_x2apic_mode(source)) {
431 			/* 16 bit dest mask, 16 bit cluster id */
432 			bitmap = dest & 0xFFFF;
433 			cluster = (dest >> 16) << 4;
434 		} else if (kvm_lapic_get_reg(source, APIC_DFR) == APIC_DFR_FLAT) {
435 			/* 8 bit dest mask */
436 			bitmap = dest;
437 			cluster = 0;
438 		} else {
439 			/* 4 bit dest mask, 4 bit cluster id */
440 			bitmap = dest & 0xF;
441 			cluster = (dest >> 4) << 2;
442 		}
443 
444 		/* Nothing to do if there are no destinations in the cluster. */
445 		if (unlikely(!bitmap))
446 			return 0;
447 
448 		if (apic_x2apic_mode(source))
449 			avic_logical_id_table = NULL;
450 		else
451 			avic_logical_id_table = page_address(kvm_svm->avic_logical_id_table_page);
452 
453 		/*
454 		 * AVIC is inhibited if vCPUs aren't mapped 1:1 with logical
455 		 * IDs, thus each bit in the destination is guaranteed to map
456 		 * to at most one vCPU.
457 		 */
458 		for_each_set_bit(i, &bitmap, 16)
459 			avic_kick_vcpu_by_logical_id(kvm, avic_logical_id_table,
460 						     cluster + i, icrl);
461 	}
462 
463 	return 0;
464 }
465 
466 static void avic_kick_target_vcpus(struct kvm *kvm, struct kvm_lapic *source,
467 				   u32 icrl, u32 icrh, u32 index)
468 {
469 	u32 dest = apic_x2apic_mode(source) ? icrh : GET_XAPIC_DEST_FIELD(icrh);
470 	unsigned long i;
471 	struct kvm_vcpu *vcpu;
472 
473 	if (!avic_kick_target_vcpus_fast(kvm, source, icrl, icrh, index))
474 		return;
475 
476 	trace_kvm_avic_kick_vcpu_slowpath(icrh, icrl, index);
477 
478 	/*
479 	 * Wake any target vCPUs that are blocking, i.e. waiting for a wake
480 	 * event.  There's no need to signal doorbells, as hardware has handled
481 	 * vCPUs that were in guest at the time of the IPI, and vCPUs that have
482 	 * since entered the guest will have processed pending IRQs at VMRUN.
483 	 */
484 	kvm_for_each_vcpu(i, vcpu, kvm) {
485 		if (kvm_apic_match_dest(vcpu, source, icrl & APIC_SHORT_MASK,
486 					dest, icrl & APIC_DEST_MASK))
487 			avic_kick_vcpu(vcpu, icrl);
488 	}
489 }
490 
491 int avic_incomplete_ipi_interception(struct kvm_vcpu *vcpu)
492 {
493 	struct vcpu_svm *svm = to_svm(vcpu);
494 	u32 icrh = svm->vmcb->control.exit_info_1 >> 32;
495 	u32 icrl = svm->vmcb->control.exit_info_1;
496 	u32 id = svm->vmcb->control.exit_info_2 >> 32;
497 	u32 index = svm->vmcb->control.exit_info_2 & 0x1FF;
498 	struct kvm_lapic *apic = vcpu->arch.apic;
499 
500 	trace_kvm_avic_incomplete_ipi(vcpu->vcpu_id, icrh, icrl, id, index);
501 
502 	switch (id) {
503 	case AVIC_IPI_FAILURE_INVALID_TARGET:
504 	case AVIC_IPI_FAILURE_INVALID_INT_TYPE:
505 		/*
506 		 * Emulate IPIs that are not handled by AVIC hardware, which
507 		 * only virtualizes Fixed, Edge-Triggered INTRs, and falls over
508 		 * if _any_ targets are invalid, e.g. if the logical mode mask
509 		 * is a superset of running vCPUs.
510 		 *
511 		 * The exit is a trap, i.e. ICR holds the correct value and RIP
512 		 * has been advanced; KVM is responsible only for emulating the
513 		 * IPI.  Sadly, hardware may sometimes leave the BUSY flag set,
514 		 * in which case KVM needs to emulate the ICR write as well in
515 		 * order to clear the BUSY flag.
516 		 */
517 		if (icrl & APIC_ICR_BUSY)
518 			kvm_apic_write_nodecode(vcpu, APIC_ICR);
519 		else
520 			kvm_apic_send_ipi(apic, icrl, icrh);
521 		break;
522 	case AVIC_IPI_FAILURE_TARGET_NOT_RUNNING:
523 		/*
524 		 * At this point, we expect that the AVIC HW has already
525 		 * set the appropriate IRR bits on the valid target
526 		 * vcpus. So, we just need to kick the appropriate vcpu.
527 		 */
528 		avic_kick_target_vcpus(vcpu->kvm, apic, icrl, icrh, index);
529 		break;
530 	case AVIC_IPI_FAILURE_INVALID_BACKING_PAGE:
531 		WARN_ONCE(1, "Invalid backing page\n");
532 		break;
533 	case AVIC_IPI_FAILURE_INVALID_IPI_VECTOR:
534 		/* Invalid IPI with vector < 16 */
535 		break;
536 	default:
537 		vcpu_unimpl(vcpu, "Unknown avic incomplete IPI interception\n");
538 	}
539 
540 	return 1;
541 }
542 
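/*
 * AVIC is inhibited while the vCPU is in guest mode (L2), as KVM doesn't
 * virtualize AVIC for a nested guest.
 */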
543 unsigned long avic_vcpu_get_apicv_inhibit_reasons(struct kvm_vcpu *vcpu)
544 {
545 	if (is_guest_mode(vcpu))
546 		return APICV_INHIBIT_REASON_NESTED;
547 	return 0;
548 }
549 
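/*
 * Translate an xAPIC logical ID (LDR) into a pointer to its entry in the
 * per-VM logical APIC ID table.  Flat mode indexes by the bit position of
 * the (single) destination bit; cluster mode indexes by (cluster << 2) plus
 * the bit position within the 4-bit cluster mask.  Returns NULL if zero or
 * multiple destination bits are set, or for the broadcast cluster 0xf.
 */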
550 static u32 *avic_get_logical_id_entry(struct kvm_vcpu *vcpu, u32 ldr, bool flat)
551 {
552 	struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm);
553 	u32 *logical_apic_id_table;
554 	u32 cluster, index;
555 
556 	ldr = GET_APIC_LOGICAL_ID(ldr);
557 
558 	if (flat) {
559 		cluster = 0;
560 	} else {
561 		cluster = (ldr >> 4);
562 		if (cluster >= 0xf)
563 			return NULL;
564 		ldr &= 0xf;
565 	}
566 	if (!ldr || !is_power_of_2(ldr))
567 		return NULL;
568 
569 	index = __ffs(ldr);
570 	if (WARN_ON_ONCE(index > 7))
571 		return NULL;
572 	index += (cluster << 2);
573 
574 	logical_apic_id_table = (u32 *) page_address(kvm_svm->avic_logical_id_table_page);
575 
576 	return &logical_apic_id_table[index];
577 }
578 
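/*
 * Install the vCPU's guest physical APIC ID in the logical APIC ID table
 * entry selected by the given LDR so that AVIC hardware can translate
 * logically-addressed IPIs to the corresponding physical APIC ID.
 */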
579 static void avic_ldr_write(struct kvm_vcpu *vcpu, u8 g_physical_id, u32 ldr)
580 {
581 	bool flat;
582 	u32 *entry, new_entry;
583 
584 	flat = kvm_lapic_get_reg(vcpu->arch.apic, APIC_DFR) == APIC_DFR_FLAT;
585 	entry = avic_get_logical_id_entry(vcpu, ldr, flat);
586 	if (!entry)
587 		return;
588 
589 	new_entry = READ_ONCE(*entry);
590 	new_entry &= ~AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK;
591 	new_entry |= (g_physical_id & AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK);
592 	new_entry |= AVIC_LOGICAL_ID_ENTRY_VALID_MASK;
593 	WRITE_ONCE(*entry, new_entry);
594 }
595 
596 static void avic_invalidate_logical_id_entry(struct kvm_vcpu *vcpu)
597 {
598 	struct vcpu_svm *svm = to_svm(vcpu);
599 	bool flat = svm->dfr_reg == APIC_DFR_FLAT;
600 	u32 *entry;
601 
602 	/* Note: x2AVIC does not use logical APIC ID table */
603 	if (apic_x2apic_mode(vcpu->arch.apic))
604 		return;
605 
606 	entry = avic_get_logical_id_entry(vcpu, svm->ldr_reg, flat);
607 	if (entry)
608 		clear_bit(AVIC_LOGICAL_ID_ENTRY_VALID_BIT, (unsigned long *)entry);
609 }
610 
611 static void avic_handle_ldr_update(struct kvm_vcpu *vcpu)
612 {
613 	struct vcpu_svm *svm = to_svm(vcpu);
614 	u32 ldr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_LDR);
615 	u32 id = kvm_xapic_id(vcpu->arch.apic);
616 
617 	/* AVIC does not support LDR update for x2APIC */
618 	if (apic_x2apic_mode(vcpu->arch.apic))
619 		return;
620 
621 	if (ldr == svm->ldr_reg)
622 		return;
623 
624 	avic_invalidate_logical_id_entry(vcpu);
625 
626 	svm->ldr_reg = ldr;
627 	avic_ldr_write(vcpu, id, ldr);
628 }
629 
630 static void avic_handle_dfr_update(struct kvm_vcpu *vcpu)
631 {
632 	struct vcpu_svm *svm = to_svm(vcpu);
633 	u32 dfr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_DFR);
634 
635 	if (svm->dfr_reg == dfr)
636 		return;
637 
638 	avic_invalidate_logical_id_entry(vcpu);
639 	svm->dfr_reg = dfr;
640 }
641 
642 static int avic_unaccel_trap_write(struct kvm_vcpu *vcpu)
643 {
644 	u32 offset = to_svm(vcpu)->vmcb->control.exit_info_1 &
645 				AVIC_UNACCEL_ACCESS_OFFSET_MASK;
646 
647 	switch (offset) {
648 	case APIC_LDR:
649 		avic_handle_ldr_update(vcpu);
650 		break;
651 	case APIC_DFR:
652 		avic_handle_dfr_update(vcpu);
653 		break;
654 	case APIC_RRR:
655 		/* Ignore writes to Read Remote Data, it's read-only. */
656 		return 1;
657 	default:
658 		break;
659 	}
660 
661 	kvm_apic_write_nodecode(vcpu, offset);
662 	return 1;
663 }
664 
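/*
 * APIC registers in the list below are handled as "trap" exits: hardware
 * completes the write to the vAPIC page before exiting, so KVM only needs
 * to emulate the side effects.  Accesses to other offsets are faults and
 * require full instruction emulation.
 */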
665 static bool is_avic_unaccelerated_access_trap(u32 offset)
666 {
667 	bool ret = false;
668 
669 	switch (offset) {
670 	case APIC_ID:
671 	case APIC_EOI:
672 	case APIC_RRR:
673 	case APIC_LDR:
674 	case APIC_DFR:
675 	case APIC_SPIV:
676 	case APIC_ESR:
677 	case APIC_ICR:
678 	case APIC_LVTT:
679 	case APIC_LVTTHMR:
680 	case APIC_LVTPC:
681 	case APIC_LVT0:
682 	case APIC_LVT1:
683 	case APIC_LVTERR:
684 	case APIC_TMICT:
685 	case APIC_TDCR:
686 		ret = true;
687 		break;
688 	default:
689 		break;
690 	}
691 	return ret;
692 }
693 
694 int avic_unaccelerated_access_interception(struct kvm_vcpu *vcpu)
695 {
696 	struct vcpu_svm *svm = to_svm(vcpu);
697 	int ret = 0;
698 	u32 offset = svm->vmcb->control.exit_info_1 &
699 		     AVIC_UNACCEL_ACCESS_OFFSET_MASK;
700 	u32 vector = svm->vmcb->control.exit_info_2 &
701 		     AVIC_UNACCEL_ACCESS_VECTOR_MASK;
702 	bool write = (svm->vmcb->control.exit_info_1 >> 32) &
703 		     AVIC_UNACCEL_ACCESS_WRITE_MASK;
704 	bool trap = is_avic_unaccelerated_access_trap(offset);
705 
706 	trace_kvm_avic_unaccelerated_access(vcpu->vcpu_id, offset,
707 					    trap, write, vector);
708 	if (trap) {
709 		/* Handling Trap */
710 		WARN_ONCE(!write, "svm: Handling trap read.\n");
711 		ret = avic_unaccel_trap_write(vcpu);
712 	} else {
713 		/* Handling Fault */
714 		ret = kvm_emulate_instruction(vcpu, 0);
715 	}
716 
717 	return ret;
718 }
719 
720 int avic_init_vcpu(struct vcpu_svm *svm)
721 {
722 	int ret;
723 	struct kvm_vcpu *vcpu = &svm->vcpu;
724 
725 	if (!enable_apicv || !irqchip_in_kernel(vcpu->kvm))
726 		return 0;
727 
728 	ret = avic_init_backing_page(vcpu);
729 	if (ret)
730 		return ret;
731 
732 	INIT_LIST_HEAD(&svm->ir_list);
733 	spin_lock_init(&svm->ir_list_lock);
734 	svm->dfr_reg = APIC_DFR_FLAT;
735 
736 	return ret;
737 }
738 
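/*
 * Re-derive DFR/LDR-dependent AVIC state (the logical APIC ID table) after
 * the local APIC registers have changed without being trapped, e.g. on
 * userspace state restore or when AVIC is re-activated.
 */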
739 void avic_apicv_post_state_restore(struct kvm_vcpu *vcpu)
740 {
741 	avic_handle_dfr_update(vcpu);
742 	avic_handle_ldr_update(vcpu);
743 }
744 
745 static int avic_set_pi_irte_mode(struct kvm_vcpu *vcpu, bool activate)
746 {
747 	int ret = 0;
748 	unsigned long flags;
749 	struct amd_svm_iommu_ir *ir;
750 	struct vcpu_svm *svm = to_svm(vcpu);
751 
752 	if (!kvm_arch_has_assigned_device(vcpu->kvm))
753 		return 0;
754 
755 	/*
756 	 * Here, we go through the per-vcpu ir_list to update all existing
757 	 * interrupt remapping table entries targeting this vcpu.
758 	 */
759 	spin_lock_irqsave(&svm->ir_list_lock, flags);
760 
761 	if (list_empty(&svm->ir_list))
762 		goto out;
763 
764 	list_for_each_entry(ir, &svm->ir_list, node) {
765 		if (activate)
766 			ret = amd_iommu_activate_guest_mode(ir->data);
767 		else
768 			ret = amd_iommu_deactivate_guest_mode(ir->data);
769 		if (ret)
770 			break;
771 	}
772 out:
773 	spin_unlock_irqrestore(&svm->ir_list_lock, flags);
774 	return ret;
775 }
776 
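/*
 * Remove the ir_list entry that wraps the given IRTE data (pi->ir_data)
 * from the vCPU's list, if such an entry exists.
 */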
777 static void svm_ir_list_del(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi)
778 {
779 	unsigned long flags;
780 	struct amd_svm_iommu_ir *cur;
781 
782 	spin_lock_irqsave(&svm->ir_list_lock, flags);
783 	list_for_each_entry(cur, &svm->ir_list, node) {
784 		if (cur->data != pi->ir_data)
785 			continue;
786 		list_del(&cur->node);
787 		kfree(cur);
788 		break;
789 	}
790 	spin_unlock_irqrestore(&svm->ir_list_lock, flags);
791 }
792 
793 static int svm_ir_list_add(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi)
794 {
795 	int ret = 0;
796 	unsigned long flags;
797 	struct amd_svm_iommu_ir *ir;
798 	u64 entry;
799 
800 	if (WARN_ON_ONCE(!pi->ir_data))
801 		return -EINVAL;
802 
803 	/**
804 	 * In some cases, the existing irte is updated and re-set,
805 	 * so we need to check here if it's already been added
806 	 * to the ir_list.
807 	 */
808 	if (pi->prev_ga_tag) {
809 		struct kvm *kvm = svm->vcpu.kvm;
810 		u32 vcpu_id = AVIC_GATAG_TO_VCPUID(pi->prev_ga_tag);
811 		struct kvm_vcpu *prev_vcpu = kvm_get_vcpu_by_id(kvm, vcpu_id);
812 		struct vcpu_svm *prev_svm;
813 
814 		if (!prev_vcpu) {
815 			ret = -EINVAL;
816 			goto out;
817 		}
818 
819 		prev_svm = to_svm(prev_vcpu);
820 		svm_ir_list_del(prev_svm, pi);
821 	}
822 
823 	/**
824 	 * Allocate a new amd_svm_iommu_ir, which will be added to
825 	 * the per-vcpu ir_list.
826 	 */
827 	ir = kzalloc(sizeof(struct amd_svm_iommu_ir), GFP_ATOMIC | __GFP_ACCOUNT);
828 	if (!ir) {
829 		ret = -ENOMEM;
830 		goto out;
831 	}
832 	ir->data = pi->ir_data;
833 
834 	spin_lock_irqsave(&svm->ir_list_lock, flags);
835 
836 	/*
837 	 * Update the target pCPU for IOMMU doorbells if the vCPU is running.
838 	 * If the vCPU is NOT running, i.e. is blocking or scheduled out, KVM
839 	 * will update the pCPU info when the vCPU is awakened and/or scheduled in.
840 	 * See also avic_vcpu_load().
841 	 */
842 	entry = READ_ONCE(*(svm->avic_physical_id_cache));
843 	if (entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK)
844 		amd_iommu_update_ga(entry & AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK,
845 				    true, pi->ir_data);
846 
847 	list_add(&ir->node, &svm->ir_list);
848 	spin_unlock_irqrestore(&svm->ir_list_lock, flags);
849 out:
850 	return ret;
851 }
852 
853 /*
854  * Note:
855  * The HW cannot support posting multicast/broadcast
856  * interrupts to a vCPU. So, we still use legacy interrupt
857  * remapping for these kinds of interrupts.
858  *
859  * For lowest-priority interrupts, we only support
860  * those with a single CPU as the destination, e.g. the user
861  * configures the interrupts via /proc/irq or uses
862  * irqbalance to make the interrupts single-CPU.
863  */
864 static int
865 get_pi_vcpu_info(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e,
866 		 struct vcpu_data *vcpu_info, struct vcpu_svm **svm)
867 {
868 	struct kvm_lapic_irq irq;
869 	struct kvm_vcpu *vcpu = NULL;
870 
871 	kvm_set_msi_irq(kvm, e, &irq);
872 
873 	if (!kvm_intr_is_single_vcpu(kvm, &irq, &vcpu) ||
874 	    !kvm_irq_is_postable(&irq)) {
875 		pr_debug("SVM: %s: use legacy intr remap mode for irq %u\n",
876 			 __func__, irq.vector);
877 		return -1;
878 	}
879 
880 	pr_debug("SVM: %s: use GA mode for irq %u\n", __func__,
881 		 irq.vector);
882 	*svm = to_svm(vcpu);
883 	vcpu_info->pi_desc_addr = __sme_set(page_to_phys((*svm)->avic_backing_page));
884 	vcpu_info->vector = irq.vector;
885 
886 	return 0;
887 }
888 
889 /*
890  * avic_pi_update_irte - set IRTE for Posted-Interrupts
891  *
892  * @kvm: kvm
893  * @host_irq: host irq of the interrupt
894  * @guest_irq: gsi of the interrupt
895  * @set: set or unset PI
896  * returns 0 on success, < 0 on failure
897  */
898 int avic_pi_update_irte(struct kvm *kvm, unsigned int host_irq,
899 			uint32_t guest_irq, bool set)
900 {
901 	struct kvm_kernel_irq_routing_entry *e;
902 	struct kvm_irq_routing_table *irq_rt;
903 	bool enable_remapped_mode = true;
904 	int idx, ret = 0;
905 
906 	if (!kvm_arch_has_assigned_device(kvm) || !kvm_arch_has_irq_bypass())
907 		return 0;
908 
909 	pr_debug("SVM: %s: host_irq=%#x, guest_irq=%#x, set=%#x\n",
910 		 __func__, host_irq, guest_irq, set);
911 
912 	idx = srcu_read_lock(&kvm->irq_srcu);
913 	irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);
914 
915 	if (guest_irq >= irq_rt->nr_rt_entries ||
916 		hlist_empty(&irq_rt->map[guest_irq])) {
917 		pr_warn_once("no route for guest_irq %u/%u (broken user space?)\n",
918 			     guest_irq, irq_rt->nr_rt_entries);
919 		goto out;
920 	}
921 
922 	hlist_for_each_entry(e, &irq_rt->map[guest_irq], link) {
923 		struct vcpu_data vcpu_info;
924 		struct vcpu_svm *svm = NULL;
925 
926 		if (e->type != KVM_IRQ_ROUTING_MSI)
927 			continue;
928 
929 		/**
930 		 * Here, we set up legacy mode in the following cases:
931 		 * 1. When the interrupt cannot be targeted to a specific vcpu.
932 		 * 2. Unsetting posted interrupt.
933 		 * 3. APIC virtualization is disabled for the vcpu.
934 		 * 4. IRQ has incompatible delivery mode (SMI, INIT, etc)
935 		 */
936 		if (!get_pi_vcpu_info(kvm, e, &vcpu_info, &svm) && set &&
937 		    kvm_vcpu_apicv_active(&svm->vcpu)) {
938 			struct amd_iommu_pi_data pi;
939 
940 			enable_remapped_mode = false;
941 
942 			/* Try to enable guest_mode in IRTE */
943 			pi.base = __sme_set(page_to_phys(svm->avic_backing_page) &
944 					    AVIC_HPA_MASK);
945 			pi.ga_tag = AVIC_GATAG(to_kvm_svm(kvm)->avic_vm_id,
946 						     svm->vcpu.vcpu_id);
947 			pi.is_guest_mode = true;
948 			pi.vcpu_data = &vcpu_info;
949 			ret = irq_set_vcpu_affinity(host_irq, &pi);
950 
951 			/**
952 			 * Here, we have successfully set up vcpu affinity in
953 			 * IOMMU guest mode. Now, we need to store the posted
954 			 * interrupt information in a per-vcpu ir_list so that
955 			 * we can reference it directly when we update vcpu
956 			 * scheduling information in the IOMMU irte.
957 			 */
958 			if (!ret && pi.is_guest_mode)
959 				svm_ir_list_add(svm, &pi);
960 		}
961 
962 		if (!ret && svm) {
963 			trace_kvm_pi_irte_update(host_irq, svm->vcpu.vcpu_id,
964 						 e->gsi, vcpu_info.vector,
965 						 vcpu_info.pi_desc_addr, set);
966 		}
967 
968 		if (ret < 0) {
969 			pr_err("%s: failed to update PI IRTE\n", __func__);
970 			goto out;
971 		}
972 	}
973 
974 	ret = 0;
975 	if (enable_remapped_mode) {
976 		/* Use legacy mode in IRTE */
977 		struct amd_iommu_pi_data pi;
978 
979 		/**
980 		 * Here, pi is used to:
981 		 * - Tell IOMMU to use legacy mode for this interrupt.
982 		 * - Retrieve ga_tag of prior interrupt remapping data.
983 		 */
984 		pi.prev_ga_tag = 0;
985 		pi.is_guest_mode = false;
986 		ret = irq_set_vcpu_affinity(host_irq, &pi);
987 
988 		/**
989 		 * Check if the posted interrupt was previously
990 		 * set up with guest_mode by checking if the ga_tag
991 		 * was cached. If so, we need to clean up the per-vcpu
992 		 * ir_list.
993 		 */
994 		if (!ret && pi.prev_ga_tag) {
995 			int id = AVIC_GATAG_TO_VCPUID(pi.prev_ga_tag);
996 			struct kvm_vcpu *vcpu;
997 
998 			vcpu = kvm_get_vcpu_by_id(kvm, id);
999 			if (vcpu)
1000 				svm_ir_list_del(to_svm(vcpu), &pi);
1001 		}
1002 	}
1003 out:
1004 	srcu_read_unlock(&kvm->irq_srcu, idx);
1005 	return ret;
1006 }
1007 
1008 static inline int
1009 avic_update_iommu_vcpu_affinity(struct kvm_vcpu *vcpu, int cpu, bool r)
1010 {
1011 	int ret = 0;
1012 	struct amd_svm_iommu_ir *ir;
1013 	struct vcpu_svm *svm = to_svm(vcpu);
1014 
1015 	lockdep_assert_held(&svm->ir_list_lock);
1016 
1017 	if (!kvm_arch_has_assigned_device(vcpu->kvm))
1018 		return 0;
1019 
1020 	/*
1021 	 * Here, we go through the per-vcpu ir_list to update all existing
1022 	 * interrupt remapping table entries targeting this vcpu.
1023 	 */
1024 	if (list_empty(&svm->ir_list))
1025 		return 0;
1026 
1027 	list_for_each_entry(ir, &svm->ir_list, node) {
1028 		ret = amd_iommu_update_ga(cpu, r, ir->data);
1029 		if (ret)
1030 			return ret;
1031 	}
1032 	return 0;
1033 }
1034 
1035 void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1036 {
1037 	u64 entry;
1038 	int h_physical_id = kvm_cpu_get_apicid(cpu);
1039 	struct vcpu_svm *svm = to_svm(vcpu);
1040 	unsigned long flags;
1041 
1042 	lockdep_assert_preemption_disabled();
1043 
1044 	if (WARN_ON(h_physical_id & ~AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK))
1045 		return;
1046 
1047 	/*
1048 	 * No need to update anything if the vCPU is blocking, i.e. if the vCPU
1049 	 * is being scheduled in after being preempted.  The CPU entries in the
1050 	 * Physical APIC table and IRTE are consumed iff IsRun{ning} is '1'.
1051 	 * If the vCPU was migrated, its new CPU value will be stuffed when the
1052 	 * vCPU unblocks.
1053 	 */
1054 	if (kvm_vcpu_is_blocking(vcpu))
1055 		return;
1056 
1057 	/*
1058 	 * Grab the per-vCPU interrupt remapping lock even if the VM doesn't
1059 	 * _currently_ have assigned devices, as that can change.  Holding
1060 	 * ir_list_lock ensures that either svm_ir_list_add() will consume
1061 	 * up-to-date entry information, or that this task will wait until
1062 	 * svm_ir_list_add() completes to set the new target pCPU.
1063 	 */
1064 	spin_lock_irqsave(&svm->ir_list_lock, flags);
1065 
1066 	entry = READ_ONCE(*(svm->avic_physical_id_cache));
1067 	WARN_ON_ONCE(entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK);
1068 
1069 	entry &= ~AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK;
1070 	entry |= (h_physical_id & AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK);
1071 	entry |= AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
1072 
1073 	WRITE_ONCE(*(svm->avic_physical_id_cache), entry);
1074 	avic_update_iommu_vcpu_affinity(vcpu, h_physical_id, true);
1075 
1076 	spin_unlock_irqrestore(&svm->ir_list_lock, flags);
1077 }
1078 
1079 void avic_vcpu_put(struct kvm_vcpu *vcpu)
1080 {
1081 	u64 entry;
1082 	struct vcpu_svm *svm = to_svm(vcpu);
1083 	unsigned long flags;
1084 
1085 	lockdep_assert_preemption_disabled();
1086 
1087 	/*
1088 	 * Note, reading the Physical ID entry outside of ir_list_lock is safe
1089 	 * as only the pCPU that has loaded (or is loading) the vCPU is allowed
1090 	 * to modify the entry, and preemption is disabled.  I.e. the vCPU
1091 	 * can't be scheduled out and thus avic_vcpu_{put,load}() can't run
1092 	 * recursively.
1093 	 */
1094 	entry = READ_ONCE(*(svm->avic_physical_id_cache));
1095 
1096 	/* Nothing to do if IsRunning == '0' due to vCPU blocking. */
1097 	if (!(entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK))
1098 		return;
1099 
1100 	/*
1101 	 * Take and hold the per-vCPU interrupt remapping lock while updating
1102 	 * the Physical ID entry even though the lock doesn't protect against
1103 	 * multiple writers (see above).  Holding ir_list_lock ensures that
1104 	 * either svm_ir_list_add() will consume up-to-date entry information,
1105 	 * or that this task will wait until svm_ir_list_add() completes to
1106 	 * mark the vCPU as not running.
1107 	 */
1108 	spin_lock_irqsave(&svm->ir_list_lock, flags);
1109 
1110 	avic_update_iommu_vcpu_affinity(vcpu, -1, 0);
1111 
1112 	entry &= ~AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
1113 	WRITE_ONCE(*(svm->avic_physical_id_cache), entry);
1114 
1115 	spin_unlock_irqrestore(&svm->ir_list_lock, flags);
1116 
1117 }
1118 
1119 void avic_refresh_virtual_apic_mode(struct kvm_vcpu *vcpu)
1120 {
1121 	struct vcpu_svm *svm = to_svm(vcpu);
1122 	struct vmcb *vmcb = svm->vmcb01.ptr;
1123 
1124 	if (!lapic_in_kernel(vcpu) || !enable_apicv)
1125 		return;
1126 
1127 	if (kvm_vcpu_apicv_active(vcpu)) {
1128 		/**
1129 		 * During temporary AVIC deactivation, the guest could update
1130 		 * APIC ID, DFR and LDR registers, which would not be trapped
1131 		 * by avic_unaccelerated_access_interception(). In this case,
1132 		 * we need to check and update the AVIC logical APIC ID table
1133 		 * accordingly before re-activating.
1134 		 */
1135 		avic_apicv_post_state_restore(vcpu);
1136 		avic_activate_vmcb(svm);
1137 	} else {
1138 		avic_deactivate_vmcb(svm);
1139 	}
1140 	vmcb_mark_dirty(vmcb, VMCB_AVIC);
1141 }
1142 
1143 void avic_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
1144 {
1145 	bool activated = kvm_vcpu_apicv_active(vcpu);
1146 
1147 	if (!enable_apicv)
1148 		return;
1149 
1150 	avic_refresh_virtual_apic_mode(vcpu);
1151 
1152 	if (activated)
1153 		avic_vcpu_load(vcpu, vcpu->cpu);
1154 	else
1155 		avic_vcpu_put(vcpu);
1156 
1157 	avic_set_pi_irte_mode(vcpu, activated);
1158 }
1159 
1160 void avic_vcpu_blocking(struct kvm_vcpu *vcpu)
1161 {
1162 	if (!kvm_vcpu_apicv_active(vcpu))
1163 		return;
1164 
1165 	/*
1166 	 * Unload the AVIC when the vCPU is about to block, _before_
1167 	 * the vCPU actually blocks.
1168 	 *
1169 	 * Any IRQs that arrive before IsRunning=0 will not cause an
1170 	 * incomplete IPI vmexit on the source, therefore vIRR will also
1171 	 * be checked by kvm_vcpu_check_block() before blocking.  The
1172 	 * memory barrier implicit in set_current_state orders writing
1173 	 * IsRunning=0 before reading the vIRR.  The processor needs a
1174 	 * matching memory barrier on interrupt delivery between writing
1175 	 * IRR and reading IsRunning; the lack of this barrier might be
1176 	 * the cause of erratum #1235.
1177 	 */
1178 	avic_vcpu_put(vcpu);
1179 }
1180 
1181 void avic_vcpu_unblocking(struct kvm_vcpu *vcpu)
1182 {
1183 	if (!kvm_vcpu_apicv_active(vcpu))
1184 		return;
1185 
1186 	avic_vcpu_load(vcpu, vcpu->cpu);
1187 }
1188 
1189 /*
1190  * Note:
1191  * - The module param avic enables both xAPIC and x2APIC mode.
1192  * - The hypervisor can support both xAVIC and x2AVIC in the same guest.
1193  * - The mode can be switched at run-time.
1194  */
1195 bool avic_hardware_setup(void)
1196 {
1197 	if (!npt_enabled)
1198 		return false;
1199 
1200 	/* AVIC is a prerequisite for x2AVIC. */
1201 	if (!boot_cpu_has(X86_FEATURE_AVIC) && !force_avic) {
1202 		if (boot_cpu_has(X86_FEATURE_X2AVIC)) {
1203 			pr_warn(FW_BUG "Cannot support x2AVIC because AVIC is disabled");
1204 			pr_warn(FW_BUG "Try enabling AVIC using the force_avic option");
1205 		}
1206 		return false;
1207 	}
1208 
1209 	if (cc_platform_has(CC_ATTR_HOST_SEV_SNP) &&
1210 	    !boot_cpu_has(X86_FEATURE_HV_INUSE_WR_ALLOWED)) {
1211 		pr_warn("AVIC disabled: missing HvInUseWrAllowed on SNP-enabled system\n");
1212 		return false;
1213 	}
1214 
1215 	if (boot_cpu_has(X86_FEATURE_AVIC)) {
1216 		pr_info("AVIC enabled\n");
1217 	} else if (force_avic) {
1218 		/*
1219 		 * Some older systems does not advertise AVIC support.
1220 		 * Some older systems do not advertise AVIC support.
1221 		 * See the Revision Guide for the specific AMD processor for more details.
1222 		pr_warn("AVIC is not supported in CPUID but force enabled");
1223 		pr_warn("Your system might crash and burn");
1224 	}
1225 
1226 	/* AVIC is a prerequisite for x2AVIC. */
1227 	x2avic_enabled = boot_cpu_has(X86_FEATURE_X2AVIC);
1228 	if (x2avic_enabled)
1229 		pr_info("x2AVIC enabled\n");
1230 
1231 	amd_iommu_register_ga_log_notifier(&avic_ga_log_notifier);
1232 
1233 	return true;
1234 }
1235