Lines Matching +full:cpu +full:- +full:read

// SPDX-License-Identifier: GPL-2.0-only
 * AMD Secure AVIC Support (SEV-SNP Guests)
#include <linux/percpu-defs.h>
static inline void *get_reg_bitmap(unsigned int cpu, unsigned int offset)
	return &per_cpu_ptr(savic_page, cpu)->regs[offset];

static inline void update_vector(unsigned int cpu, unsigned int offset,
	void *bitmap = get_reg_bitmap(cpu, offset);
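The matches show only the bitmap lookup inside update_vector(); a minimal sketch of how the helper plausibly completes, assuming apic_set_vector()/apic_clear_vector() style bit helpers exist for the backing-page bitmaps (those helper names are an assumption, not confirmed by the matched lines):

/* Sketch only: set or clear @vector in the register bitmap at @offset
 * of @cpu's APIC backing page; the bit helper names are assumed. */
static inline void update_vector(unsigned int cpu, unsigned int offset,
				 unsigned int vector, bool set)
{
	void *bitmap = get_reg_bitmap(cpu, offset);

	if (set)
		apic_set_vector(vector, bitmap);
	else
		apic_clear_vector(vector, bitmap);
}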
 * result in #VC exception (for non-accelerated register accesses)
 * can read/write the x2APIC register in the guest APIC backing page.
 * the read() and write() callbacks directly read/write the APIC register
95 "APIC register read offset 0x%x not aligned at 16 bytes", reg)) in savic_read()
113 IS_ALIGNED(reg - 4, 16)), in savic_read()
114 "Misaligned APIC_IRR/ALLOWED_IRR APIC register read offset 0x%x", reg)) in savic_read()
 * the vCPU. So, self IPIs are hardware-accelerated.

static void send_ipi_dest(unsigned int cpu, unsigned int vector, bool nmi)
		apic_set_reg(per_cpu_ptr(savic_page, cpu), SAVIC_NMI_REQ, 1);
		update_vector(cpu, APIC_IRR, vector, true);
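Taken together, the matched lines suggest an NMI is signalled through a dedicated SAVIC_NMI_REQ field while a fixed interrupt is posted by setting its vector in the destination's APIC_IRR. A plausible body for send_ipi_dest(), with the if/else framing assumed:

/* Sketch only: how the two matched statements plausibly combine. */
static void send_ipi_dest(unsigned int cpu, unsigned int vector, bool nmi)
{
	if (nmi)
		apic_set_reg(per_cpu_ptr(savic_page, cpu), SAVIC_NMI_REQ, 1);
	else
		update_vector(cpu, APIC_IRR, vector, true);
}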
/* in send_ipi_allbut() */
	unsigned int cpu, src_cpu;
	for_each_cpu(cpu, cpu_online_mask) {
		if (cpu == src_cpu)
		send_ipi_dest(cpu, vector, nmi);
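The all-but-self variant walks the online mask and skips the sending CPU. A sketch under the assumption that the function takes the vector and an NMI flag; any locking or CPU-hotplug protection in the real function is omitted:

/* Sketch only: post to every online CPU except the sender. */
static void send_ipi_allbut(unsigned int vector, bool nmi)
{
	unsigned int cpu, src_cpu = raw_smp_processor_id();

	for_each_cpu(cpu, cpu_online_mask) {
		if (cpu == src_cpu)
			continue;
		send_ipi_dest(cpu, vector, nmi);
	}
}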
/* in savic_write() */
	if (IS_ALIGNED(reg - 4, 16)) {
static void savic_send_ipi(int cpu, int vector)
	u32 dest = per_cpu(x86_cpu_to_apicid, cpu);
/* in send_ipi_mask() */
	unsigned int cpu, this_cpu;
	for_each_cpu(cpu, mask) {
		if (excl_self && cpu == this_cpu)
		send_ipi(per_cpu(x86_cpu_to_apicid, cpu), vector, 0);
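Both savic_send_ipi() and the mask variant translate a Linux CPU number into an APIC ID via x86_cpu_to_apicid before handing off to send_ipi(). A sketch of the mask walk; the signature and the meaning of the final 0 argument (taken here to be "not an NMI") are assumptions:

/* Sketch only: post @vector to every CPU in @mask, optionally skipping self. */
static void send_ipi_mask(const struct cpumask *mask, unsigned int vector, bool excl_self)
{
	unsigned int cpu, this_cpu = raw_smp_processor_id();

	for_each_cpu(cpu, mask) {
		if (excl_self && cpu == this_cpu)
			continue;
		send_ipi(per_cpu(x86_cpu_to_apicid, cpu), vector, 0);
	}
}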
static void savic_update_vector(unsigned int cpu, unsigned int vector, bool set)
	update_vector(cpu, SAVIC_ALLOWED_IRR, vector, set);
/* in savic_eoi() */
	unsigned int cpu;
	cpu = raw_smp_processor_id();
	vec = apic_find_highest_vector(get_reg_bitmap(cpu, APIC_ISR));
	if (WARN_ONCE(vec == -1, "EOI write while no active interrupt in APIC_ISR"))
	/* Is level-triggered interrupt? */
	if (apic_test_vector(vec, get_reg_bitmap(cpu, APIC_TMR))) {
		update_vector(cpu, APIC_ISR, vec, false);
		 * Propagate the EOI write to the hypervisor for level-triggered
		 * care of re-evaluating interrupt state.
		 * Hardware clears APIC_ISR and re-evaluates the interrupt state
		 * delivered to CPU.
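Reassembling the savic_eoi() matches: the highest vector set in APIC_ISR is looked up, and the level-triggered case (APIC_TMR bit set) clears APIC_ISR in the backing page and forwards the EOI so the hypervisor can re-evaluate interrupt state, while the edge-triggered case relies on hardware clearing APIC_ISR on the EOI write. A sketch; the exact hypervisor hand-off and the MSR helper used are assumptions:

/* Sketch only: EOI handling split by trigger mode. */
static void savic_eoi(void)
{
	unsigned int cpu = raw_smp_processor_id();
	int vec;

	vec = apic_find_highest_vector(get_reg_bitmap(cpu, APIC_ISR));
	if (WARN_ONCE(vec == -1, "EOI write while no active interrupt in APIC_ISR"))
		return;

	if (apic_test_vector(vec, get_reg_bitmap(cpu, APIC_TMR))) {
		/* Level-triggered: clear ISR here, then forward the EOI so the
		 * hypervisor re-evaluates (hand-off mechanism assumed). */
		update_vector(cpu, APIC_ISR, vec, false);
		native_apic_msr_eoi();
	} else {
		/* Edge-triggered: hardware clears APIC_ISR on the EOI write. */
		native_apic_msr_eoi();
	}
}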
/* in savic_setup() */
	 * APIC_ID MSR read returns the value from the hypervisor.
	.read = savic_read,
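The final match comes from the struct apic driver definition that wires these callbacks up. A sketch of the kind of initializer it belongs to; the variable name, the name string, and the exact set of fields shown are assumptions based only on the matched functions:

/* Sketch only: illustrative, not an exhaustive field list. */
static struct apic apic_x2apic_savic __ro_after_init = {
	.name		= "secure avic x2apic",	/* name string assumed */
	.read		= savic_read,
	.write		= savic_write,
	.eoi		= savic_eoi,
	.update_vector	= savic_update_vector,
	.send_IPI	= savic_send_ipi,
};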