/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_HARDIRQ_H
#define _ASM_X86_HARDIRQ_H

#include <linux/threads.h>

/* Per-CPU interrupt statistics. */
typedef struct {
#if IS_ENABLED(CONFIG_CPU_MITIGATIONS) && IS_ENABLED(CONFIG_KVM_INTEL)
	u8 kvm_cpu_l1tf_flush_l1d;
#endif
	unsigned int __nmi_count;	/* arch dependent */
#ifdef CONFIG_X86_LOCAL_APIC
	unsigned int apic_timer_irqs;	/* arch dependent */
	unsigned int irq_spurious_count;
	unsigned int icr_read_retry_count;
#endif
#if IS_ENABLED(CONFIG_KVM)
	unsigned int kvm_posted_intr_ipis;
	unsigned int kvm_posted_intr_wakeup_ipis;
	unsigned int kvm_posted_intr_nested_ipis;
#endif
#ifdef CONFIG_GUEST_PERF_EVENTS
	unsigned int perf_guest_mediated_pmis;
#endif
	unsigned int x86_platform_ipis;	/* arch dependent */
	unsigned int apic_perf_irqs;
	unsigned int apic_irq_work_irqs;
#ifdef CONFIG_SMP
	unsigned int irq_resched_count;
	unsigned int irq_call_count;
#endif
	unsigned int irq_tlb_count;
#ifdef CONFIG_X86_THERMAL_VECTOR
	unsigned int irq_thermal_count;
#endif
#ifdef CONFIG_X86_MCE_THRESHOLD
	unsigned int irq_threshold_count;
#endif
#ifdef CONFIG_X86_MCE_AMD
	unsigned int irq_deferred_error_count;
#endif
#ifdef CONFIG_X86_HV_CALLBACK_VECTOR
	unsigned int irq_hv_callback_count;
#endif
#if IS_ENABLED(CONFIG_HYPERV)
	unsigned int irq_hv_reenlightenment_count;
	unsigned int hyperv_stimer0_count;
#endif
#ifdef CONFIG_X86_POSTED_MSI
	unsigned int posted_msi_notification_count;
#endif
} ____cacheline_aligned irq_cpustat_t;

DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);

#ifdef CONFIG_X86_POSTED_MSI
DECLARE_PER_CPU_ALIGNED(struct pi_desc, posted_msi_pi_desc);
#endif
#define __ARCH_IRQ_STAT

#define inc_irq_stat(member)	this_cpu_inc(irq_stat.member)

extern void ack_bad_irq(unsigned int irq);

extern u64 arch_irq_stat_cpu(unsigned int cpu);
#define arch_irq_stat_cpu	arch_irq_stat_cpu

extern u64 arch_irq_stat(void);
#define arch_irq_stat		arch_irq_stat

DECLARE_PER_CPU_CACHE_HOT(u16, __softirq_pending);
#define local_softirq_pending_ref	__softirq_pending

#if IS_ENABLED(CONFIG_CPU_MITIGATIONS) && IS_ENABLED(CONFIG_KVM_INTEL)
/*
 * Request an L1D cache flush before the next VMENTER (L1TF mitigation).
 * This function is called from noinstr interrupt contexts and must be
 * inlined to avoid instrumentation.
 */
static __always_inline void kvm_set_cpu_l1tf_flush_l1d(void)
{
	__this_cpu_write(irq_stat.kvm_cpu_l1tf_flush_l1d, 1);
}

static __always_inline void kvm_clear_cpu_l1tf_flush_l1d(void)
{
	__this_cpu_write(irq_stat.kvm_cpu_l1tf_flush_l1d, 0);
}

static __always_inline bool kvm_get_cpu_l1tf_flush_l1d(void)
{
	return __this_cpu_read(irq_stat.kvm_cpu_l1tf_flush_l1d);
}
#else /* !CONFIG_CPU_MITIGATIONS || !CONFIG_KVM_INTEL */
static __always_inline void kvm_set_cpu_l1tf_flush_l1d(void) { }
#endif /* CONFIG_CPU_MITIGATIONS && CONFIG_KVM_INTEL */

#endif /* _ASM_X86_HARDIRQ_H */
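/*
 * Illustrative usage (a minimal sketch, not code from this header): each
 * counter in irq_cpustat_t is bumped from its interrupt entry point with
 * inc_irq_stat().  A system-vector handler along the lines of the real ones
 * under arch/x86/kernel/ looks roughly like the following, where the name
 * sysvec_example_ipi and the helper handle_the_ipi() are hypothetical:
 *
 *	DEFINE_IDTENTRY_SYSVEC(sysvec_example_ipi)
 *	{
 *		apic_eoi();
 *		inc_irq_stat(irq_call_count);
 *		handle_the_ipi();
 *	}
 *
 * apic_eoi() acknowledges the vector, inc_irq_stat() increments the per-CPU
 * statistic declared above, and handle_the_ipi() stands in for the actual
 * work.  The counters are later summed by arch_irq_stat_cpu() and
 * arch_irq_stat(), e.g. for /proc/stat.
 */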