xref: /linux/arch/x86/xen/irq.c (revision b8bb76713ec50df2f11efee386e16f93d51e1076)
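
/*
 * Xen interrupt-flag handling.  A paravirtualized guest cannot toggle
 * the real EFLAGS.IF, so the pv_irq_ops below operate on the per-vcpu
 * event-channel upcall mask instead: Xen checks this mask before
 * delivering events, making it the guest's effective interrupt flag.
 */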
#include <linux/hardirq.h>

#include <xen/interface/xen.h>
#include <xen/interface/sched.h>
#include <xen/interface/vcpu.h>

#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include "xen-ops.h"

/*
 * Force a proper event-channel callback from Xen after clearing the
 * callback mask. We do this in a very simple manner, by making a call
 * down into Xen. The pending flag will be checked by Xen on return.
 */
void xen_force_evtchn_callback(void)
{
	(void)HYPERVISOR_xen_version(0, NULL);
}

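/*
 * The flag and mask operations below are wrapped by
 * PV_CALLEE_SAVE_REGS_THUNK, which emits an asm stub that preserves all
 * caller-clobbered registers around the C call, so the pv-ops call
 * sites (which are frequent and patched inline) can treat these
 * functions as callee-save.
 */
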
static unsigned long xen_save_fl(void)
{
	struct vcpu_info *vcpu;
	unsigned long flags;

	vcpu = percpu_read(xen_vcpu);

	/* flag has opposite sense of mask */
	flags = !vcpu->evtchn_upcall_mask;

	/* convert to IF type flag
	   -0 -> 0x00000000
	   -1 -> 0xffffffff
	*/
	return (-flags) & X86_EFLAGS_IF;
}
PV_CALLEE_SAVE_REGS_THUNK(xen_save_fl);

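/*
 * Worked example for xen_save_fl(): with events unmasked,
 * evtchn_upcall_mask == 0, so flags == 1 and -flags is all ones;
 * masking with X86_EFLAGS_IF (0x200) yields "IF set".  With events
 * masked, flags == 0 and the result is 0, i.e. "IF clear".
 */
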
static void xen_restore_fl(unsigned long flags)
{
	struct vcpu_info *vcpu;

	/* convert from IF type flag */
	flags = !(flags & X86_EFLAGS_IF);

	/* There's a one instruction preempt window here.  We need to
	   make sure we don't switch CPUs between getting the vcpu
	   pointer and updating the mask. */
	preempt_disable();
	vcpu = percpu_read(xen_vcpu);
	vcpu->evtchn_upcall_mask = flags;
	preempt_enable_no_resched();

	/* Doesn't matter if we get preempted here, because any
	   pending event will get dealt with anyway. */

	if (flags == 0) {
		preempt_check_resched();
		barrier(); /* unmask then check (avoid races) */
		if (unlikely(vcpu->evtchn_upcall_pending))
			xen_force_evtchn_callback();
	}
}
PV_CALLEE_SAVE_REGS_THUNK(xen_restore_fl);

static void xen_irq_disable(void)
{
	/* There's a one instruction preempt window here.  We need to
	   make sure we don't switch CPUs between getting the vcpu
	   pointer and updating the mask. */
	preempt_disable();
	percpu_read(xen_vcpu)->evtchn_upcall_mask = 1;
	preempt_enable_no_resched();
}
PV_CALLEE_SAVE_REGS_THUNK(xen_irq_disable);

static void xen_irq_enable(void)
{
	struct vcpu_info *vcpu;

	/* We don't need to worry about being preempted here, since
	   either a) interrupts are disabled, so no preemption, or b)
	   the caller is confused and is trying to re-enable interrupts
	   on an indeterminate processor. */

	vcpu = percpu_read(xen_vcpu);
	vcpu->evtchn_upcall_mask = 0;

	/* Doesn't matter if we get preempted here, because any
	   pending event will get dealt with anyway. */

	barrier(); /* unmask then check (avoid races) */
	if (unlikely(vcpu->evtchn_upcall_pending))
		xen_force_evtchn_callback();
}
PV_CALLEE_SAVE_REGS_THUNK(xen_irq_enable);

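/*
 * Why the unmask-then-check dance above (and in xen_restore_fl()): if
 * an event arrives while the upcall mask is set, Xen records it in
 * evtchn_upcall_pending but does not inject an upcall.  Simply clearing
 * the mask would leave that event stranded until the next hypercall, so
 * after the compiler barrier we re-check the pending flag and, if set,
 * force delivery with a deliberate trip into the hypervisor.
 */
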
static void xen_safe_halt(void)
{
	/* Blocking includes an implicit local_irq_enable(). */
	if (HYPERVISOR_sched_op(SCHEDOP_block, NULL) != 0)
		BUG();
}

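/*
 * xen_safe_halt() is the paravirtual analogue of "sti; hlt":
 * SCHEDOP_block re-enables event delivery and blocks the vcpu until an
 * event is pending.  The hypercall returns 0 on success, so any other
 * return value is treated as a fatal error.
 */
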
static void xen_halt(void)
{
	if (irqs_disabled())
		HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
	else
		xen_safe_halt();
}

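/*
 * xen_halt(): halting with interrupts disabled means no interrupt can
 * ever wake this CPU again, so rather than blocking, the vcpu is taken
 * offline entirely via VCPUOP_down; otherwise an ordinary safe halt
 * suffices.
 */
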
static const struct pv_irq_ops xen_irq_ops __initdata = {
	.init_IRQ = xen_init_IRQ,

	.save_fl = PV_CALLEE_SAVE(xen_save_fl),
	.restore_fl = PV_CALLEE_SAVE(xen_restore_fl),
	.irq_disable = PV_CALLEE_SAVE(xen_irq_disable),
	.irq_enable = PV_CALLEE_SAVE(xen_irq_enable),

	.safe_halt = xen_safe_halt,
	.halt = xen_halt,
#ifdef CONFIG_X86_64
	.adjust_exception_frame = xen_adjust_exception_frame,
#endif
};

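/*
 * Replace the native pv_irq_ops wholesale.  This runs early in the Xen
 * boot path (xen_start_kernel()), so subsequent flag manipulation goes
 * through the Xen ops above.
 */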
void __init xen_init_irq_ops(void)
{
	pv_irq_ops = xen_irq_ops;
}
133