xref: /linux/drivers/xen/events/events_internal.h (revision ca55b2fef3a9373fcfc30f82fd26bc7fccbda732)
/*
 * Xen Event Channels (internal header)
 *
 * Copyright (C) 2013 Citrix Systems R&D Ltd.
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2 or later.  See the file COPYING for more details.
 */
#ifndef __EVENTS_INTERNAL_H__
#define __EVENTS_INTERNAL_H__

/* Interrupt types. */
enum xen_irq_type {
	IRQT_UNBOUND = 0,
	IRQT_PIRQ,
	IRQT_VIRQ,
	IRQT_IPI,
	IRQT_EVTCHN
};

/*
 * Packed IRQ information:
 * type - enum xen_irq_type
 * event channel - irq->event channel mapping
 * cpu - cpu this event channel is bound to
 * index - type-specific information:
 *    PIRQ - vector, with MSB being "needs EOI", or physical IRQ of the HVM
 *           guest, or GSI (real passthrough IRQ) of the device.
 *    VIRQ - virq number
 *    IPI - IPI vector
 *    EVTCHN -
 */
struct irq_info {
	struct list_head list;
	int refcnt;
	enum xen_irq_type type;	/* type */
	unsigned irq;
	unsigned int evtchn;	/* event channel */
	unsigned short cpu;	/* cpu bound */

	union {
		unsigned short virq;
		enum ipi_vector ipi;
		struct {
			unsigned short pirq;
			unsigned short gsi;
			unsigned char vector;
			unsigned char flags;
			uint16_t domid;
		} pirq;
	} u;
};
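
/*
 * Note (inferred from the definitions above, not stated in the original
 * source): irq_info.type selects the valid union member: u.virq for
 * IRQT_VIRQ, u.ipi for IRQT_IPI and u.pirq for IRQT_PIRQ.  IRQT_EVTCHN
 * and IRQT_UNBOUND carry no type-specific data.
 */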

/* Flag bits for irq_info.u.pirq.flags. */
#define PIRQ_NEEDS_EOI	(1 << 0)
#define PIRQ_SHAREABLE	(1 << 1)
#define PIRQ_MSI_GROUP	(1 << 2)

struct evtchn_ops {
	unsigned (*max_channels)(void);
	unsigned (*nr_channels)(void);

	int (*setup)(struct irq_info *info);
	void (*bind_to_cpu)(struct irq_info *info, unsigned cpu);

	void (*clear_pending)(unsigned port);
	void (*set_pending)(unsigned port);
	bool (*is_pending)(unsigned port);
	bool (*test_and_set_mask)(unsigned port);
	void (*mask)(unsigned port);
	void (*unmask)(unsigned port);

	void (*handle_events)(unsigned cpu);
	void (*resume)(void);
};

extern const struct evtchn_ops *evtchn_ops;
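
/*
 * Illustrative sketch (not part of the original header; callback names
 * are hypothetical): an event channel ABI backend fills in one of these
 * ops tables and points the global evtchn_ops at it from its init
 * function, roughly:
 *
 *	static const struct evtchn_ops evtchn_ops_2l = {
 *		.max_channels		= evtchn_2l_max_channels,
 *		.nr_channels		= evtchn_2l_nr_channels,
 *		.bind_to_cpu		= evtchn_2l_bind_to_cpu,
 *		.clear_pending		= evtchn_2l_clear_pending,
 *		.set_pending		= evtchn_2l_set_pending,
 *		.is_pending		= evtchn_2l_is_pending,
 *		.test_and_set_mask	= evtchn_2l_test_and_set_mask,
 *		.mask			= evtchn_2l_mask,
 *		.unmask			= evtchn_2l_unmask,
 *		.handle_events		= evtchn_2l_handle_events,
 *	};
 *
 *	void xen_evtchn_2l_init(void)
 *	{
 *		evtchn_ops = &evtchn_ops_2l;
 *	}
 *
 * setup() and resume() may be left NULL; the wrappers below check for
 * NULL before calling them, while the remaining callbacks are invoked
 * unconditionally.
 */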

extern int **evtchn_to_irq;
int get_evtchn_to_irq(unsigned int evtchn);
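
/*
 * Illustrative use (an assumption, not taken from this header): an ABI's
 * handle_events() callback is expected to translate each pending port
 * into a Linux IRQ through this mapping, e.g.
 *
 *	irq = get_evtchn_to_irq(port);
 *	if (irq != -1)
 *		generic_handle_irq(irq);
 */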

struct irq_info *info_for_irq(unsigned irq);
unsigned cpu_from_irq(unsigned irq);
unsigned cpu_from_evtchn(unsigned int evtchn);

static inline unsigned xen_evtchn_max_channels(void)
{
	return evtchn_ops->max_channels();
}

/*
 * Do any ABI-specific setup for a bound event channel before it can
 * be unmasked and used.
 */
static inline int xen_evtchn_port_setup(struct irq_info *info)
{
	if (evtchn_ops->setup)
		return evtchn_ops->setup(info);
	return 0;
}
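
/*
 * Typical sequence (inferred from the comment above rather than spelled
 * out in this header): once an event channel has been bound and its
 * evtchn_to_irq entry recorded, xen_evtchn_port_setup() is called before
 * the port is unmasked with unmask_evtchn().
 */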

static inline void xen_evtchn_port_bind_to_cpu(struct irq_info *info,
					       unsigned cpu)
{
	evtchn_ops->bind_to_cpu(info, cpu);
}

static inline void clear_evtchn(unsigned port)
{
	evtchn_ops->clear_pending(port);
}

static inline void set_evtchn(unsigned port)
{
	evtchn_ops->set_pending(port);
}

static inline bool test_evtchn(unsigned port)
{
	return evtchn_ops->is_pending(port);
}

static inline bool test_and_set_mask(unsigned port)
{
	return evtchn_ops->test_and_set_mask(port);
}

static inline void mask_evtchn(unsigned port)
{
	evtchn_ops->mask(port);
}

static inline void unmask_evtchn(unsigned port)
{
	evtchn_ops->unmask(port);
}
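
/*
 * Illustrative sketch (assumption, not from this header): the irq_chip
 * callbacks in events_base.c are the usual consumers of these port
 * wrappers.  Helpers such as evtchn_from_irq() and VALID_EVTCHN() are
 * assumed to be provided elsewhere and are not declared here.
 *
 *	static void mask_ack_dynirq(struct irq_data *data)
 *	{
 *		unsigned int evtchn = evtchn_from_irq(data->irq);
 *
 *		if (VALID_EVTCHN(evtchn)) {
 *			mask_evtchn(evtchn);
 *			clear_evtchn(evtchn);
 *		}
 *	}
 */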

static inline void xen_evtchn_handle_events(unsigned cpu)
{
	evtchn_ops->handle_events(cpu);
}

static inline void xen_evtchn_resume(void)
{
	if (evtchn_ops->resume)
		evtchn_ops->resume();
}

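/*
 * Note: selection between the two event channel ABIs happens at boot.
 * The fallback policy is not spelled out in this header, but
 * xen_evtchn_fifo_init() returns an error when the FIFO-based ABI is
 * unavailable, in which case the 2-level ABI (xen_evtchn_2l_init(),
 * which cannot fail) is used instead.
 */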
void xen_evtchn_2l_init(void);
int xen_evtchn_fifo_init(void);

#endif /* #ifndef __EVENTS_INTERNAL_H__ */