xref: /linux/drivers/xen/events/events_base.c (revision c2dfe29f30d8850af324449f416491b171af19aa)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Xen event channels
4  *
5  * Xen models interrupts with abstract event channels.  Because each
6  * domain gets 1024 event channels, but NR_IRQS is not that large, we
7  * must dynamically map irqs<->event channels.  The event channels
8  * interface with the rest of the kernel by defining a xen interrupt
9  * chip.  When an event is received, it is mapped to an irq and sent
10  * through the normal interrupt processing path.
11  *
12  * There are four kinds of events which can be mapped to an event
13  * channel:
14  *
15  * 1. Inter-domain notifications.  This includes all the virtual
16  *    device events, since they're driven by front-ends in another domain
17  *    (typically dom0).
18  * 2. VIRQs, typically used for timers.  These are per-cpu events.
19  * 3. IPIs.
20  * 4. PIRQs - Hardware interrupts.
21  *
22  * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
23  */
24 
25 #define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
26 
27 #include <linux/linkage.h>
28 #include <linux/interrupt.h>
29 #include <linux/irq.h>
30 #include <linux/moduleparam.h>
31 #include <linux/string.h>
32 #include <linux/memblock.h>
33 #include <linux/slab.h>
34 #include <linux/irqnr.h>
35 #include <linux/pci.h>
36 #include <linux/rcupdate.h>
37 #include <linux/spinlock.h>
38 #include <linux/cpuhotplug.h>
39 #include <linux/atomic.h>
40 #include <linux/ktime.h>
41 
42 #ifdef CONFIG_X86
43 #include <asm/desc.h>
44 #include <asm/ptrace.h>
45 #include <asm/idtentry.h>
46 #include <asm/irq.h>
47 #include <asm/io_apic.h>
48 #include <asm/i8259.h>
49 #include <asm/xen/cpuid.h>
50 #include <asm/xen/pci.h>
51 #endif
52 #include <asm/sync_bitops.h>
53 #include <asm/xen/hypercall.h>
54 #include <asm/xen/hypervisor.h>
55 #include <xen/page.h>
56 
57 #include <xen/xen.h>
58 #include <xen/hvm.h>
59 #include <xen/xen-ops.h>
60 #include <xen/events.h>
61 #include <xen/interface/xen.h>
62 #include <xen/interface/event_channel.h>
63 #include <xen/interface/hvm/hvm_op.h>
64 #include <xen/interface/hvm/params.h>
65 #include <xen/interface/physdev.h>
66 #include <xen/interface/sched.h>
67 #include <xen/interface/vcpu.h>
68 #include <xen/xenbus.h>
69 #include <asm/hw_irq.h>
70 
71 #include "events_internal.h"
72 
73 #undef MODULE_PARAM_PREFIX
74 #define MODULE_PARAM_PREFIX "xen."
75 
76 /* Interrupt types. */
77 enum xen_irq_type {
78 	IRQT_UNBOUND = 0,
79 	IRQT_PIRQ,
80 	IRQT_VIRQ,
81 	IRQT_IPI,
82 	IRQT_EVTCHN
83 };
84 
85 /*
86  * Packed IRQ information:
87  * type - enum xen_irq_type
88  * event channel - irq->event channel mapping
89  * cpu - cpu this event channel is bound to
90  * index - type-specific information:
91  *    PIRQ - vector, with MSB being "needs EOI", or physical IRQ of the HVM
92  *           guest, or GSI (real passthrough IRQ) of the device.
93  *    VIRQ - virq number
94  *    IPI - IPI vector
95  *    EVTCHN - xenbus device of the interdomain channel (if any)
96  */
97 struct irq_info {
98 	struct list_head list;
99 	struct list_head eoi_list;
100 	struct rcu_work rwork;
101 	short refcnt;
102 	u8 spurious_cnt;
103 	u8 is_accounted;
104 	short type;		/* type: IRQT_* */
105 	u8 mask_reason;		/* Why is event channel masked */
106 #define EVT_MASK_REASON_EXPLICIT	0x01
107 #define EVT_MASK_REASON_TEMPORARY	0x02
108 #define EVT_MASK_REASON_EOI_PENDING	0x04
109 	u8 is_active;		/* Is event just being handled? */
110 	unsigned irq;
111 	evtchn_port_t evtchn;   /* event channel */
112 	unsigned short cpu;     /* cpu bound */
113 	unsigned short eoi_cpu; /* EOI must happen on this cpu */
114 	unsigned int irq_epoch; /* If eoi_cpu valid: irq_epoch of event */
115 	u64 eoi_time;           /* Time in jiffies when to EOI. */
116 	raw_spinlock_t lock;
117 	bool is_static;           /* Is event channel static */
118 
119 	union {
120 		unsigned short virq;
121 		enum ipi_vector ipi;
122 		struct {
123 			unsigned short pirq;
124 			unsigned short gsi;
125 			unsigned char vector;
126 			unsigned char flags;
127 			uint16_t domid;
128 		} pirq;
129 		struct xenbus_device *interdomain;
130 	} u;
131 };
132 
133 #define PIRQ_NEEDS_EOI	(1 << 0)
134 #define PIRQ_SHAREABLE	(1 << 1)
135 #define PIRQ_MSI_GROUP	(1 << 2)
136 
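/* Limit (in jiffies) of one event handling loop before EOIs are deferred. */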
137 static uint __read_mostly event_loop_timeout = 2;
138 module_param(event_loop_timeout, uint, 0644);
139 
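/* Delay (in jiffies) applied to deferred EOIs, see handle_irq_for_port(). */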
140 static uint __read_mostly event_eoi_delay = 10;
141 module_param(event_eoi_delay, uint, 0644);
142 
143 const struct evtchn_ops *evtchn_ops;
144 
145 /*
146  * This lock protects updates to the following mapping and reference-count
147  * arrays. The lock does not need to be acquired to read the mapping tables.
148  */
149 static DEFINE_MUTEX(irq_mapping_update_lock);
150 
151 /*
152  * Lock hierarchy:
153  *
154  * irq_mapping_update_lock
155  *   IRQ-desc lock
156  *     percpu eoi_list_lock
157  *       irq_info->lock
158  */
159 
160 static LIST_HEAD(xen_irq_list_head);
161 
162 /* IRQ <-> VIRQ mapping. */
163 static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1};
164 
165 /* IRQ <-> IPI mapping */
166 static DEFINE_PER_CPU(int [XEN_NR_IPIS], ipi_to_irq) = {[0 ... XEN_NR_IPIS-1] = -1};
167 /* Cache for IPI event channels - needed for hot cpu unplug (avoid RCU usage). */
168 static DEFINE_PER_CPU(evtchn_port_t [XEN_NR_IPIS], ipi_to_evtchn) = {[0 ... XEN_NR_IPIS-1] = 0};
169 
170 /* Event channel distribution data */
171 static atomic_t channels_on_cpu[NR_CPUS];
172 
173 static int **evtchn_to_irq;
174 #ifdef CONFIG_X86
175 static unsigned long *pirq_eoi_map;
176 #endif
177 static bool (*pirq_needs_eoi)(struct irq_info *info);
178 
179 #define EVTCHN_ROW(e)  (e / (PAGE_SIZE/sizeof(**evtchn_to_irq)))
180 #define EVTCHN_COL(e)  (e % (PAGE_SIZE/sizeof(**evtchn_to_irq)))
181 #define EVTCHN_PER_ROW (PAGE_SIZE / sizeof(**evtchn_to_irq))
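/*
 * evtchn_to_irq is a sparse two-level table with one page per row, rows
 * being allocated on demand in set_evtchn_to_irq().  Purely as an
 * illustration, assuming 4 KiB pages and 4-byte ints: each row covers
 * 1024 ports, so port 1500 maps to row 1, column 476.
 */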
182 
183 /* Xen will never allocate port zero for any purpose. */
184 #define VALID_EVTCHN(chn)	((chn) != 0)
185 
186 static struct irq_info *legacy_info_ptrs[NR_IRQS_LEGACY];
187 
188 static struct irq_chip xen_dynamic_chip;
189 static struct irq_chip xen_lateeoi_chip;
190 static struct irq_chip xen_percpu_chip;
191 static struct irq_chip xen_pirq_chip;
192 static void enable_dynirq(struct irq_data *data);
193 
194 static DEFINE_PER_CPU(unsigned int, irq_epoch);
195 
196 static void clear_evtchn_to_irq_row(int *evtchn_row)
197 {
198 	unsigned col;
199 
200 	for (col = 0; col < EVTCHN_PER_ROW; col++)
201 		WRITE_ONCE(evtchn_row[col], -1);
202 }
203 
204 static void clear_evtchn_to_irq_all(void)
205 {
206 	unsigned row;
207 
208 	for (row = 0; row < EVTCHN_ROW(xen_evtchn_max_channels()); row++) {
209 		if (evtchn_to_irq[row] == NULL)
210 			continue;
211 		clear_evtchn_to_irq_row(evtchn_to_irq[row]);
212 	}
213 }
214 
215 static int set_evtchn_to_irq(evtchn_port_t evtchn, unsigned int irq)
216 {
217 	unsigned row;
218 	unsigned col;
219 	int *evtchn_row;
220 
221 	if (evtchn >= xen_evtchn_max_channels())
222 		return -EINVAL;
223 
224 	row = EVTCHN_ROW(evtchn);
225 	col = EVTCHN_COL(evtchn);
226 
227 	if (evtchn_to_irq[row] == NULL) {
228 		/* Unallocated irq entries return -1 anyway */
229 		if (irq == -1)
230 			return 0;
231 
232 		evtchn_row = (int *) __get_free_pages(GFP_KERNEL, 0);
233 		if (evtchn_row == NULL)
234 			return -ENOMEM;
235 
236 		clear_evtchn_to_irq_row(evtchn_row);
237 
238 		/*
239 		 * We've prepared an empty row for the mapping. If a different
240 		 * thread was faster inserting it, we can drop ours.
241 		 */
242 		if (cmpxchg(&evtchn_to_irq[row], NULL, evtchn_row) != NULL)
243 			free_page((unsigned long) evtchn_row);
244 	}
245 
246 	WRITE_ONCE(evtchn_to_irq[row][col], irq);
247 	return 0;
248 }
249 
250 /* Get info for IRQ */
251 static struct irq_info *info_for_irq(unsigned irq)
252 {
253 	if (irq < nr_legacy_irqs())
254 		return legacy_info_ptrs[irq];
255 	else
256 		return irq_get_chip_data(irq);
257 }
258 
259 static void set_info_for_irq(unsigned int irq, struct irq_info *info)
260 {
261 	if (irq < nr_legacy_irqs())
262 		legacy_info_ptrs[irq] = info;
263 	else
264 		irq_set_chip_data(irq, info);
265 }
266 
267 static struct irq_info *evtchn_to_info(evtchn_port_t evtchn)
268 {
269 	int irq;
270 
271 	if (evtchn >= xen_evtchn_max_channels())
272 		return NULL;
273 	if (evtchn_to_irq[EVTCHN_ROW(evtchn)] == NULL)
274 		return NULL;
275 	irq = READ_ONCE(evtchn_to_irq[EVTCHN_ROW(evtchn)][EVTCHN_COL(evtchn)]);
276 
277 	return (irq < 0) ? NULL : info_for_irq(irq);
278 }
279 
280 /* Per CPU channel accounting */
281 static void channels_on_cpu_dec(struct irq_info *info)
282 {
283 	if (!info->is_accounted)
284 		return;
285 
286 	info->is_accounted = 0;
287 
288 	if (WARN_ON_ONCE(info->cpu >= nr_cpu_ids))
289 		return;
290 
291 	WARN_ON_ONCE(!atomic_add_unless(&channels_on_cpu[info->cpu], -1, 0));
292 }
293 
294 static void channels_on_cpu_inc(struct irq_info *info)
295 {
296 	if (WARN_ON_ONCE(info->cpu >= nr_cpu_ids))
297 		return;
298 
299 	if (WARN_ON_ONCE(!atomic_add_unless(&channels_on_cpu[info->cpu], 1,
300 					    INT_MAX)))
301 		return;
302 
303 	info->is_accounted = 1;
304 }
305 
306 static void xen_irq_free_desc(unsigned int irq)
307 {
308 	/* Legacy IRQ descriptors are managed by the arch. */
309 	if (irq >= nr_legacy_irqs())
310 		irq_free_desc(irq);
311 }
312 
313 static void delayed_free_irq(struct work_struct *work)
314 {
315 	struct irq_info *info = container_of(to_rcu_work(work), struct irq_info,
316 					     rwork);
317 	unsigned int irq = info->irq;
318 
319 	/* Remove the info pointer only now, with no potential users left. */
320 	set_info_for_irq(irq, NULL);
321 
322 	kfree(info);
323 
324 	xen_irq_free_desc(irq);
325 }
326 
327 /* Constructors for packed IRQ information. */
328 static int xen_irq_info_common_setup(struct irq_info *info,
329 				     enum xen_irq_type type,
330 				     evtchn_port_t evtchn,
331 				     unsigned short cpu)
332 {
333 	int ret;
334 
335 	BUG_ON(info->type != IRQT_UNBOUND && info->type != type);
336 
337 	info->type = type;
338 	info->evtchn = evtchn;
339 	info->cpu = cpu;
340 	info->mask_reason = EVT_MASK_REASON_EXPLICIT;
341 	raw_spin_lock_init(&info->lock);
342 
343 	ret = set_evtchn_to_irq(evtchn, info->irq);
344 	if (ret < 0)
345 		return ret;
346 
347 	irq_clear_status_flags(info->irq, IRQ_NOREQUEST | IRQ_NOAUTOEN);
348 
349 	return xen_evtchn_port_setup(evtchn);
350 }
351 
352 static int xen_irq_info_evtchn_setup(struct irq_info *info,
353 				     evtchn_port_t evtchn,
354 				     struct xenbus_device *dev)
355 {
356 	int ret;
357 
358 	ret = xen_irq_info_common_setup(info, IRQT_EVTCHN, evtchn, 0);
359 	info->u.interdomain = dev;
360 	if (dev)
361 		atomic_inc(&dev->event_channels);
362 
363 	return ret;
364 }
365 
366 static int xen_irq_info_ipi_setup(struct irq_info *info, unsigned int cpu,
367 				  evtchn_port_t evtchn, enum ipi_vector ipi)
368 {
369 	info->u.ipi = ipi;
370 
371 	per_cpu(ipi_to_irq, cpu)[ipi] = info->irq;
372 	per_cpu(ipi_to_evtchn, cpu)[ipi] = evtchn;
373 
374 	return xen_irq_info_common_setup(info, IRQT_IPI, evtchn, 0);
375 }
376 
377 static int xen_irq_info_virq_setup(struct irq_info *info, unsigned int cpu,
378 				   evtchn_port_t evtchn, unsigned int virq)
379 {
380 	info->u.virq = virq;
381 
382 	per_cpu(virq_to_irq, cpu)[virq] = info->irq;
383 
384 	return xen_irq_info_common_setup(info, IRQT_VIRQ, evtchn, 0);
385 }
386 
387 static int xen_irq_info_pirq_setup(struct irq_info *info, evtchn_port_t evtchn,
388 				   unsigned int pirq, unsigned int gsi,
389 				   uint16_t domid, unsigned char flags)
390 {
391 	info->u.pirq.pirq = pirq;
392 	info->u.pirq.gsi = gsi;
393 	info->u.pirq.domid = domid;
394 	info->u.pirq.flags = flags;
395 
396 	return xen_irq_info_common_setup(info, IRQT_PIRQ, evtchn, 0);
397 }
398 
399 static void xen_irq_info_cleanup(struct irq_info *info)
400 {
401 	set_evtchn_to_irq(info->evtchn, -1);
402 	xen_evtchn_port_remove(info->evtchn, info->cpu);
403 	info->evtchn = 0;
404 	channels_on_cpu_dec(info);
405 }
406 
407 /*
408  * Accessors for packed IRQ information.
409  */
410 static evtchn_port_t evtchn_from_irq(unsigned int irq)
411 {
412 	const struct irq_info *info = NULL;
413 
414 	if (likely(irq < nr_irqs))
415 		info = info_for_irq(irq);
416 	if (!info)
417 		return 0;
418 
419 	return info->evtchn;
420 }
421 
422 unsigned int irq_from_evtchn(evtchn_port_t evtchn)
423 {
424 	struct irq_info *info = evtchn_to_info(evtchn);
425 
426 	return info ? info->irq : -1;
427 }
428 EXPORT_SYMBOL_GPL(irq_from_evtchn);
429 
430 int irq_evtchn_from_virq(unsigned int cpu, unsigned int virq,
431 			 evtchn_port_t *evtchn)
432 {
433 	int irq = per_cpu(virq_to_irq, cpu)[virq];
434 
435 	*evtchn = evtchn_from_irq(irq);
436 
437 	return irq;
438 }
439 
440 static enum ipi_vector ipi_from_irq(struct irq_info *info)
441 {
442 	BUG_ON(info == NULL);
443 	BUG_ON(info->type != IRQT_IPI);
444 
445 	return info->u.ipi;
446 }
447 
448 static unsigned int virq_from_irq(struct irq_info *info)
449 {
450 	BUG_ON(info == NULL);
451 	BUG_ON(info->type != IRQT_VIRQ);
452 
453 	return info->u.virq;
454 }
455 
456 static unsigned int pirq_from_irq(struct irq_info *info)
457 {
458 	BUG_ON(info == NULL);
459 	BUG_ON(info->type != IRQT_PIRQ);
460 
461 	return info->u.pirq.pirq;
462 }
463 
464 unsigned int cpu_from_evtchn(evtchn_port_t evtchn)
465 {
466 	struct irq_info *info = evtchn_to_info(evtchn);
467 
468 	return info ? info->cpu : 0;
469 }
470 
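/*
 * mask_reason is a bitmask of EVT_MASK_REASON_* values: the event channel
 * is masked as long as at least one reason is set and is only unmasked
 * again when do_unmask() clears the last one.
 */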
471 static void do_mask(struct irq_info *info, u8 reason)
472 {
473 	unsigned long flags;
474 
475 	raw_spin_lock_irqsave(&info->lock, flags);
476 
477 	if (!info->mask_reason)
478 		mask_evtchn(info->evtchn);
479 
480 	info->mask_reason |= reason;
481 
482 	raw_spin_unlock_irqrestore(&info->lock, flags);
483 }
484 
485 static void do_unmask(struct irq_info *info, u8 reason)
486 {
487 	unsigned long flags;
488 
489 	raw_spin_lock_irqsave(&info->lock, flags);
490 
491 	info->mask_reason &= ~reason;
492 
493 	if (!info->mask_reason)
494 		unmask_evtchn(info->evtchn);
495 
496 	raw_spin_unlock_irqrestore(&info->lock, flags);
497 }
498 
499 #ifdef CONFIG_X86
500 static bool pirq_check_eoi_map(struct irq_info *info)
501 {
502 	return test_bit(pirq_from_irq(info), pirq_eoi_map);
503 }
504 #endif
505 
506 static bool pirq_needs_eoi_flag(struct irq_info *info)
507 {
508 	BUG_ON(info->type != IRQT_PIRQ);
509 
510 	return info->u.pirq.flags & PIRQ_NEEDS_EOI;
511 }
512 
513 static void bind_evtchn_to_cpu(struct irq_info *info, unsigned int cpu,
514 			       bool force_affinity)
515 {
516 	if (IS_ENABLED(CONFIG_SMP) && force_affinity) {
517 		struct irq_data *data = irq_get_irq_data(info->irq);
518 
519 		irq_data_update_affinity(data, cpumask_of(cpu));
520 		irq_data_update_effective_affinity(data, cpumask_of(cpu));
521 	}
522 
523 	xen_evtchn_port_bind_to_cpu(info->evtchn, cpu, info->cpu);
524 
525 	channels_on_cpu_dec(info);
526 	info->cpu = cpu;
527 	channels_on_cpu_inc(info);
528 }
529 
530 /**
531  * notify_remote_via_irq - send event to remote end of event channel via irq
532  * @irq: irq of event channel to send event to
533  *
534  * Unlike notify_remote_via_evtchn(), this is safe to use across
535  * save/restore. Notifications on a broken connection are silently
536  * dropped.
537  */
538 void notify_remote_via_irq(int irq)
539 {
540 	evtchn_port_t evtchn = evtchn_from_irq(irq);
541 
542 	if (VALID_EVTCHN(evtchn))
543 		notify_remote_via_evtchn(evtchn);
544 }
545 EXPORT_SYMBOL_GPL(notify_remote_via_irq);
546 
547 struct lateeoi_work {
548 	struct delayed_work delayed;
549 	spinlock_t eoi_list_lock;
550 	struct list_head eoi_list;
551 };
552 
553 static DEFINE_PER_CPU(struct lateeoi_work, lateeoi);
554 
555 static void lateeoi_list_del(struct irq_info *info)
556 {
557 	struct lateeoi_work *eoi = &per_cpu(lateeoi, info->eoi_cpu);
558 	unsigned long flags;
559 
560 	spin_lock_irqsave(&eoi->eoi_list_lock, flags);
561 	list_del_init(&info->eoi_list);
562 	spin_unlock_irqrestore(&eoi->eoi_list_lock, flags);
563 }
564 
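/*
 * Queue info on the per-cpu lateeoi list, keeping the list sorted by
 * eoi_time, and re-arm the delayed work if the new entry becomes the
 * earliest one.
 */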
565 static void lateeoi_list_add(struct irq_info *info)
566 {
567 	struct lateeoi_work *eoi = &per_cpu(lateeoi, info->eoi_cpu);
568 	struct irq_info *elem;
569 	u64 now = get_jiffies_64();
570 	unsigned long delay;
571 	unsigned long flags;
572 
573 	if (now < info->eoi_time)
574 		delay = info->eoi_time - now;
575 	else
576 		delay = 1;
577 
578 	spin_lock_irqsave(&eoi->eoi_list_lock, flags);
579 
580 	elem = list_first_entry_or_null(&eoi->eoi_list, struct irq_info,
581 					eoi_list);
582 	if (!elem || info->eoi_time < elem->eoi_time) {
583 		list_add(&info->eoi_list, &eoi->eoi_list);
584 		mod_delayed_work_on(info->eoi_cpu, system_wq,
585 				    &eoi->delayed, delay);
586 	} else {
587 		list_for_each_entry_reverse(elem, &eoi->eoi_list, eoi_list) {
588 			if (elem->eoi_time <= info->eoi_time)
589 				break;
590 		}
591 		list_add(&info->eoi_list, &elem->eoi_list);
592 	}
593 
594 	spin_unlock_irqrestore(&eoi->eoi_list_lock, flags);
595 }
596 
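/*
 * Finish a lateeoi event.  Spurious events may have their EOI delayed, the
 * delay growing exponentially (capped at HZ) once the per-device spurious
 * threshold is exceeded.  A still pending delayed EOI is queued via
 * lateeoi_list_add(), otherwise the event is unmasked right away.
 */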
597 static void xen_irq_lateeoi_locked(struct irq_info *info, bool spurious)
598 {
599 	evtchn_port_t evtchn;
600 	unsigned int cpu;
601 	unsigned int delay = 0;
602 
603 	evtchn = info->evtchn;
604 	if (!VALID_EVTCHN(evtchn) || !list_empty(&info->eoi_list))
605 		return;
606 
607 	if (spurious) {
608 		struct xenbus_device *dev = info->u.interdomain;
609 		unsigned int threshold = 1;
610 
611 		if (dev && dev->spurious_threshold)
612 			threshold = dev->spurious_threshold;
613 
614 		if ((1 << info->spurious_cnt) < (HZ << 2)) {
615 			if (info->spurious_cnt != 0xFF)
616 				info->spurious_cnt++;
617 		}
618 		if (info->spurious_cnt > threshold) {
619 			delay = 1 << (info->spurious_cnt - 1 - threshold);
620 			if (delay > HZ)
621 				delay = HZ;
622 			if (!info->eoi_time)
623 				info->eoi_cpu = smp_processor_id();
624 			info->eoi_time = get_jiffies_64() + delay;
625 			if (dev)
626 				atomic_add(delay, &dev->jiffies_eoi_delayed);
627 		}
628 		if (dev)
629 			atomic_inc(&dev->spurious_events);
630 	} else {
631 		info->spurious_cnt = 0;
632 	}
633 
634 	cpu = info->eoi_cpu;
635 	if (info->eoi_time &&
636 	    (info->irq_epoch == per_cpu(irq_epoch, cpu) || delay)) {
637 		lateeoi_list_add(info);
638 		return;
639 	}
640 
641 	info->eoi_time = 0;
642 
643 	/* is_active hasn't been reset yet, do it now. */
644 	smp_store_release(&info->is_active, 0);
645 	do_unmask(info, EVT_MASK_REASON_EOI_PENDING);
646 }
647 
648 static void xen_irq_lateeoi_worker(struct work_struct *work)
649 {
650 	struct lateeoi_work *eoi;
651 	struct irq_info *info;
652 	u64 now = get_jiffies_64();
653 	unsigned long flags;
654 
655 	eoi = container_of(to_delayed_work(work), struct lateeoi_work, delayed);
656 
657 	rcu_read_lock();
658 
659 	while (true) {
660 		spin_lock_irqsave(&eoi->eoi_list_lock, flags);
661 
662 		info = list_first_entry_or_null(&eoi->eoi_list, struct irq_info,
663 						eoi_list);
664 
665 		if (info == NULL)
666 			break;
667 
668 		if (now < info->eoi_time) {
669 			mod_delayed_work_on(info->eoi_cpu, system_wq,
670 					    &eoi->delayed,
671 					    info->eoi_time - now);
672 			break;
673 		}
674 
675 		list_del_init(&info->eoi_list);
676 
677 		spin_unlock_irqrestore(&eoi->eoi_list_lock, flags);
678 
679 		info->eoi_time = 0;
680 
681 		xen_irq_lateeoi_locked(info, false);
682 	}
683 
684 	spin_unlock_irqrestore(&eoi->eoi_list_lock, flags);
685 
686 	rcu_read_unlock();
687 }
688 
689 static void xen_cpu_init_eoi(unsigned int cpu)
690 {
691 	struct lateeoi_work *eoi = &per_cpu(lateeoi, cpu);
692 
693 	INIT_DELAYED_WORK(&eoi->delayed, xen_irq_lateeoi_worker);
694 	spin_lock_init(&eoi->eoi_list_lock);
695 	INIT_LIST_HEAD(&eoi->eoi_list);
696 }
697 
698 void xen_irq_lateeoi(unsigned int irq, unsigned int eoi_flags)
699 {
700 	struct irq_info *info;
701 
702 	rcu_read_lock();
703 
704 	info = info_for_irq(irq);
705 
706 	if (info)
707 		xen_irq_lateeoi_locked(info, eoi_flags & XEN_EOI_FLAG_SPURIOUS);
708 
709 	rcu_read_unlock();
710 }
711 EXPORT_SYMBOL_GPL(xen_irq_lateeoi);
712 
713 static struct irq_info *xen_irq_init(unsigned int irq)
714 {
715 	struct irq_info *info;
716 
717 	info = kzalloc(sizeof(*info), GFP_KERNEL);
718 	if (info) {
719 		info->irq = irq;
720 		info->type = IRQT_UNBOUND;
721 		info->refcnt = -1;
722 		INIT_RCU_WORK(&info->rwork, delayed_free_irq);
723 
724 		set_info_for_irq(irq, info);
725 		/*
726 		 * Interrupt affinity setting can be immediate. No point
727 		 * in delaying it until an interrupt is handled.
728 		 */
729 		irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);
730 
731 		INIT_LIST_HEAD(&info->eoi_list);
732 		list_add_tail(&info->list, &xen_irq_list_head);
733 	}
734 
735 	return info;
736 }
737 
738 static struct irq_info *xen_allocate_irq_dynamic(void)
739 {
740 	int irq = irq_alloc_desc_from(0, -1);
741 	struct irq_info *info = NULL;
742 
743 	if (irq >= 0) {
744 		info = xen_irq_init(irq);
745 		if (!info)
746 			xen_irq_free_desc(irq);
747 	}
748 
749 	return info;
750 }
751 
752 static struct irq_info *xen_allocate_irq_gsi(unsigned int gsi)
753 {
754 	int irq;
755 	struct irq_info *info;
756 
757 	/*
758 	 * A PV guest has no concept of a GSI (since it has no ACPI
759 	 * nor access to/knowledge of the physical APICs). Therefore
760 	 * all IRQs are dynamically allocated from the entire IRQ
761 	 * space.
762 	 */
763 	if (xen_pv_domain() && !xen_initial_domain())
764 		return xen_allocate_irq_dynamic();
765 
766 	/* Legacy IRQ descriptors are already allocated by the arch. */
767 	if (gsi < nr_legacy_irqs())
768 		irq = gsi;
769 	else
770 		irq = irq_alloc_desc_at(gsi, -1);
771 
772 	info = xen_irq_init(irq);
773 	if (!info)
774 		xen_irq_free_desc(irq);
775 
776 	return info;
777 }
778 
779 static void xen_free_irq(struct irq_info *info)
780 {
781 	if (WARN_ON(!info))
782 		return;
783 
784 	if (!list_empty(&info->eoi_list))
785 		lateeoi_list_del(info);
786 
787 	list_del(&info->list);
788 
789 	WARN_ON(info->refcnt > 0);
790 
791 	queue_rcu_work(system_wq, &info->rwork);
792 }
793 
794 /* Not called for lateeoi events. */
795 static void event_handler_exit(struct irq_info *info)
796 {
797 	smp_store_release(&info->is_active, 0);
798 	clear_evtchn(info->evtchn);
799 }
800 
801 static void pirq_query_unmask(struct irq_info *info)
802 {
803 	struct physdev_irq_status_query irq_status;
804 
805 	irq_status.irq = pirq_from_irq(info);
806 	if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
807 		irq_status.flags = 0;
808 
809 	info->u.pirq.flags &= ~PIRQ_NEEDS_EOI;
810 	if (irq_status.flags & XENIRQSTAT_needs_eoi)
811 		info->u.pirq.flags |= PIRQ_NEEDS_EOI;
812 }
813 
814 static void do_eoi_pirq(struct irq_info *info)
815 {
816 	struct physdev_eoi eoi = { .irq = pirq_from_irq(info) };
817 	int rc = 0;
818 
819 	if (!VALID_EVTCHN(info->evtchn))
820 		return;
821 
822 	event_handler_exit(info);
823 
824 	if (pirq_needs_eoi(info)) {
825 		rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
826 		WARN_ON(rc);
827 	}
828 }
829 
830 static void eoi_pirq(struct irq_data *data)
831 {
832 	struct irq_info *info = info_for_irq(data->irq);
833 
834 	do_eoi_pirq(info);
835 }
836 
837 static void do_disable_dynirq(struct irq_info *info)
838 {
839 	if (VALID_EVTCHN(info->evtchn))
840 		do_mask(info, EVT_MASK_REASON_EXPLICIT);
841 }
842 
843 static void disable_dynirq(struct irq_data *data)
844 {
845 	struct irq_info *info = info_for_irq(data->irq);
846 
847 	if (info)
848 		do_disable_dynirq(info);
849 }
850 
851 static void mask_ack_pirq(struct irq_data *data)
852 {
853 	struct irq_info *info = info_for_irq(data->irq);
854 
855 	if (info) {
856 		do_disable_dynirq(info);
857 		do_eoi_pirq(info);
858 	}
859 }
860 
861 static unsigned int __startup_pirq(struct irq_info *info)
862 {
863 	struct evtchn_bind_pirq bind_pirq;
864 	evtchn_port_t evtchn = info->evtchn;
865 	int rc;
866 
867 	if (VALID_EVTCHN(evtchn))
868 		goto out;
869 
870 	bind_pirq.pirq = pirq_from_irq(info);
871 	/* NB. We are happy to share unless we are probing. */
872 	bind_pirq.flags = info->u.pirq.flags & PIRQ_SHAREABLE ?
873 					BIND_PIRQ__WILL_SHARE : 0;
874 	rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq);
875 	if (rc != 0) {
876 		pr_warn("Failed to obtain physical IRQ %d\n", info->irq);
877 		return 0;
878 	}
879 	evtchn = bind_pirq.port;
880 
881 	pirq_query_unmask(info);
882 
883 	rc = set_evtchn_to_irq(evtchn, info->irq);
884 	if (rc)
885 		goto err;
886 
887 	info->evtchn = evtchn;
888 	bind_evtchn_to_cpu(info, 0, false);
889 
890 	rc = xen_evtchn_port_setup(evtchn);
891 	if (rc)
892 		goto err;
893 
894 out:
895 	do_unmask(info, EVT_MASK_REASON_EXPLICIT);
896 
897 	do_eoi_pirq(info);
898 
899 	return 0;
900 
901 err:
902 	pr_err("irq%d: Failed to set port to irq mapping (%d)\n", info->irq,
903 	       rc);
904 	xen_evtchn_close(evtchn);
905 	return 0;
906 }
907 
908 static unsigned int startup_pirq(struct irq_data *data)
909 {
910 	struct irq_info *info = info_for_irq(data->irq);
911 
912 	return __startup_pirq(info);
913 }
914 
915 static void shutdown_pirq(struct irq_data *data)
916 {
917 	struct irq_info *info = info_for_irq(data->irq);
918 	evtchn_port_t evtchn = info->evtchn;
919 
920 	BUG_ON(info->type != IRQT_PIRQ);
921 
922 	if (!VALID_EVTCHN(evtchn))
923 		return;
924 
925 	do_mask(info, EVT_MASK_REASON_EXPLICIT);
926 	xen_evtchn_close(evtchn);
927 	xen_irq_info_cleanup(info);
928 }
929 
930 static void enable_pirq(struct irq_data *data)
931 {
932 	enable_dynirq(data);
933 }
934 
935 static void disable_pirq(struct irq_data *data)
936 {
937 	disable_dynirq(data);
938 }
939 
940 int xen_irq_from_gsi(unsigned gsi)
941 {
942 	struct irq_info *info;
943 
944 	list_for_each_entry(info, &xen_irq_list_head, list) {
945 		if (info->type != IRQT_PIRQ)
946 			continue;
947 
948 		if (info->u.pirq.gsi == gsi)
949 			return info->irq;
950 	}
951 
952 	return -1;
953 }
954 EXPORT_SYMBOL_GPL(xen_irq_from_gsi);
955 
956 static void __unbind_from_irq(struct irq_info *info, unsigned int irq)
957 {
958 	evtchn_port_t evtchn;
959 
960 	if (!info) {
961 		xen_irq_free_desc(irq);
962 		return;
963 	}
964 
965 	if (info->refcnt > 0) {
966 		info->refcnt--;
967 		if (info->refcnt != 0)
968 			return;
969 	}
970 
971 	evtchn = info->evtchn;
972 
973 	if (VALID_EVTCHN(evtchn)) {
974 		unsigned int cpu = info->cpu;
975 		struct xenbus_device *dev;
976 
977 		if (!info->is_static)
978 			xen_evtchn_close(evtchn);
979 
980 		switch (info->type) {
981 		case IRQT_VIRQ:
982 			per_cpu(virq_to_irq, cpu)[virq_from_irq(info)] = -1;
983 			break;
984 		case IRQT_IPI:
985 			per_cpu(ipi_to_irq, cpu)[ipi_from_irq(info)] = -1;
986 			per_cpu(ipi_to_evtchn, cpu)[ipi_from_irq(info)] = 0;
987 			break;
988 		case IRQT_EVTCHN:
989 			dev = info->u.interdomain;
990 			if (dev)
991 				atomic_dec(&dev->event_channels);
992 			break;
993 		default:
994 			break;
995 		}
996 
997 		xen_irq_info_cleanup(info);
998 	}
999 
1000 	xen_free_irq(info);
1001 }
1002 
1003 /*
1004  * Do not make any assumptions regarding the relationship between the
1005  * IRQ number returned here and the Xen pirq argument.
1006  *
1007  * Note: We don't assign an event channel until the irq is actually started
1008  * up.  Return an existing irq if we've already got one for the gsi.
1009  *
1010  * Shareable implies level triggered, not shareable implies edge
1011  * triggered here.
1012  */
1013 int xen_bind_pirq_gsi_to_irq(unsigned gsi,
1014 			     unsigned pirq, int shareable, char *name)
1015 {
1016 	struct irq_info *info;
1017 	struct physdev_irq irq_op;
1018 	int ret;
1019 
1020 	mutex_lock(&irq_mapping_update_lock);
1021 
1022 	ret = xen_irq_from_gsi(gsi);
1023 	if (ret != -1) {
1024 		pr_info("%s: returning irq %d for gsi %u\n",
1025 			__func__, ret, gsi);
1026 		goto out;
1027 	}
1028 
1029 	info = xen_allocate_irq_gsi(gsi);
1030 	if (!info)
1031 		goto out;
1032 
1033 	irq_op.irq = info->irq;
1034 	irq_op.vector = 0;
1035 
1036 	/* Only the privileged domain can do this. For non-priv, the pcifront
1037 	 * driver provides a PCI bus that does the call to do exactly
1038 	 * this in the priv domain. */
1039 	if (xen_initial_domain() &&
1040 	    HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) {
1041 		xen_free_irq(info);
1042 		ret = -ENOSPC;
1043 		goto out;
1044 	}
1045 
1046 	ret = xen_irq_info_pirq_setup(info, 0, pirq, gsi, DOMID_SELF,
1047 			       shareable ? PIRQ_SHAREABLE : 0);
1048 	if (ret < 0) {
1049 		__unbind_from_irq(info, info->irq);
1050 		goto out;
1051 	}
1052 
1053 	pirq_query_unmask(info);
1054 	/* We try to use the handler with the appropriate semantic for the
1055 	 * type of interrupt: if the interrupt is an edge triggered
1056 	 * interrupt we use handle_edge_irq.
1057 	 *
1058 	 * On the other hand if the interrupt is level triggered we use
1059 	 * handle_fasteoi_irq like the native code does for this kind of
1060 	 * interrupts.
1061 	 *
1062 	 * Depending on the Xen version, pirq_needs_eoi might return true
1063 	 * not only for level triggered interrupts but for edge triggered
1064 	 * interrupts too. In any case Xen always honors the eoi mechanism,
1065 	 * not injecting any more pirqs of the same kind if the first one
1066 	 * hasn't received an eoi yet. Therefore using the fasteoi handler
1067 	 * is the right choice either way.
1068 	 */
1069 	if (shareable)
1070 		irq_set_chip_and_handler_name(info->irq, &xen_pirq_chip,
1071 				handle_fasteoi_irq, name);
1072 	else
1073 		irq_set_chip_and_handler_name(info->irq, &xen_pirq_chip,
1074 				handle_edge_irq, name);
1075 
1076 	ret = info->irq;
1077 
1078 out:
1079 	mutex_unlock(&irq_mapping_update_lock);
1080 
1081 	return ret;
1082 }
1083 
1084 #ifdef CONFIG_PCI_MSI
1085 int xen_allocate_pirq_msi(struct pci_dev *dev, struct msi_desc *msidesc)
1086 {
1087 	int rc;
1088 	struct physdev_get_free_pirq op_get_free_pirq;
1089 
1090 	op_get_free_pirq.type = MAP_PIRQ_TYPE_MSI;
1091 	rc = HYPERVISOR_physdev_op(PHYSDEVOP_get_free_pirq, &op_get_free_pirq);
1092 
1093 	WARN_ONCE(rc == -ENOSYS,
1094 		  "hypervisor does not support the PHYSDEVOP_get_free_pirq interface\n");
1095 
1096 	return rc ? -1 : op_get_free_pirq.pirq;
1097 }
1098 
1099 int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
1100 			     int pirq, int nvec, const char *name, domid_t domid)
1101 {
1102 	int i, irq, ret;
1103 	struct irq_info *info;
1104 
1105 	mutex_lock(&irq_mapping_update_lock);
1106 
1107 	irq = irq_alloc_descs(-1, 0, nvec, -1);
1108 	if (irq < 0)
1109 		goto out;
1110 
1111 	for (i = 0; i < nvec; i++) {
1112 		info = xen_irq_init(irq + i);
1113 		if (!info)
1114 			goto error_irq;
1115 
1116 		irq_set_chip_and_handler_name(irq + i, &xen_pirq_chip, handle_edge_irq, name);
1117 
1118 		ret = xen_irq_info_pirq_setup(info, 0, pirq + i, 0, domid,
1119 					      i == 0 ? 0 : PIRQ_MSI_GROUP);
1120 		if (ret < 0)
1121 			goto error_irq;
1122 	}
1123 
1124 	ret = irq_set_msi_desc(irq, msidesc);
1125 	if (ret < 0)
1126 		goto error_irq;
1127 out:
1128 	mutex_unlock(&irq_mapping_update_lock);
1129 	return irq;
1130 
1131 error_irq:
1132 	while (nvec--) {
1133 		info = info_for_irq(irq + nvec);
1134 		__unbind_from_irq(info, irq + nvec);
1135 	}
1136 	mutex_unlock(&irq_mapping_update_lock);
1137 	return ret;
1138 }
1139 #endif
1140 
1141 int xen_destroy_irq(int irq)
1142 {
1143 	struct physdev_unmap_pirq unmap_irq;
1144 	struct irq_info *info = info_for_irq(irq);
1145 	int rc = -ENOENT;
1146 
1147 	mutex_lock(&irq_mapping_update_lock);
1148 
1149 	/*
1150 	 * Only the first vector of an MSI group (the one without the
1151 	 * PIRQ_MSI_GROUP flag) unmaps the PIRQ; for any other vector in
1152 	 * the group the PIRQ unmap is skipped.
1153 	 */
1154 	if (xen_initial_domain() && !(info->u.pirq.flags & PIRQ_MSI_GROUP)) {
1155 		unmap_irq.pirq = info->u.pirq.pirq;
1156 		unmap_irq.domid = info->u.pirq.domid;
1157 		rc = HYPERVISOR_physdev_op(PHYSDEVOP_unmap_pirq, &unmap_irq);
1158 		/* If another domain quits without making the pci_disable_msix
1159 		 * call, the Xen hypervisor takes care of freeing the PIRQs
1160 		 * (free_domain_pirqs).
1161 		 */
1162 		if ((rc == -ESRCH && info->u.pirq.domid != DOMID_SELF))
1163 			pr_info("domain %d does not have %d anymore\n",
1164 				info->u.pirq.domid, info->u.pirq.pirq);
1165 		else if (rc) {
1166 			pr_warn("unmap irq failed %d\n", rc);
1167 			goto out;
1168 		}
1169 	}
1170 
1171 	xen_free_irq(info);
1172 
1173 out:
1174 	mutex_unlock(&irq_mapping_update_lock);
1175 	return rc;
1176 }
1177 
1178 int xen_pirq_from_irq(unsigned irq)
1179 {
1180 	struct irq_info *info = info_for_irq(irq);
1181 
1182 	return pirq_from_irq(info);
1183 }
1184 EXPORT_SYMBOL_GPL(xen_pirq_from_irq);
1185 
1186 static int bind_evtchn_to_irq_chip(evtchn_port_t evtchn, struct irq_chip *chip,
1187 				   struct xenbus_device *dev)
1188 {
1189 	int ret = -ENOMEM;
1190 	struct irq_info *info;
1191 
1192 	if (evtchn >= xen_evtchn_max_channels())
1193 		return -ENOMEM;
1194 
1195 	mutex_lock(&irq_mapping_update_lock);
1196 
1197 	info = evtchn_to_info(evtchn);
1198 
1199 	if (!info) {
1200 		info = xen_allocate_irq_dynamic();
1201 		if (!info)
1202 			goto out;
1203 
1204 		irq_set_chip_and_handler_name(info->irq, chip,
1205 					      handle_edge_irq, "event");
1206 
1207 		ret = xen_irq_info_evtchn_setup(info, evtchn, dev);
1208 		if (ret < 0) {
1209 			__unbind_from_irq(info, info->irq);
1210 			goto out;
1211 		}
1212 		/*
1213 		 * New interdomain events are initially bound to vCPU0. This
1214 		 * is required to set up the event channel in the first
1215 		 * place and also important for UP guests because the
1216 		 * affinity setting is not invoked on them so nothing would
1217 		 * bind the channel.
1218 		 */
1219 		bind_evtchn_to_cpu(info, 0, false);
1220 	} else if (!WARN_ON(info->type != IRQT_EVTCHN)) {
1221 		info->refcnt++;
1222 	}
1223 
1224 	ret = info->irq;
1225 
1226 out:
1227 	mutex_unlock(&irq_mapping_update_lock);
1228 
1229 	return ret;
1230 }
1231 
1232 int bind_evtchn_to_irq(evtchn_port_t evtchn)
1233 {
1234 	return bind_evtchn_to_irq_chip(evtchn, &xen_dynamic_chip, NULL);
1235 }
1236 EXPORT_SYMBOL_GPL(bind_evtchn_to_irq);
1237 
1238 int bind_evtchn_to_irq_lateeoi(evtchn_port_t evtchn)
1239 {
1240 	return bind_evtchn_to_irq_chip(evtchn, &xen_lateeoi_chip, NULL);
1241 }
1242 EXPORT_SYMBOL_GPL(bind_evtchn_to_irq_lateeoi);
1243 
1244 static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
1245 {
1246 	struct evtchn_bind_ipi bind_ipi;
1247 	evtchn_port_t evtchn;
1248 	struct irq_info *info;
1249 	int ret;
1250 
1251 	mutex_lock(&irq_mapping_update_lock);
1252 
1253 	ret = per_cpu(ipi_to_irq, cpu)[ipi];
1254 
1255 	if (ret == -1) {
1256 		info = xen_allocate_irq_dynamic();
1257 		if (!info)
1258 			goto out;
1259 
1260 		irq_set_chip_and_handler_name(info->irq, &xen_percpu_chip,
1261 					      handle_percpu_irq, "ipi");
1262 
1263 		bind_ipi.vcpu = xen_vcpu_nr(cpu);
1264 		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
1265 						&bind_ipi) != 0)
1266 			BUG();
1267 		evtchn = bind_ipi.port;
1268 
1269 		ret = xen_irq_info_ipi_setup(info, cpu, evtchn, ipi);
1270 		if (ret < 0) {
1271 			__unbind_from_irq(info, info->irq);
1272 			goto out;
1273 		}
1274 		/*
1275 		 * Force the affinity mask to the target CPU so proc shows
1276 		 * the correct target.
1277 		 */
1278 		bind_evtchn_to_cpu(info, cpu, true);
1279 		ret = info->irq;
1280 	} else {
1281 		info = info_for_irq(ret);
1282 		WARN_ON(info == NULL || info->type != IRQT_IPI);
1283 	}
1284 
1285  out:
1286 	mutex_unlock(&irq_mapping_update_lock);
1287 	return ret;
1288 }
1289 
1290 static int bind_interdomain_evtchn_to_irq_chip(struct xenbus_device *dev,
1291 					       evtchn_port_t remote_port,
1292 					       struct irq_chip *chip)
1293 {
1294 	struct evtchn_bind_interdomain bind_interdomain;
1295 	int err;
1296 
1297 	bind_interdomain.remote_dom  = dev->otherend_id;
1298 	bind_interdomain.remote_port = remote_port;
1299 
1300 	err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
1301 					  &bind_interdomain);
1302 
1303 	return err ? : bind_evtchn_to_irq_chip(bind_interdomain.local_port,
1304 					       chip, dev);
1305 }
1306 
1307 int bind_interdomain_evtchn_to_irq_lateeoi(struct xenbus_device *dev,
1308 					   evtchn_port_t remote_port)
1309 {
1310 	return bind_interdomain_evtchn_to_irq_chip(dev, remote_port,
1311 						   &xen_lateeoi_chip);
1312 }
1313 EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irq_lateeoi);
1314 
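/*
 * Scan all event channel ports for an existing VIRQ binding to this vcpu.
 * Used by bind_virq_to_irq() when EVTCHNOP_bind_virq reports the VIRQ as
 * already bound (-EEXIST).
 */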
1315 static int find_virq(unsigned int virq, unsigned int cpu, evtchn_port_t *evtchn)
1316 {
1317 	struct evtchn_status status;
1318 	evtchn_port_t port;
1319 	int rc = -ENOENT;
1320 
1321 	memset(&status, 0, sizeof(status));
1322 	for (port = 0; port < xen_evtchn_max_channels(); port++) {
1323 		status.dom = DOMID_SELF;
1324 		status.port = port;
1325 		rc = HYPERVISOR_event_channel_op(EVTCHNOP_status, &status);
1326 		if (rc < 0)
1327 			continue;
1328 		if (status.status != EVTCHNSTAT_virq)
1329 			continue;
1330 		if (status.u.virq == virq && status.vcpu == xen_vcpu_nr(cpu)) {
1331 			*evtchn = port;
1332 			break;
1333 		}
1334 	}
1335 	return rc;
1336 }
1337 
1338 /**
1339  * xen_evtchn_nr_channels - number of usable event channel ports
1340  *
1341  * This may be less than the maximum supported by the current
1342  * hypervisor ABI. Use xen_evtchn_max_channels() for the maximum
1343  * supported.
1344  */
1345 unsigned xen_evtchn_nr_channels(void)
1346 {
1347 	return evtchn_ops->nr_channels();
1348 }
1349 EXPORT_SYMBOL_GPL(xen_evtchn_nr_channels);
1350 
1351 int bind_virq_to_irq(unsigned int virq, unsigned int cpu, bool percpu)
1352 {
1353 	struct evtchn_bind_virq bind_virq;
1354 	evtchn_port_t evtchn = 0;
1355 	struct irq_info *info;
1356 	int ret;
1357 
1358 	mutex_lock(&irq_mapping_update_lock);
1359 
1360 	ret = per_cpu(virq_to_irq, cpu)[virq];
1361 
1362 	if (ret == -1) {
1363 		info = xen_allocate_irq_dynamic();
1364 		if (!info)
1365 			goto out;
1366 
1367 		if (percpu)
1368 			irq_set_chip_and_handler_name(info->irq, &xen_percpu_chip,
1369 						      handle_percpu_irq, "virq");
1370 		else
1371 			irq_set_chip_and_handler_name(info->irq, &xen_dynamic_chip,
1372 						      handle_edge_irq, "virq");
1373 
1374 		bind_virq.virq = virq;
1375 		bind_virq.vcpu = xen_vcpu_nr(cpu);
1376 		ret = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
1377 						&bind_virq);
1378 		if (ret == 0)
1379 			evtchn = bind_virq.port;
1380 		else {
1381 			if (ret == -EEXIST)
1382 				ret = find_virq(virq, cpu, &evtchn);
1383 			BUG_ON(ret < 0);
1384 		}
1385 
1386 		ret = xen_irq_info_virq_setup(info, cpu, evtchn, virq);
1387 		if (ret < 0) {
1388 			__unbind_from_irq(info, info->irq);
1389 			goto out;
1390 		}
1391 
1392 		/*
1393 		 * Force the affinity mask for percpu interrupts so proc
1394 		 * shows the correct target.
1395 		 */
1396 		bind_evtchn_to_cpu(info, cpu, percpu);
1397 		ret = info->irq;
1398 	} else {
1399 		info = info_for_irq(ret);
1400 		WARN_ON(info == NULL || info->type != IRQT_VIRQ);
1401 	}
1402 
1403 out:
1404 	mutex_unlock(&irq_mapping_update_lock);
1405 
1406 	return ret;
1407 }
1408 
1409 static void unbind_from_irq(unsigned int irq)
1410 {
1411 	struct irq_info *info;
1412 
1413 	mutex_lock(&irq_mapping_update_lock);
1414 	info = info_for_irq(irq);
1415 	__unbind_from_irq(info, irq);
1416 	mutex_unlock(&irq_mapping_update_lock);
1417 }
1418 
1419 static int bind_evtchn_to_irqhandler_chip(evtchn_port_t evtchn,
1420 					  irq_handler_t handler,
1421 					  unsigned long irqflags,
1422 					  const char *devname, void *dev_id,
1423 					  struct irq_chip *chip)
1424 {
1425 	int irq, retval;
1426 
1427 	irq = bind_evtchn_to_irq_chip(evtchn, chip, NULL);
1428 	if (irq < 0)
1429 		return irq;
1430 	retval = request_irq(irq, handler, irqflags, devname, dev_id);
1431 	if (retval != 0) {
1432 		unbind_from_irq(irq);
1433 		return retval;
1434 	}
1435 
1436 	return irq;
1437 }
1438 
1439 int bind_evtchn_to_irqhandler(evtchn_port_t evtchn,
1440 			      irq_handler_t handler,
1441 			      unsigned long irqflags,
1442 			      const char *devname, void *dev_id)
1443 {
1444 	return bind_evtchn_to_irqhandler_chip(evtchn, handler, irqflags,
1445 					      devname, dev_id,
1446 					      &xen_dynamic_chip);
1447 }
1448 EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler);
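/*
 * Minimal usage sketch (all names below are illustrative only): a driver
 * that has been handed an event channel port binds and later releases it:
 *
 *	irq = bind_evtchn_to_irqhandler(port, my_handler, 0, "my-dev", dev);
 *	if (irq < 0)
 *		return irq;
 *	...
 *	unbind_from_irqhandler(irq, dev);
 */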
1449 
1450 int bind_evtchn_to_irqhandler_lateeoi(evtchn_port_t evtchn,
1451 				      irq_handler_t handler,
1452 				      unsigned long irqflags,
1453 				      const char *devname, void *dev_id)
1454 {
1455 	return bind_evtchn_to_irqhandler_chip(evtchn, handler, irqflags,
1456 					      devname, dev_id,
1457 					      &xen_lateeoi_chip);
1458 }
1459 EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler_lateeoi);
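/*
 * Note: a handler bound via one of the lateeoi variants must signal
 * completion with xen_irq_lateeoi(irq, flags) once the event has been
 * processed (passing XEN_EOI_FLAG_SPURIOUS if it turned out to be
 * spurious); the event channel stays masked until then.
 */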
1460 
1461 static int bind_interdomain_evtchn_to_irqhandler_chip(
1462 		struct xenbus_device *dev, evtchn_port_t remote_port,
1463 		irq_handler_t handler, unsigned long irqflags,
1464 		const char *devname, void *dev_id, struct irq_chip *chip)
1465 {
1466 	int irq, retval;
1467 
1468 	irq = bind_interdomain_evtchn_to_irq_chip(dev, remote_port, chip);
1469 	if (irq < 0)
1470 		return irq;
1471 
1472 	retval = request_irq(irq, handler, irqflags, devname, dev_id);
1473 	if (retval != 0) {
1474 		unbind_from_irq(irq);
1475 		return retval;
1476 	}
1477 
1478 	return irq;
1479 }
1480 
1481 int bind_interdomain_evtchn_to_irqhandler_lateeoi(struct xenbus_device *dev,
1482 						  evtchn_port_t remote_port,
1483 						  irq_handler_t handler,
1484 						  unsigned long irqflags,
1485 						  const char *devname,
1486 						  void *dev_id)
1487 {
1488 	return bind_interdomain_evtchn_to_irqhandler_chip(dev,
1489 				remote_port, handler, irqflags, devname,
1490 				dev_id, &xen_lateeoi_chip);
1491 }
1492 EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irqhandler_lateeoi);
1493 
1494 int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
1495 			    irq_handler_t handler,
1496 			    unsigned long irqflags, const char *devname, void *dev_id)
1497 {
1498 	int irq, retval;
1499 
1500 	irq = bind_virq_to_irq(virq, cpu, irqflags & IRQF_PERCPU);
1501 	if (irq < 0)
1502 		return irq;
1503 	retval = request_irq(irq, handler, irqflags, devname, dev_id);
1504 	if (retval != 0) {
1505 		unbind_from_irq(irq);
1506 		return retval;
1507 	}
1508 
1509 	return irq;
1510 }
1511 EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler);
1512 
1513 int bind_ipi_to_irqhandler(enum ipi_vector ipi,
1514 			   unsigned int cpu,
1515 			   irq_handler_t handler,
1516 			   unsigned long irqflags,
1517 			   const char *devname,
1518 			   void *dev_id)
1519 {
1520 	int irq, retval;
1521 
1522 	irq = bind_ipi_to_irq(ipi, cpu);
1523 	if (irq < 0)
1524 		return irq;
1525 
1526 	irqflags |= IRQF_NO_SUSPEND | IRQF_FORCE_RESUME | IRQF_EARLY_RESUME;
1527 	retval = request_irq(irq, handler, irqflags, devname, dev_id);
1528 	if (retval != 0) {
1529 		unbind_from_irq(irq);
1530 		return retval;
1531 	}
1532 
1533 	return irq;
1534 }
1535 
1536 void unbind_from_irqhandler(unsigned int irq, void *dev_id)
1537 {
1538 	struct irq_info *info = info_for_irq(irq);
1539 
1540 	if (WARN_ON(!info))
1541 		return;
1542 	free_irq(irq, dev_id);
1543 	unbind_from_irq(irq);
1544 }
1545 EXPORT_SYMBOL_GPL(unbind_from_irqhandler);
1546 
1547 /**
1548  * xen_set_irq_priority() - set an event channel priority.
1549  * @irq: irq bound to an event channel.
1550  * @priority: priority between XEN_IRQ_PRIORITY_MAX and XEN_IRQ_PRIORITY_MIN.
1551  */
1552 int xen_set_irq_priority(unsigned irq, unsigned priority)
1553 {
1554 	struct evtchn_set_priority set_priority;
1555 
1556 	set_priority.port = evtchn_from_irq(irq);
1557 	set_priority.priority = priority;
1558 
1559 	return HYPERVISOR_event_channel_op(EVTCHNOP_set_priority,
1560 					   &set_priority);
1561 }
1562 EXPORT_SYMBOL_GPL(xen_set_irq_priority);
1563 
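/*
 * Switch an event channel to reference counting: refcnt starts out as -1
 * (not refcounted) in xen_irq_init() and is set to 1 here.  Subsequent
 * evtchn_get()/evtchn_put() calls adjust it, the final put unbinding the
 * irq via unbind_from_irq().
 */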
1564 int evtchn_make_refcounted(evtchn_port_t evtchn, bool is_static)
1565 {
1566 	struct irq_info *info = evtchn_to_info(evtchn);
1567 
1568 	if (!info)
1569 		return -ENOENT;
1570 
1571 	WARN_ON(info->refcnt != -1);
1572 
1573 	info->refcnt = 1;
1574 	info->is_static = is_static;
1575 
1576 	return 0;
1577 }
1578 EXPORT_SYMBOL_GPL(evtchn_make_refcounted);
1579 
1580 int evtchn_get(evtchn_port_t evtchn)
1581 {
1582 	struct irq_info *info;
1583 	int err = -ENOENT;
1584 
1585 	if (evtchn >= xen_evtchn_max_channels())
1586 		return -EINVAL;
1587 
1588 	mutex_lock(&irq_mapping_update_lock);
1589 
1590 	info = evtchn_to_info(evtchn);
1591 
1592 	if (!info)
1593 		goto done;
1594 
1595 	err = -EINVAL;
1596 	if (info->refcnt <= 0 || info->refcnt == SHRT_MAX)
1597 		goto done;
1598 
1599 	info->refcnt++;
1600 	err = 0;
1601  done:
1602 	mutex_unlock(&irq_mapping_update_lock);
1603 
1604 	return err;
1605 }
1606 EXPORT_SYMBOL_GPL(evtchn_get);
1607 
1608 void evtchn_put(evtchn_port_t evtchn)
1609 {
1610 	struct irq_info *info = evtchn_to_info(evtchn);
1611 
1612 	if (WARN_ON(!info))
1613 		return;
1614 	unbind_from_irq(info->irq);
1615 }
1616 EXPORT_SYMBOL_GPL(evtchn_put);
1617 
1618 void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
1619 {
1620 	evtchn_port_t evtchn;
1621 
1622 #ifdef CONFIG_X86
1623 	if (unlikely(vector == XEN_NMI_VECTOR)) {
1624 		int rc =  HYPERVISOR_vcpu_op(VCPUOP_send_nmi, xen_vcpu_nr(cpu),
1625 					     NULL);
1626 		if (rc < 0)
1627 			printk(KERN_WARNING "Sending nmi to CPU%d failed (rc:%d)\n", cpu, rc);
1628 		return;
1629 	}
1630 #endif
1631 	evtchn = per_cpu(ipi_to_evtchn, cpu)[vector];
1632 	BUG_ON(evtchn == 0);
1633 	notify_remote_via_evtchn(evtchn);
1634 }
1635 
1636 struct evtchn_loop_ctrl {
1637 	ktime_t timeout;
1638 	unsigned count;
1639 	bool defer_eoi;
1640 };
1641 
1642 void handle_irq_for_port(evtchn_port_t port, struct evtchn_loop_ctrl *ctrl)
1643 {
1644 	struct irq_info *info = evtchn_to_info(port);
1645 	struct xenbus_device *dev;
1646 
1647 	if (!info)
1648 		return;
1649 
1650 	/*
1651 	 * Check for timeout every 256 events.
1652 	 * We are setting the timeout value only after the first 256
1653 	 * events in order to not hurt the common case of few loop
1654 	 * iterations. The 256 is basically an arbitrary value.
1655 	 *
1656 	 * In case we are hitting the timeout we need to defer all further
1657 	 * EOIs in order to ensure to leave the event handling loop rather
1658 	 * sooner than later.
1659 	 */
1660 	if (!ctrl->defer_eoi && !(++ctrl->count & 0xff)) {
1661 		ktime_t kt = ktime_get();
1662 
1663 		if (!ctrl->timeout) {
1664 			kt = ktime_add_ms(kt,
1665 					  jiffies_to_msecs(event_loop_timeout));
1666 			ctrl->timeout = kt;
1667 		} else if (kt > ctrl->timeout) {
1668 			ctrl->defer_eoi = true;
1669 		}
1670 	}
1671 
1672 	if (xchg_acquire(&info->is_active, 1))
1673 		return;
1674 
1675 	dev = (info->type == IRQT_EVTCHN) ? info->u.interdomain : NULL;
1676 	if (dev)
1677 		atomic_inc(&dev->events);
1678 
1679 	if (ctrl->defer_eoi) {
1680 		info->eoi_cpu = smp_processor_id();
1681 		info->irq_epoch = __this_cpu_read(irq_epoch);
1682 		info->eoi_time = get_jiffies_64() + event_eoi_delay;
1683 	}
1684 
1685 	generic_handle_irq(info->irq);
1686 }
1687 
1688 int xen_evtchn_do_upcall(void)
1689 {
1690 	struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
1691 	int ret = vcpu_info->evtchn_upcall_pending ? IRQ_HANDLED : IRQ_NONE;
1692 	int cpu = smp_processor_id();
1693 	struct evtchn_loop_ctrl ctrl = { 0 };
1694 
1695 	/*
1696 	 * When closing an event channel the associated IRQ must not be freed
1697 	 * until all cpus have left the event handling loop. This is ensured
1698 	 * by taking the rcu_read_lock() while handling events, as freeing of
1699 	 * the IRQ is handled via queue_rcu_work() _after_ closing the event
1700 	 * channel.
1701 	 */
1702 	rcu_read_lock();
1703 
1704 	do {
1705 		vcpu_info->evtchn_upcall_pending = 0;
1706 
1707 		xen_evtchn_handle_events(cpu, &ctrl);
1708 
1709 		BUG_ON(!irqs_disabled());
1710 
1711 		virt_rmb(); /* Hypervisor can set upcall pending. */
1712 
1713 	} while (vcpu_info->evtchn_upcall_pending);
1714 
1715 	rcu_read_unlock();
1716 
1717 	/*
1718 	 * Increment irq_epoch only now to defer EOIs only for
1719 	 * xen_irq_lateeoi() invocations occurring from inside the loop
1720 	 * above.
1721 	 */
1722 	__this_cpu_inc(irq_epoch);
1723 
1724 	return ret;
1725 }
1726 EXPORT_SYMBOL_GPL(xen_evtchn_do_upcall);
1727 
1728 /* Rebind a new event channel to an existing irq. */
1729 void rebind_evtchn_irq(evtchn_port_t evtchn, int irq)
1730 {
1731 	struct irq_info *info = info_for_irq(irq);
1732 
1733 	if (WARN_ON(!info))
1734 		return;
1735 
1736 	/* Make sure the irq is masked, since the new event channel
1737 	   will also be masked. */
1738 	disable_irq(irq);
1739 
1740 	mutex_lock(&irq_mapping_update_lock);
1741 
1742 	/* After resume the irq<->evtchn mappings are all cleared out */
1743 	BUG_ON(evtchn_to_info(evtchn));
1744 	/* Expect irq to have been bound before,
1745 	   so there should be a proper type */
1746 	BUG_ON(info->type == IRQT_UNBOUND);
1747 
1748 	info->irq = irq;
1749 	(void)xen_irq_info_evtchn_setup(info, evtchn, NULL);
1750 
1751 	mutex_unlock(&irq_mapping_update_lock);
1752 
1753 	bind_evtchn_to_cpu(info, info->cpu, false);
1754 
1755 	/* Unmask the event channel. */
1756 	enable_irq(irq);
1757 }
1758 
1759 /* Rebind an evtchn so that it gets delivered to a specific cpu */
1760 static int xen_rebind_evtchn_to_cpu(struct irq_info *info, unsigned int tcpu)
1761 {
1762 	struct evtchn_bind_vcpu bind_vcpu;
1763 	evtchn_port_t evtchn = info ? info->evtchn : 0;
1764 
1765 	if (!VALID_EVTCHN(evtchn))
1766 		return -1;
1767 
1768 	if (!xen_support_evtchn_rebind())
1769 		return -1;
1770 
1771 	/* Send future instances of this interrupt to other vcpu. */
1772 	bind_vcpu.port = evtchn;
1773 	bind_vcpu.vcpu = xen_vcpu_nr(tcpu);
1774 
1775 	/*
1776 	 * Mask the event while changing the VCPU binding to prevent
1777 	 * it being delivered on an unexpected VCPU.
1778 	 */
1779 	do_mask(info, EVT_MASK_REASON_TEMPORARY);
1780 
1781 	/*
1782 	 * If this fails, it usually just indicates that we're dealing with a
1783 	 * virq or IPI channel, which don't actually need to be rebound. Ignore
1784 	 * it, but don't do the xenlinux-level rebind in that case.
1785 	 */
1786 	if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
1787 		bind_evtchn_to_cpu(info, tcpu, false);
1788 
1789 	do_unmask(info, EVT_MASK_REASON_TEMPORARY);
1790 
1791 	return 0;
1792 }
1793 
1794 /*
1795  * Find the CPU within @dest mask which has the least number of channels
1796  * assigned. This is not precise as the per cpu counts can be modified
1797  * concurrently.
1798  */
1799 static unsigned int select_target_cpu(const struct cpumask *dest)
1800 {
1801 	unsigned int cpu, best_cpu = UINT_MAX, minch = UINT_MAX;
1802 
1803 	for_each_cpu_and(cpu, dest, cpu_online_mask) {
1804 		unsigned int curch = atomic_read(&channels_on_cpu[cpu]);
1805 
1806 		if (curch < minch) {
1807 			minch = curch;
1808 			best_cpu = cpu;
1809 		}
1810 	}
1811 
1812 	/*
1813 	 * Catch the unlikely case that dest contains no online CPUs. Can't
1814 	 * recurse.
1815 	 */
1816 	if (best_cpu == UINT_MAX)
1817 		return select_target_cpu(cpu_online_mask);
1818 
1819 	return best_cpu;
1820 }
1821 
1822 static int set_affinity_irq(struct irq_data *data, const struct cpumask *dest,
1823 			    bool force)
1824 {
1825 	unsigned int tcpu = select_target_cpu(dest);
1826 	int ret;
1827 
1828 	ret = xen_rebind_evtchn_to_cpu(info_for_irq(data->irq), tcpu);
1829 	if (!ret)
1830 		irq_data_update_effective_affinity(data, cpumask_of(tcpu));
1831 
1832 	return ret;
1833 }
1834 
1835 static void enable_dynirq(struct irq_data *data)
1836 {
1837 	struct irq_info *info = info_for_irq(data->irq);
1838 	evtchn_port_t evtchn = info ? info->evtchn : 0;
1839 
1840 	if (VALID_EVTCHN(evtchn))
1841 		do_unmask(info, EVT_MASK_REASON_EXPLICIT);
1842 }
1843 
1844 static void do_ack_dynirq(struct irq_info *info)
1845 {
1846 	evtchn_port_t evtchn = info->evtchn;
1847 
1848 	if (VALID_EVTCHN(evtchn))
1849 		event_handler_exit(info);
1850 }
1851 
1852 static void ack_dynirq(struct irq_data *data)
1853 {
1854 	struct irq_info *info = info_for_irq(data->irq);
1855 
1856 	if (info)
1857 		do_ack_dynirq(info);
1858 }
1859 
1860 static void mask_ack_dynirq(struct irq_data *data)
1861 {
1862 	struct irq_info *info = info_for_irq(data->irq);
1863 
1864 	if (info) {
1865 		do_disable_dynirq(info);
1866 		do_ack_dynirq(info);
1867 	}
1868 }
1869 
1870 static void lateeoi_ack_dynirq(struct irq_data *data)
1871 {
1872 	struct irq_info *info = info_for_irq(data->irq);
1873 	evtchn_port_t evtchn = info ? info->evtchn : 0;
1874 
1875 	if (VALID_EVTCHN(evtchn)) {
1876 		do_mask(info, EVT_MASK_REASON_EOI_PENDING);
1877 		/*
1878 		 * Don't call event_handler_exit().
1879 		 * Need to keep is_active non-zero in order to ignore re-raised
1880 		 * events after cpu affinity changes while a lateeoi is pending.
1881 		 */
1882 		clear_evtchn(evtchn);
1883 	}
1884 }
1885 
1886 static void lateeoi_mask_ack_dynirq(struct irq_data *data)
1887 {
1888 	struct irq_info *info = info_for_irq(data->irq);
1889 	evtchn_port_t evtchn = info ? info->evtchn : 0;
1890 
1891 	if (VALID_EVTCHN(evtchn)) {
1892 		do_mask(info, EVT_MASK_REASON_EXPLICIT);
1893 		event_handler_exit(info);
1894 	}
1895 }
1896 
1897 static int retrigger_dynirq(struct irq_data *data)
1898 {
1899 	struct irq_info *info = info_for_irq(data->irq);
1900 	evtchn_port_t evtchn = info ? info->evtchn : 0;
1901 
1902 	if (!VALID_EVTCHN(evtchn))
1903 		return 0;
1904 
1905 	do_mask(info, EVT_MASK_REASON_TEMPORARY);
1906 	set_evtchn(evtchn);
1907 	do_unmask(info, EVT_MASK_REASON_TEMPORARY);
1908 
1909 	return 1;
1910 }
1911 
1912 static void restore_pirqs(void)
1913 {
1914 	int pirq, rc, irq, gsi;
1915 	struct physdev_map_pirq map_irq;
1916 	struct irq_info *info;
1917 
1918 	list_for_each_entry(info, &xen_irq_list_head, list) {
1919 		if (info->type != IRQT_PIRQ)
1920 			continue;
1921 
1922 		pirq = info->u.pirq.pirq;
1923 		gsi = info->u.pirq.gsi;
1924 		irq = info->irq;
1925 
1926 		/* save/restore of PT devices doesn't work, so at this point the
1927 		 * only devices present are GSI based emulated devices */
1928 		if (!gsi)
1929 			continue;
1930 
1931 		map_irq.domid = DOMID_SELF;
1932 		map_irq.type = MAP_PIRQ_TYPE_GSI;
1933 		map_irq.index = gsi;
1934 		map_irq.pirq = pirq;
1935 
1936 		rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq);
1937 		if (rc) {
1938 			pr_warn("xen map irq failed gsi=%d irq=%d pirq=%d rc=%d\n",
1939 				gsi, irq, pirq, rc);
1940 			xen_free_irq(info);
1941 			continue;
1942 		}
1943 
1944 		printk(KERN_DEBUG "xen: --> irq=%d, pirq=%d\n", irq, map_irq.pirq);
1945 
1946 		__startup_pirq(info);
1947 	}
1948 }
1949 
1950 static void restore_cpu_virqs(unsigned int cpu)
1951 {
1952 	struct evtchn_bind_virq bind_virq;
1953 	evtchn_port_t evtchn;
1954 	struct irq_info *info;
1955 	int virq, irq;
1956 
1957 	for (virq = 0; virq < NR_VIRQS; virq++) {
1958 		if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
1959 			continue;
1960 		info = info_for_irq(irq);
1961 
1962 		BUG_ON(virq_from_irq(info) != virq);
1963 
1964 		/* Get a new binding from Xen. */
1965 		bind_virq.virq = virq;
1966 		bind_virq.vcpu = xen_vcpu_nr(cpu);
1967 		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
1968 						&bind_virq) != 0)
1969 			BUG();
1970 		evtchn = bind_virq.port;
1971 
1972 		/* Record the new mapping. */
1973 		xen_irq_info_virq_setup(info, cpu, evtchn, virq);
1974 		/* The affinity mask is still valid */
1975 		bind_evtchn_to_cpu(info, cpu, false);
1976 	}
1977 }
1978 
1979 static void restore_cpu_ipis(unsigned int cpu)
1980 {
1981 	struct evtchn_bind_ipi bind_ipi;
1982 	evtchn_port_t evtchn;
1983 	struct irq_info *info;
1984 	int ipi, irq;
1985 
1986 	for (ipi = 0; ipi < XEN_NR_IPIS; ipi++) {
1987 		if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
1988 			continue;
1989 		info = info_for_irq(irq);
1990 
1991 		BUG_ON(ipi_from_irq(info) != ipi);
1992 
1993 		/* Get a new binding from Xen. */
1994 		bind_ipi.vcpu = xen_vcpu_nr(cpu);
1995 		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
1996 						&bind_ipi) != 0)
1997 			BUG();
1998 		evtchn = bind_ipi.port;
1999 
2000 		/* Record the new mapping. */
2001 		xen_irq_info_ipi_setup(info, cpu, evtchn, ipi);
2002 		/* The affinity mask is still valid */
2003 		bind_evtchn_to_cpu(info, cpu, false);
2004 	}
2005 }
2006 
2007 /* Clear an irq's pending state, in preparation for polling on it */
2008 void xen_clear_irq_pending(int irq)
2009 {
2010 	struct irq_info *info = info_for_irq(irq);
2011 	evtchn_port_t evtchn = info ? info->evtchn : 0;
2012 
2013 	if (VALID_EVTCHN(evtchn))
2014 		event_handler_exit(info);
2015 }
2016 EXPORT_SYMBOL(xen_clear_irq_pending);
2017 
2018 bool xen_test_irq_pending(int irq)
2019 {
2020 	evtchn_port_t evtchn = evtchn_from_irq(irq);
2021 	bool ret = false;
2022 
2023 	if (VALID_EVTCHN(evtchn))
2024 		ret = test_evtchn(evtchn);
2025 
2026 	return ret;
2027 }
2028 
/*
 * Poll waiting for an irq to become pending with timeout.  In the usual case,
 * the irq will be disabled so it won't deliver an interrupt.
 */
2031 void xen_poll_irq_timeout(int irq, u64 timeout)
2032 {
2033 	evtchn_port_t evtchn = evtchn_from_irq(irq);
2034 
2035 	if (VALID_EVTCHN(evtchn)) {
2036 		struct sched_poll poll;
2037 
2038 		poll.nr_ports = 1;
2039 		poll.timeout = timeout;
2040 		set_xen_guest_handle(poll.ports, &evtchn);
2041 
2042 		if (HYPERVISOR_sched_op(SCHEDOP_poll, &poll) != 0)
2043 			BUG();
2044 	}
2045 }
EXPORT_SYMBOL(xen_poll_irq_timeout);

/*
 * Poll waiting for an irq to become pending.  In the usual case, the
 * irq will be disabled so it won't deliver an interrupt.
 */
2049 void xen_poll_irq(int irq)
2050 {
2051 	xen_poll_irq_timeout(irq, 0 /* no timeout */);
2052 }
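
/*
 * A rough sketch of the intended clear/poll sequence (e.g. the PV
 * spinlock slow path uses this pattern; 'irq' here is a hypothetical
 * event channel irq that is kept disabled so no handler runs):
 *
 *	xen_clear_irq_pending(irq);
 *	if (!condition_we_are_waiting_for)
 *		xen_poll_irq(irq);	- returns once the evtchn fires
 */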
2053 
2054 /* Check whether the IRQ line is shared with other guests. */
2055 int xen_test_irq_shared(int irq)
2056 {
2057 	struct irq_info *info = info_for_irq(irq);
2058 	struct physdev_irq_status_query irq_status;
2059 
2060 	if (WARN_ON(!info))
2061 		return -ENOENT;
2062 
2063 	irq_status.irq = info->u.pirq.pirq;
2064 
2065 	if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
2066 		return 0;
2067 	return !(irq_status.flags & XENIRQSTAT_shared);
2068 }
2069 EXPORT_SYMBOL_GPL(xen_test_irq_shared);
2070 
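/*
 * Called on resume: event channel assignments do not survive a
 * suspend/resume cycle, so drop all existing irq <-> evtchn mappings and
 * rebuild the VIRQ, IPI and PIRQ bindings from the retained irq_info.
 */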
2071 void xen_irq_resume(void)
2072 {
2073 	unsigned int cpu;
2074 	struct irq_info *info;
2075 
2076 	/* New event-channel space is not 'live' yet. */
2077 	xen_evtchn_resume();
2078 
2079 	/* No IRQ <-> event-channel mappings. */
2080 	list_for_each_entry(info, &xen_irq_list_head, list) {
2081 		/* Zap event-channel binding */
2082 		info->evtchn = 0;
2083 		/* Adjust accounting */
2084 		channels_on_cpu_dec(info);
2085 	}
2086 
2087 	clear_evtchn_to_irq_all();
2088 
2089 	for_each_possible_cpu(cpu) {
2090 		restore_cpu_virqs(cpu);
2091 		restore_cpu_ipis(cpu);
2092 	}
2093 
2094 	restore_pirqs();
2095 }
2096 
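/*
 * irq_chip implementations backing the different irq types: "xen-dyn"
 * for ordinary dynamic event channels, "xen-dyn-lateeoi" for channels
 * whose EOI is deferred until the handler signals completion,
 * "xen-pirq" for hardware interrupts routed through Xen, and
 * "xen-percpu" for per-cpu VIRQ/IPI channels.
 */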
2097 static struct irq_chip xen_dynamic_chip __read_mostly = {
2098 	.name			= "xen-dyn",
2099 
2100 	.irq_disable		= disable_dynirq,
2101 	.irq_mask		= disable_dynirq,
2102 	.irq_unmask		= enable_dynirq,
2103 
2104 	.irq_ack		= ack_dynirq,
2105 	.irq_mask_ack		= mask_ack_dynirq,
2106 
2107 	.irq_set_affinity	= set_affinity_irq,
2108 	.irq_retrigger		= retrigger_dynirq,
2109 };
2110 
2111 static struct irq_chip xen_lateeoi_chip __read_mostly = {
2112 	/* The chip name needs to contain "xen-dyn" for irqbalance to work. */
2113 	.name			= "xen-dyn-lateeoi",
2114 
2115 	.irq_disable		= disable_dynirq,
2116 	.irq_mask		= disable_dynirq,
2117 	.irq_unmask		= enable_dynirq,
2118 
2119 	.irq_ack		= lateeoi_ack_dynirq,
2120 	.irq_mask_ack		= lateeoi_mask_ack_dynirq,
2121 
2122 	.irq_set_affinity	= set_affinity_irq,
2123 	.irq_retrigger		= retrigger_dynirq,
2124 };
2125 
2126 static struct irq_chip xen_pirq_chip __read_mostly = {
2127 	.name			= "xen-pirq",
2128 
2129 	.irq_startup		= startup_pirq,
2130 	.irq_shutdown		= shutdown_pirq,
2131 	.irq_enable		= enable_pirq,
2132 	.irq_disable		= disable_pirq,
2133 
2134 	.irq_mask		= disable_dynirq,
2135 	.irq_unmask		= enable_dynirq,
2136 
2137 	.irq_ack		= eoi_pirq,
2138 	.irq_eoi		= eoi_pirq,
2139 	.irq_mask_ack		= mask_ack_pirq,
2140 
2141 	.irq_set_affinity	= set_affinity_irq,
2142 
2143 	.irq_retrigger		= retrigger_dynirq,
2144 };
2145 
2146 static struct irq_chip xen_percpu_chip __read_mostly = {
2147 	.name			= "xen-percpu",
2148 
2149 	.irq_disable		= disable_dynirq,
2150 	.irq_mask		= disable_dynirq,
2151 	.irq_unmask		= enable_dynirq,
2152 
2153 	.irq_ack		= ack_dynirq,
2154 };
2155 
2156 #ifdef CONFIG_X86
2157 #ifdef CONFIG_XEN_PVHVM
/*
 * Vector callbacks are better than PCI interrupts to receive event
 * channel notifications because we can receive vector callbacks on any
 * vcpu and we don't need PCI support or APIC interactions.
 */
2161 void xen_setup_callback_vector(void)
2162 {
2163 	uint64_t callback_via;
2164 
2165 	if (xen_have_vector_callback) {
2166 		callback_via = HVM_CALLBACK_VECTOR(HYPERVISOR_CALLBACK_VECTOR);
2167 		if (xen_set_callback_via(callback_via)) {
2168 			pr_err("Request for Xen HVM callback vector failed\n");
2169 			xen_have_vector_callback = false;
2170 		}
2171 	}
2172 }
2173 
2174 /*
2175  * Setup per-vCPU vector-type callbacks. If this setup is unavailable,
2176  * fallback to the global vector-type callback.
2177  */
2178 static __init void xen_init_setup_upcall_vector(void)
2179 {
2180 	if (!xen_have_vector_callback)
2181 		return;
2182 
2183 	if ((cpuid_eax(xen_cpuid_base() + 4) & XEN_HVM_CPUID_UPCALL_VECTOR) &&
2184 	    !xen_set_upcall_vector(0))
2185 		xen_percpu_upcall = true;
2186 	else if (xen_feature(XENFEAT_hvm_callback_vector))
2187 		xen_setup_callback_vector();
2188 	else
2189 		xen_have_vector_callback = false;
2190 }
2191 
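/*
 * Register HYPERVISOR_CALLBACK_VECTOR as the per-vCPU upcall vector for
 * @cpu via HVMOP_set_evtchn_upcall_vector.
 */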
2192 int xen_set_upcall_vector(unsigned int cpu)
2193 {
2194 	int rc;
2195 	xen_hvm_evtchn_upcall_vector_t op = {
2196 		.vector = HYPERVISOR_CALLBACK_VECTOR,
2197 		.vcpu = per_cpu(xen_vcpu_id, cpu),
2198 	};
2199 
2200 	rc = HYPERVISOR_hvm_op(HVMOP_set_evtchn_upcall_vector, &op);
2201 	if (rc)
2202 		return rc;
2203 
2204 	/* Trick toolstack to think we are enlightened. */
2205 	if (!cpu)
2206 		rc = xen_set_callback_via(1);
2207 
2208 	return rc;
2209 }
2210 
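/* Install the IDT entry for the global HVM callback vector. */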
2211 static __init void xen_alloc_callback_vector(void)
2212 {
2213 	if (!xen_have_vector_callback)
2214 		return;
2215 
2216 	pr_info("Xen HVM callback vector for event delivery is enabled\n");
2217 	alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, asm_sysvec_xen_hvm_callback);
2218 }
2219 #else
2220 void xen_setup_callback_vector(void) {}
2221 static inline void xen_init_setup_upcall_vector(void) {}
int xen_set_upcall_vector(unsigned int cpu) { return 0; }
2223 static inline void xen_alloc_callback_vector(void) {}
2224 #endif /* CONFIG_XEN_PVHVM */
2225 #endif /* CONFIG_X86 */
2226 
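/*
 * Prefer the FIFO-based event channel ABI; it can be disabled with
 * "xen.fifo_events=0" on the kernel command line, in which case (or if
 * FIFO initialization fails) the 2-level ABI is used instead.
 */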
2227 bool xen_fifo_events = true;
2228 module_param_named(fifo_events, xen_fifo_events, bool, 0);
2229 
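/* CPU hotplug callbacks: set up / tear down per-cpu event channel state. */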
2230 static int xen_evtchn_cpu_prepare(unsigned int cpu)
2231 {
2232 	int ret = 0;
2233 
2234 	xen_cpu_init_eoi(cpu);
2235 
2236 	if (evtchn_ops->percpu_init)
2237 		ret = evtchn_ops->percpu_init(cpu);
2238 
2239 	return ret;
2240 }
2241 
2242 static int xen_evtchn_cpu_dead(unsigned int cpu)
2243 {
2244 	int ret = 0;
2245 
2246 	if (evtchn_ops->percpu_deinit)
2247 		ret = evtchn_ops->percpu_deinit(cpu);
2248 
2249 	return ret;
2250 }
2251 
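/*
 * Early interrupt setup: select the event channel ABI (FIFO if requested
 * and supported, 2-level otherwise), allocate the evtchn -> irq
 * translation table, mask all event channels and, on x86, set up the
 * callback vector or the PIRQ EOI map as appropriate.
 */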
2252 void __init xen_init_IRQ(void)
2253 {
2254 	int ret = -EINVAL;
2255 	evtchn_port_t evtchn;
2256 
2257 	if (xen_fifo_events)
2258 		ret = xen_evtchn_fifo_init();
2259 	if (ret < 0) {
2260 		xen_evtchn_2l_init();
2261 		xen_fifo_events = false;
2262 	}
2263 
2264 	xen_cpu_init_eoi(smp_processor_id());
2265 
2266 	cpuhp_setup_state_nocalls(CPUHP_XEN_EVTCHN_PREPARE,
2267 				  "xen/evtchn:prepare",
2268 				  xen_evtchn_cpu_prepare, xen_evtchn_cpu_dead);
2269 
2270 	evtchn_to_irq = kcalloc(EVTCHN_ROW(xen_evtchn_max_channels()),
2271 				sizeof(*evtchn_to_irq), GFP_KERNEL);
2272 	BUG_ON(!evtchn_to_irq);
2273 
2274 	/* No event channels are 'live' right now. */
2275 	for (evtchn = 0; evtchn < xen_evtchn_nr_channels(); evtchn++)
2276 		mask_evtchn(evtchn);
2277 
2278 	pirq_needs_eoi = pirq_needs_eoi_flag;
2279 
2280 #ifdef CONFIG_X86
2281 	if (xen_pv_domain()) {
2282 		if (xen_initial_domain())
2283 			pci_xen_initial_domain();
2284 	}
2285 	xen_init_setup_upcall_vector();
2286 	xen_alloc_callback_vector();
2287 
2289 	if (xen_hvm_domain()) {
2290 		native_init_IRQ();
		/*
		 * pci_xen_hvm_init must be called after native_init_IRQ so
		 * that __acpi_register_gsi can point at the right function.
		 */
2293 		pci_xen_hvm_init();
2294 	} else {
2295 		int rc;
2296 		struct physdev_pirq_eoi_gmfn eoi_gmfn;
2297 
2298 		pirq_eoi_map = (void *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
2299 		eoi_gmfn.gmfn = virt_to_gfn(pirq_eoi_map);
2300 		rc = HYPERVISOR_physdev_op(PHYSDEVOP_pirq_eoi_gmfn_v2, &eoi_gmfn);
2301 		if (rc != 0) {
2302 			free_page((unsigned long) pirq_eoi_map);
2303 			pirq_eoi_map = NULL;
		} else {
			pirq_needs_eoi = pirq_check_eoi_map;
		}
2306 	}
2307 #endif
2308 }
2309