xref: /linux/arch/um/kernel/irq.c (revision a1ff5a7d78a036d6c2178ee5acd6ba4946243800)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 - Cambridge Greys Ltd
 * Copyright (C) 2011 - 2014 Cisco Systems Inc
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Derived (i.e. mostly copied) from arch/i386/kernel/irq.c:
 *	Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 */

#include <linux/cpumask.h>
#include <linux/hardirq.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <as-layout.h>
#include <kern_util.h>
#include <os.h>
#include <irq_user.h>
#include <irq_kern.h>
#include <linux/time-internal.h>

/* When epoll triggers, we do not know why it did so;
 * we can also have different IRQs for read and write.
 * This is why we keep a small irq_reg array for each fd -
 * one entry per IRQ type.
 */
struct irq_reg {
	void *id;
	int irq;
	/* it's cheaper to store this than to query it */
	int events;
	bool active;
	bool pending;
	bool wakeup;
#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
	bool pending_event;
	void (*timetravel_handler)(int, int, void *,
				   struct time_travel_event *);
	struct time_travel_event event;
#endif
};

struct irq_entry {
	struct list_head list;
	int fd;
	struct irq_reg reg[NUM_IRQ_TYPES];
	bool suspended;
	bool sigio_workaround;
};

static DEFINE_SPINLOCK(irq_lock);
static LIST_HEAD(active_fds);
static DECLARE_BITMAP(irqs_allocated, UM_LAST_SIGNAL_IRQ);
static bool irqs_suspended;
#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
static bool irqs_pending;
#endif

static void irq_io_loop(struct irq_reg *irq, struct uml_pt_regs *regs)
{
	/*
	 * irq->active guards against reentry;
	 * irq->pending accumulates pending requests.
	 * If pending is raised, the IRQ handler is re-run
	 * until pending is cleared.
	 */
	if (irq->active) {
		irq->active = false;

		do {
			irq->pending = false;
			do_IRQ(irq->irq, regs);
		} while (irq->pending);

		irq->active = true;
	} else {
		irq->pending = true;
	}
}

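/*
 * Illustrative trace of the active/pending protocol above (an
 * editor's sketch, not part of the original file): assume a nested
 * invocation arrives for the same irq_reg while do_IRQ() is running.
 *
 *	outer call:  active = false, pending = false, do_IRQ() runs
 *	nested call: sees active == false, sets pending = true, returns
 *	outer call:  do_IRQ() returns, sees pending, clears it and
 *	             re-runs do_IRQ()
 *
 * No request is lost and do_IRQ() never runs reentrantly for the
 * same registration.
 */
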
#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
static void irq_event_handler(struct time_travel_event *ev)
{
	struct irq_reg *reg = container_of(ev, struct irq_reg, event);

	/* do nothing if suspended; just cause a wakeup and mark as pending */
	if (irqs_suspended) {
		irqs_pending = true;
		reg->pending_event = true;
		return;
	}

	generic_handle_irq(reg->irq);
}

static bool irq_do_timetravel_handler(struct irq_entry *entry,
				      enum um_irq_type t)
{
	struct irq_reg *reg = &entry->reg[t];

	if (!reg->timetravel_handler)
		return false;

	/*
	 * Handle all messages - we might get multiple even while
	 * interrupts are already suspended, due to suspend order
	 * etc. Note that time_travel_add_irq_event() will not add
	 * an event twice; if one is pending already, "first wins".
	 */
	reg->timetravel_handler(reg->irq, entry->fd, reg->id, &reg->event);

	if (!reg->event.pending)
		return false;

	return true;
}

static void irq_do_pending_events(bool timetravel_handlers_only)
{
	struct irq_entry *entry;

	if (!irqs_pending || timetravel_handlers_only)
		return;

	irqs_pending = false;

	list_for_each_entry(entry, &active_fds, list) {
		enum um_irq_type t;

		for (t = 0; t < NUM_IRQ_TYPES; t++) {
			struct irq_reg *reg = &entry->reg[t];

			/*
			 * Any timetravel_handler was invoked already, just
			 * directly run the IRQ.
			 */
			if (reg->pending_event) {
				irq_enter();
				generic_handle_irq(reg->irq);
				irq_exit();
				reg->pending_event = false;
			}
		}
	}
}
#else
static bool irq_do_timetravel_handler(struct irq_entry *entry,
				      enum um_irq_type t)
{
	return false;
}

static void irq_do_pending_events(bool timetravel_handlers_only)
{
}
#endif

static void sigio_reg_handler(int idx, struct irq_entry *entry, enum um_irq_type t,
			      struct uml_pt_regs *regs,
			      bool timetravel_handlers_only)
{
	struct irq_reg *reg = &entry->reg[t];

	if (!reg->events)
		return;

	if (os_epoll_triggered(idx, reg->events) <= 0)
		return;

	if (irq_do_timetravel_handler(entry, t))
		return;

	/*
	 * If we're called to only run time-travel handlers then don't
	 * actually proceed but mark sigio as pending (if applicable).
	 * For suspend/resume, timetravel_handlers_only may be true
	 * despite time-travel not being configured and used.
	 */
	if (timetravel_handlers_only) {
#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
		reg->pending_event = true;
		irqs_pending = true;
		mark_sigio_pending();
#endif
		return;
	}

	irq_io_loop(reg, regs);
}

static void _sigio_handler(struct uml_pt_regs *regs,
			   bool timetravel_handlers_only)
{
	struct irq_entry *irq_entry;
	int n, i;

	if (timetravel_handlers_only && !um_irq_timetravel_handler_used())
		return;

	/* Flush out pending events that were ignored due to time-travel. */
	if (!irqs_suspended)
		irq_do_pending_events(timetravel_handlers_only);

	while (1) {
		/* This is now lockless - epoll keeps back-references to the
		 * irqs which have triggered it, so there is no need to walk
		 * the irq list and lock it every time. We avoid locking by
		 * turning off IO for a specific fd by executing
		 * os_del_epoll_fd(fd) before we do any changes to the actual
		 * data structures.
		 */
		n = os_waiting_for_events_epoll();

		if (n <= 0) {
			if (n == -EINTR)
				continue;
			else
				break;
		}

		for (i = 0; i < n; i++) {
			enum um_irq_type t;

			irq_entry = os_epoll_get_data_pointer(i);

			for (t = 0; t < NUM_IRQ_TYPES; t++)
				sigio_reg_handler(i, irq_entry, t, regs,
						  timetravel_handlers_only);
		}
	}

	if (!timetravel_handlers_only)
		free_irqs();
}

void sigio_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
{
	preempt_disable();
	_sigio_handler(regs, irqs_suspended);
	preempt_enable();
}

static struct irq_entry *get_irq_entry_by_fd(int fd)
{
	struct irq_entry *walk;

	lockdep_assert_held(&irq_lock);

	list_for_each_entry(walk, &active_fds, list) {
		if (walk->fd == fd)
			return walk;
	}

	return NULL;
}

static void free_irq_entry(struct irq_entry *to_free, bool remove)
{
	if (!to_free)
		return;

	if (remove)
		os_del_epoll_fd(to_free->fd);
	list_del(&to_free->list);
	kfree(to_free);
}

static bool update_irq_entry(struct irq_entry *entry)
{
	enum um_irq_type i;
	int events = 0;

	for (i = 0; i < NUM_IRQ_TYPES; i++)
		events |= entry->reg[i].events;

	if (events) {
		/* will modify (instead of add) if needed */
		os_add_epoll_fd(events, entry->fd, entry);
		return true;
	}

	os_del_epoll_fd(entry->fd);
	return false;
}

static void update_or_free_irq_entry(struct irq_entry *entry)
{
	if (!update_irq_entry(entry))
		free_irq_entry(entry, false);
}

static int activate_fd(int irq, int fd, enum um_irq_type type, void *dev_id,
		       void (*timetravel_handler)(int, int, void *,
						  struct time_travel_event *))
{
	struct irq_entry *irq_entry;
	int err, events = os_event_mask(type);
	unsigned long flags;

	err = os_set_fd_async(fd);
	if (err < 0)
		goto out;

	spin_lock_irqsave(&irq_lock, flags);
	irq_entry = get_irq_entry_by_fd(fd);
	if (irq_entry) {
		/* cannot register the same FD twice with the same type */
		if (WARN_ON(irq_entry->reg[type].events)) {
			err = -EALREADY;
			goto out_unlock;
		}

		/* temporarily disable to avoid IRQ-side locking */
		os_del_epoll_fd(fd);
	} else {
		irq_entry = kzalloc(sizeof(*irq_entry), GFP_ATOMIC);
		if (!irq_entry) {
			err = -ENOMEM;
			goto out_unlock;
		}
		irq_entry->fd = fd;
		list_add_tail(&irq_entry->list, &active_fds);
		maybe_sigio_broken(fd);
	}

	irq_entry->reg[type].id = dev_id;
	irq_entry->reg[type].irq = irq;
	irq_entry->reg[type].active = true;
	irq_entry->reg[type].events = events;

#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
	if (um_irq_timetravel_handler_used()) {
		irq_entry->reg[type].timetravel_handler = timetravel_handler;
		irq_entry->reg[type].event.fn = irq_event_handler;
	}
#endif

	WARN_ON(!update_irq_entry(irq_entry));
	spin_unlock_irqrestore(&irq_lock, flags);

	return 0;
out_unlock:
	spin_unlock_irqrestore(&irq_lock, flags);
out:
	return err;
}

/*
 * Remove the entry or entries for a specific FD. If you
 * don't want to remove all the possible entries then use
 * um_free_irq() or deactivate_fd() instead.
 */
void free_irq_by_fd(int fd)
{
	struct irq_entry *to_free;
	unsigned long flags;

	spin_lock_irqsave(&irq_lock, flags);
	to_free = get_irq_entry_by_fd(fd);
	free_irq_entry(to_free, true);
	spin_unlock_irqrestore(&irq_lock, flags);
}
EXPORT_SYMBOL(free_irq_by_fd);

static void free_irq_by_irq_and_dev(unsigned int irq, void *dev)
{
	struct irq_entry *entry;
	unsigned long flags;

	spin_lock_irqsave(&irq_lock, flags);
	list_for_each_entry(entry, &active_fds, list) {
		enum um_irq_type i;

		for (i = 0; i < NUM_IRQ_TYPES; i++) {
			struct irq_reg *reg = &entry->reg[i];

			if (!reg->events)
				continue;
			if (reg->irq != irq)
				continue;
			if (reg->id != dev)
				continue;

			os_del_epoll_fd(entry->fd);
			reg->events = 0;
			update_or_free_irq_entry(entry);
			goto out;
		}
	}
out:
	spin_unlock_irqrestore(&irq_lock, flags);
}

void deactivate_fd(int fd, int irqnum)
{
	struct irq_entry *entry;
	unsigned long flags;
	enum um_irq_type i;

	os_del_epoll_fd(fd);

	spin_lock_irqsave(&irq_lock, flags);
	entry = get_irq_entry_by_fd(fd);
	if (!entry)
		goto out;

	for (i = 0; i < NUM_IRQ_TYPES; i++) {
		if (!entry->reg[i].events)
			continue;
		if (entry->reg[i].irq == irqnum)
			entry->reg[i].events = 0;
	}

	update_or_free_irq_entry(entry);
out:
	spin_unlock_irqrestore(&irq_lock, flags);

	ignore_sigio_fd(fd);
}
EXPORT_SYMBOL(deactivate_fd);

/*
 * Called just before shutdown in order to provide a clean exec
 * environment in case the system is rebooting.  No locking because
 * that would cause a pointless shutdown hang if something hadn't
 * released the lock.
 */
int deactivate_all_fds(void)
{
	struct irq_entry *entry;

	/* Stop IO. The IRQ loop has no lock, so this is our
	 * only way of making sure we are safe to dispose
	 * of all IRQ handlers.
	 */
	os_set_ioignore();

	/* we can no longer call kfree() here so just deactivate */
	list_for_each_entry(entry, &active_fds, list)
		os_del_epoll_fd(entry->fd);
	os_close_epoll_fd();
	return 0;
}

/*
 * do_IRQ handles all normal device IRQs (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
unsigned int do_IRQ(int irq, struct uml_pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs((struct pt_regs *)regs);
	irq_enter();
	generic_handle_irq(irq);
	irq_exit();
	set_irq_regs(old_regs);
	return 1;
}

void um_free_irq(int irq, void *dev)
{
	if (WARN(irq < 0 || irq > UM_LAST_SIGNAL_IRQ,
		 "freeing invalid irq %d", irq))
		return;

	free_irq_by_irq_and_dev(irq, dev);
	free_irq(irq, dev);
	clear_bit(irq, irqs_allocated);
}
EXPORT_SYMBOL(um_free_irq);

static int
_um_request_irq(int irq, int fd, enum um_irq_type type,
		irq_handler_t handler, unsigned long irqflags,
		const char *devname, void *dev_id,
		void (*timetravel_handler)(int, int, void *,
					   struct time_travel_event *))
{
	int err;

	if (irq == UM_IRQ_ALLOC) {
		int i;

		for (i = UM_FIRST_DYN_IRQ; i < NR_IRQS; i++) {
			if (!test_and_set_bit(i, irqs_allocated)) {
				irq = i;
				break;
			}
		}
	}

	if (irq < 0)
		return -ENOSPC;

	if (fd != -1) {
		err = activate_fd(irq, fd, type, dev_id, timetravel_handler);
		if (err)
			goto error;
	}

	err = request_irq(irq, handler, irqflags, devname, dev_id);
	if (err < 0)
		goto error;

	return irq;
error:
	clear_bit(irq, irqs_allocated);
	return err;
}

int um_request_irq(int irq, int fd, enum um_irq_type type,
		   irq_handler_t handler, unsigned long irqflags,
		   const char *devname, void *dev_id)
{
	return _um_request_irq(irq, fd, type, handler, irqflags,
			       devname, dev_id, NULL);
}
EXPORT_SYMBOL(um_request_irq);

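/*
 * Usage sketch (an editor's illustration, not part of the original
 * file; my_fd, my_dev, and my_isr are hypothetical): a driver that
 * owns a pollable host fd could register a read IRQ like this.
 *
 *	static irqreturn_t my_isr(int irq, void *dev_id)
 *	{
 *		// consume the event on the fd here
 *		return IRQ_HANDLED;
 *	}
 *
 *	int irq = um_request_irq(UM_IRQ_ALLOC, my_fd, IRQ_READ, my_isr,
 *				 0, "my-device", my_dev);
 *	if (irq < 0)
 *		return irq;
 *
 * On success the (possibly dynamically allocated) IRQ number is
 * returned and must be saved so it can later be passed to
 * um_free_irq().
 */
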
#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
int um_request_irq_tt(int irq, int fd, enum um_irq_type type,
		      irq_handler_t handler, unsigned long irqflags,
		      const char *devname, void *dev_id,
		      void (*timetravel_handler)(int, int, void *,
						 struct time_travel_event *))
{
	return _um_request_irq(irq, fd, type, handler, irqflags,
			       devname, dev_id, timetravel_handler);
}
EXPORT_SYMBOL(um_request_irq_tt);

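/*
 * Time-travel variant sketch (editor's illustration, reusing the
 * hypothetical my_isr/my_fd/my_dev from above): the extra handler is
 * invoked when the fd fires while time travel is in use, and a
 * typical implementation just schedules the event it is handed, as
 * the comment in irq_do_timetravel_handler() describes.
 *
 *	static void my_tt_handler(int irq, int fd, void *data,
 *				  struct time_travel_event *ev)
 *	{
 *		time_travel_add_irq_event(ev);
 *	}
 *
 *	irq = um_request_irq_tt(UM_IRQ_ALLOC, my_fd, IRQ_READ, my_isr,
 *				0, "my-device", my_dev, my_tt_handler);
 */
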
void sigio_run_timetravel_handlers(void)
{
	_sigio_handler(NULL, true);
}
#endif

#ifdef CONFIG_PM_SLEEP
void um_irqs_suspend(void)
{
	struct irq_entry *entry;
	unsigned long flags;

	irqs_suspended = true;

	spin_lock_irqsave(&irq_lock, flags);
	list_for_each_entry(entry, &active_fds, list) {
		enum um_irq_type t;
		bool clear = true;

		for (t = 0; t < NUM_IRQ_TYPES; t++) {
			if (!entry->reg[t].events)
				continue;

			/*
			 * For the SIGIO_WRITE_IRQ, which is used to handle
			 * the SIGIO workaround thread, we need special
			 * handling: we must keep wake enabled for it itself,
			 * but below we tell it about any FDs that should be
			 * suspended.
			 */
			if (entry->reg[t].wakeup ||
			    entry->reg[t].irq == SIGIO_WRITE_IRQ
#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
			    || entry->reg[t].timetravel_handler
#endif
			    ) {
				clear = false;
				break;
			}
		}

		if (clear) {
			entry->suspended = true;
			os_clear_fd_async(entry->fd);
			entry->sigio_workaround =
				!__ignore_sigio_fd(entry->fd);
		}
	}
	spin_unlock_irqrestore(&irq_lock, flags);
}

void um_irqs_resume(void)
{
	struct irq_entry *entry;
	unsigned long flags;

	spin_lock_irqsave(&irq_lock, flags);
	list_for_each_entry(entry, &active_fds, list) {
		if (entry->suspended) {
			int err = os_set_fd_async(entry->fd);

			WARN(err < 0, "os_set_fd_async returned %d\n", err);
			entry->suspended = false;

			if (entry->sigio_workaround) {
				err = __add_sigio_fd(entry->fd);
				WARN(err < 0, "__add_sigio_fd returned %d\n", err);
			}
		}
	}
	spin_unlock_irqrestore(&irq_lock, flags);

	irqs_suspended = false;
	send_sigio_to_self();
}

static int normal_irq_set_wake(struct irq_data *d, unsigned int on)
{
	struct irq_entry *entry;
	unsigned long flags;

	spin_lock_irqsave(&irq_lock, flags);
	list_for_each_entry(entry, &active_fds, list) {
		enum um_irq_type t;

		for (t = 0; t < NUM_IRQ_TYPES; t++) {
			if (!entry->reg[t].events)
				continue;

			if (entry->reg[t].irq != d->irq)
				continue;
			entry->reg[t].wakeup = on;
			goto unlock;
		}
	}
unlock:
	spin_unlock_irqrestore(&irq_lock, flags);
	return 0;
}
#else
#define normal_irq_set_wake NULL
#endif

/*
 * irq_chip must define at least enable/disable and ack when
 * the edge handler is used.
 */
static void dummy(struct irq_data *d)
{
}

/* This is used for everything other than the timer. */
static struct irq_chip normal_irq_type = {
	.name = "SIGIO",
	.irq_disable = dummy,
	.irq_enable = dummy,
	.irq_ack = dummy,
	.irq_mask = dummy,
	.irq_unmask = dummy,
	.irq_set_wake = normal_irq_set_wake,
};

static struct irq_chip alarm_irq_type = {
	.name = "SIGALRM",
	.irq_disable = dummy,
	.irq_enable = dummy,
	.irq_ack = dummy,
	.irq_mask = dummy,
	.irq_unmask = dummy,
};

void __init init_IRQ(void)
{
	int i;

	irq_set_chip_and_handler(TIMER_IRQ, &alarm_irq_type, handle_edge_irq);

	for (i = 1; i < UM_LAST_SIGNAL_IRQ; i++)
		irq_set_chip_and_handler(i, &normal_irq_type, handle_edge_irq);
	/* Initialize EPOLL Loop */
	os_setup_epoll();
}

/*
 * IRQ stack entry and exit:
 *
 * Unlike i386, UML doesn't receive IRQs on the normal kernel stack
 * and switch over to the IRQ stack after some preparation.  We use
 * sigaltstack to receive signals on a separate stack from the start.
 * These two functions make sure the rest of the kernel won't be too
 * upset by being on a different stack.  The IRQ stack has a
 * thread_info structure at the bottom so that current et al continue
 * to work.
 *
 * to_irq_stack copies the current task's thread_info to the IRQ stack
 * thread_info and sets the task's stack to point to the IRQ stack.
 *
 * from_irq_stack copies the thread_info struct back (flags may have
 * been modified) and resets the task's stack pointer.
 *
 * Tricky bits -
 *
 * What happens when two signals race each other?  UML doesn't block
 * signals with sigprocmask, SA_DEFER, or sa_mask, so a second signal
 * could arrive while a previous one is still setting up the
 * thread_info.
 *
 * There are three cases -
 *     The first interrupt on the stack - sets up the thread_info and
 * handles the interrupt
 *     A nested interrupt interrupting the copying of the thread_info -
 * can't handle the interrupt, as the stack is in an unknown state
 *     A nested interrupt not interrupting the copying of the
 * thread_info - doesn't do any setup, just handles the interrupt
 *
 * The first job is to figure out whether we interrupted stack setup.
 * This is done by xchging the signal mask with pending_mask.
 * If the value that comes back is zero, then there is no setup in
 * progress, and the interrupt can be handled.  If the value is
 * non-zero, then there is stack setup in progress.  In order to have
 * the interrupt handled, we leave our signal in the mask, and it will
 * be handled by the upper handler after it has set up the stack.
 *
 * Next is to figure out whether we are the outer handler or a nested
 * one.  As part of setting up the stack, thread_info->real_thread is
 * set to non-NULL (and is reset to NULL on exit).  This is the
 * nesting indicator.  If it is non-NULL, then the stack is already
 * set up and the handler can run.
 */
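
/*
 * Worked example of the protocol above (an editor's sketch, not part
 * of the original file): handler A is copying the thread_info, so
 * pending_mask is non-zero when handler B's signal arrives.
 *
 *	B: mask = xchg(&pending_mask, B's bit) returns non-zero, so B
 *	   keeps OR-ing its bit back in until xchg() returns exactly
 *	   what B stored, then returns 1: B's signal is left in the
 *	   mask for the outer handler.
 *	A: finishes the copy; its final xchg(&pending_mask, 0) picks
 *	   up B's bit, so B's interrupt is handled on the now-valid
 *	   IRQ stack.
 */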

static unsigned long pending_mask;

unsigned long to_irq_stack(unsigned long *mask_out)
{
	struct thread_info *ti;
	unsigned long mask, old;
	int nested;

	mask = xchg(&pending_mask, *mask_out);
	if (mask != 0) {
		/*
		 * If any interrupts come in at this point, we want to
		 * make sure that their bits aren't lost by our
		 * putting our bit in.  So, this loop accumulates bits
		 * until xchg returns the same value that we put in.
		 * When that happens, there were no new interrupts,
		 * and pending_mask contains a bit for each interrupt
		 * that came in.
		 */
		old = *mask_out;
		do {
			old |= mask;
			mask = xchg(&pending_mask, old);
		} while (mask != old);
		return 1;
	}

	ti = current_thread_info();
	nested = (ti->real_thread != NULL);
	if (!nested) {
		struct task_struct *task;
		struct thread_info *tti;

		task = cpu_tasks[ti->cpu].task;
		tti = task_thread_info(task);

		*ti = *tti;
		ti->real_thread = tti;
		task->stack = ti;
	}

	mask = xchg(&pending_mask, 0);
	*mask_out |= mask | nested;
	return 0;
}

unsigned long from_irq_stack(int nested)
{
	struct thread_info *ti, *to;
	unsigned long mask;

	ti = current_thread_info();

	pending_mask = 1;

	to = ti->real_thread;
	current->stack = to;
	ti->real_thread = NULL;
	*to = *ti;

	mask = xchg(&pending_mask, 0);
	return mask & ~1;
}