// SPDX-License-Identifier: GPL-2.0
/*
 * arch/um/kernel/irq.c
 *
 * Copyright (C) 2017 - Cambridge Greys Ltd
 * Copyright (C) 2011 - 2014 Cisco Systems Inc
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Derived (i.e. mostly copied) from arch/i386/kernel/irq.c:
 *	Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 */

#include <linux/cpumask.h>
#include <linux/hardirq.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <as-layout.h>
#include <kern_util.h>
#include <os.h>
#include <irq_user.h>
#include <irq_kern.h>

extern void free_irqs(void);
/* When epoll triggers we do not know why it did so;
 * we can also have different IRQs for read and write.
 * This is why we keep a small irq_reg array for each fd -
 * one entry per IRQ type.
 */

struct irq_reg {
	void *id;
	enum um_irq_type type;
	int irq;
	int events;
	bool active;
	bool pending;
	bool purge;
};

struct irq_entry {
	struct irq_entry *next;
	int fd;
	struct irq_reg *irq_array[NUM_IRQ_TYPES];
};

static struct irq_entry *active_fds;

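/* Protects active_fds and the irq_reg leaves hanging off it */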
static DEFINE_SPINLOCK(irq_lock);
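/* Tracks which IRQ numbers have been handed out via UM_IRQ_ALLOC */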
static DECLARE_BITMAP(irqs_allocated, NR_IRQS);

static void irq_io_loop(struct irq_reg *irq, struct uml_pt_regs *regs)
{
	/*
	 * irq->active guards against reentry;
	 * irq->pending accumulates pending requests.
	 * If pending is raised, the IRQ handler is re-run
	 * until pending is cleared.
	 */
	if (irq->active) {
		irq->active = false;
		do {
			irq->pending = false;
			do_IRQ(irq->irq, regs);
		} while (irq->pending && (!irq->purge));
		if (!irq->purge)
			irq->active = true;
	} else {
		irq->pending = true;
	}
}

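/*
 * SIGIO entry point: drain all pending epoll events and run the IRQ
 * handlers registered for each fd/IRQ-type pair that triggered.
 */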
void sigio_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
{
	struct irq_entry *irq_entry;
	struct irq_reg *irq;
	int n, i, j;

	while (1) {
		/* This is now lockless - epoll keeps back-references to the
		 * irqs which have triggered it, so there is no need to walk
		 * the irq list and lock it every time. We avoid locking by
		 * turning off IO for a specific fd with os_del_epoll_fd(fd)
		 * before we make any changes to the actual data structures.
		 */
		n = os_waiting_for_events_epoll();

		if (n <= 0) {
			if (n == -EINTR)
				continue;
			else
				break;
		}

		for (i = 0; i < n; i++) {
			/* The epoll back-reference is the irq_entry with one
			 * irq_reg leaf per IRQ type.
			 */
			irq_entry = (struct irq_entry *)
				os_epoll_get_data_pointer(i);
			for (j = 0; j < NUM_IRQ_TYPES; j++) {
				irq = irq_entry->irq_array[j];
				if (irq == NULL)
					continue;
				if (os_epoll_triggered(i, irq->events) > 0)
					irq_io_loop(irq, regs);
				if (irq->purge) {
					irq_entry->irq_array[j] = NULL;
					kfree(irq);
				}
			}
		}
	}

	free_irqs();
}

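/*
 * Recompute the union of the event masks of every IRQ registered on
 * this fd and update (or drop) the fd's epoll registration to match.
 */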
static int assign_epoll_events_to_irq(struct irq_entry *irq_entry)
{
	int i;
	int events = 0;
	struct irq_reg *irq;

	for (i = 0; i < NUM_IRQ_TYPES; i++) {
		irq = irq_entry->irq_array[i];
		if (irq != NULL)
			events = irq->events | events;
	}
	if (events > 0) {
		/* os_add_epoll will call os_mod_epoll if this already exists */
		return os_add_epoll_fd(events, irq_entry->fd, irq_entry);
	}
	/* No events - delete */
	return os_del_epoll_fd(irq_entry->fd);
}

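/*
 * Register an IRQ of the given type on an fd: find or allocate the
 * fd's irq_entry, attach a new irq_reg leaf to it and re-arm epoll
 * with the updated event mask.
 */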
static int activate_fd(int irq, int fd, enum um_irq_type type, void *dev_id)
{
	struct irq_reg *new_fd;
	struct irq_entry *irq_entry;
	int i, err, events;
	unsigned long flags;

	err = os_set_fd_async(fd);
	if (err < 0)
		goto out;

	spin_lock_irqsave(&irq_lock, flags);

	/* Check if we already have an entry for this fd */

	err = -EBUSY;
	for (irq_entry = active_fds;
		irq_entry != NULL; irq_entry = irq_entry->next) {
		if (irq_entry->fd == fd)
			break;
	}

	if (irq_entry == NULL) {
		/* This needs to be atomic as it may be called from an
		 * IRQ context.
		 */
		irq_entry = kmalloc(sizeof(struct irq_entry), GFP_ATOMIC);
		if (irq_entry == NULL) {
			printk(KERN_ERR
				"Failed to allocate new IRQ entry\n");
			goto out_unlock;
		}
		irq_entry->fd = fd;
		for (i = 0; i < NUM_IRQ_TYPES; i++)
			irq_entry->irq_array[i] = NULL;
		irq_entry->next = active_fds;
		active_fds = irq_entry;
	}

	/* Check if we are trying to re-register an interrupt for a
	 * particular fd
	 */

	if (irq_entry->irq_array[type] != NULL) {
		printk(KERN_ERR
			"Trying to reregister IRQ %d FD %d TYPE %d ID %p\n",
			irq, fd, type, dev_id
		);
		goto out_unlock;
	} else {
		/* New entry for this fd */

		err = -ENOMEM;
		new_fd = kmalloc(sizeof(struct irq_reg), GFP_ATOMIC);
		if (new_fd == NULL)
			goto out_unlock;

		events = os_event_mask(type);

		*new_fd = ((struct irq_reg) {
			.id		= dev_id,
			.irq		= irq,
			.type		= type,
			.events		= events,
			.active		= true,
			.pending	= false,
			.purge		= false
		});
		/* Turn off any IO on this fd - allows us to
		 * avoid locking the IRQ loop
		 */
		os_del_epoll_fd(irq_entry->fd);
		irq_entry->irq_array[type] = new_fd;
	}

	/* Turn IO back on with the correct (new) IO event mask */
	assign_epoll_events_to_irq(irq_entry);
	spin_unlock_irqrestore(&irq_lock, flags);
	maybe_sigio_broken(fd);

	return 0;
out_unlock:
	spin_unlock_irqrestore(&irq_lock, flags);
out:
	return err;
}

/*
 * Walk the IRQ list and dispose of any unused entries.
 * Must be called under irq_lock.
 */
static void garbage_collect_irq_entries(void)
{
	int i;
	bool reap;
	struct irq_entry *walk;
	struct irq_entry *previous = NULL;
	struct irq_entry *to_free;

	if (active_fds == NULL)
		return;
	walk = active_fds;
	while (walk != NULL) {
		reap = true;
		for (i = 0; i < NUM_IRQ_TYPES; i++) {
			if (walk->irq_array[i] != NULL) {
				reap = false;
				break;
			}
		}
		if (reap) {
			if (previous == NULL)
				active_fds = walk->next;
			else
				previous->next = walk->next;
			to_free = walk;
		} else {
			/* Keep the unlink anchor up to date (was missing) */
			previous = walk;
			to_free = NULL;
		}
		walk = walk->next;
		kfree(to_free);
	}
}

/*
 * Walk the IRQ list and get the descriptor for our FD
 */
static struct irq_entry *get_irq_entry_by_fd(int fd)
{
	struct irq_entry *walk = active_fds;

	while (walk != NULL) {
		if (walk->fd == fd)
			return walk;
		walk = walk->next;
	}
	return NULL;
}

/*
 * Walk the IRQ list and dispose of an entry for a specific
 * device and number. Note: if an IRQ is shared for read
 * and write on the same fd, it is disposed of in either case.
 * If this behaviour is undesirable use different IRQ ids.
 */

#define IGNORE_IRQ 1
#define IGNORE_DEV (1<<1)

static void do_free_by_irq_and_dev(struct irq_entry *irq_entry,
				   unsigned int irq, void *dev, int flags)
{
	int i;
	struct irq_reg *to_free;

	for (i = 0; i < NUM_IRQ_TYPES; i++) {
		to_free = irq_entry->irq_array[i];
		if (to_free == NULL)
			continue;
		if (((flags & IGNORE_IRQ) || (to_free->irq == irq)) &&
		    ((flags & IGNORE_DEV) || (to_free->id == dev))) {
			/* Turn off any IO on this fd - allows us to
			 * avoid locking the IRQ loop
			 */
			os_del_epoll_fd(irq_entry->fd);
			irq_entry->irq_array[i] = NULL;
			assign_epoll_events_to_irq(irq_entry);
			if (to_free->active)
				to_free->purge = true;
			else
				kfree(to_free);
		}
	}
}

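/*
 * Dispose of every IRQ registered on an fd and reap any irq_entry
 * nodes that are left empty.
 */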
void free_irq_by_fd(int fd)
{
	struct irq_entry *to_free;
	unsigned long flags;

	spin_lock_irqsave(&irq_lock, flags);
	to_free = get_irq_entry_by_fd(fd);
	if (to_free != NULL)
		do_free_by_irq_and_dev(to_free, -1, NULL,
				       IGNORE_IRQ | IGNORE_DEV);
	garbage_collect_irq_entries();
	spin_unlock_irqrestore(&irq_lock, flags);
}
EXPORT_SYMBOL(free_irq_by_fd);

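/*
 * Free every registration with the given IRQ number and device id,
 * on whichever fd it lives.
 */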
static void free_irq_by_irq_and_dev(unsigned int irq, void *dev)
{
	struct irq_entry *to_free;
	unsigned long flags;

	spin_lock_irqsave(&irq_lock, flags);
	to_free = active_fds;
	while (to_free != NULL) {
		do_free_by_irq_and_dev(to_free, irq, dev, 0);
		to_free = to_free->next;
	}
	garbage_collect_irq_entries();
	spin_unlock_irqrestore(&irq_lock, flags);
}

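/*
 * Remove an fd from epoll and dispose of any registration with the
 * given IRQ number on it, regardless of device id.
 */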
void deactivate_fd(int fd, int irqnum)
{
	struct irq_entry *to_free;
	unsigned long flags;

	os_del_epoll_fd(fd);
	spin_lock_irqsave(&irq_lock, flags);
	to_free = get_irq_entry_by_fd(fd);
	if (to_free != NULL)
		do_free_by_irq_and_dev(to_free, irqnum, NULL, IGNORE_DEV);
	garbage_collect_irq_entries();
	spin_unlock_irqrestore(&irq_lock, flags);
	ignore_sigio_fd(fd);
}
EXPORT_SYMBOL(deactivate_fd);

/*
 * Called just before shutdown in order to provide a clean exec
 * environment in case the system is rebooting.  No locking because
 * that would cause a pointless shutdown hang if something hadn't
 * released the lock.
 */
int deactivate_all_fds(void)
{
	struct irq_entry *to_free;

	/* Stop IO. The IRQ loop has no lock so this is our
	 * only way of making sure we are safe to dispose
	 * of all IRQ handlers
	 */
	os_set_ioignore();
	to_free = active_fds;
	while (to_free != NULL) {
		do_free_by_irq_and_dev(to_free, -1, NULL,
				       IGNORE_IRQ | IGNORE_DEV);
		to_free = to_free->next;
	}
	/* don't garbage collect - we can no longer call kfree() here */
	os_close_epoll_fd();
	return 0;
}

/*
 * do_IRQ handles all normal device IRQs (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
unsigned int do_IRQ(int irq, struct uml_pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs((struct pt_regs *)regs);

	irq_enter();
	generic_handle_irq(irq);
	irq_exit();
	set_irq_regs(old_regs);
	return 1;
}

void um_free_irq(int irq, void *dev)
{
	/* NR_IRQS itself is already out of range (was irq > NR_IRQS) */
	if (WARN(irq < 0 || irq >= NR_IRQS, "freeing invalid irq %d", irq))
		return;

	free_irq_by_irq_and_dev(irq, dev);
	free_irq(irq, dev);
	clear_bit(irq, irqs_allocated);
}
EXPORT_SYMBOL(um_free_irq);

int um_request_irq(int irq, int fd, enum um_irq_type type,
		   irq_handler_t handler, unsigned long irqflags,
		   const char *devname, void *dev_id)
{
	int err;

	if (irq == UM_IRQ_ALLOC) {
		int i;

		for (i = UM_FIRST_DYN_IRQ; i < NR_IRQS; i++) {
			if (!test_and_set_bit(i, irqs_allocated)) {
				irq = i;
				break;
			}
		}
	}

	if (irq < 0)
		return -ENOSPC;

	if (fd != -1) {
		err = activate_fd(irq, fd, type, dev_id);
		if (err)
			goto error;
	}

	err = request_irq(irq, handler, irqflags, devname, dev_id);
	if (err < 0)
		goto error;

	return irq;
error:
	clear_bit(irq, irqs_allocated);
	return err;
}
EXPORT_SYMBOL(um_request_irq);
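
/*
 * Usage sketch (illustrative only, not part of this file): a driver
 * holding an open host file descriptor would typically register a read
 * IRQ as below and release it again with um_free_irq().  The names
 * my_fd, my_isr and my_dev are hypothetical.
 *
 *	static irqreturn_t my_isr(int irq, void *dev)
 *	{
 *		return IRQ_HANDLED;
 *	}
 *
 *	int irq = um_request_irq(UM_IRQ_ALLOC, my_fd, IRQ_READ, my_isr,
 *				 0, "my-driver", my_dev);
 *	if (irq < 0)
 *		return irq;
 *	...
 *	um_free_irq(irq, my_dev);
 */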

/*
 * irq_chip must define at least enable/disable and ack when
 * the edge handler is used.
 */
static void dummy(struct irq_data *d)
{
}

/* This is used for everything other than the timer. */
static struct irq_chip normal_irq_type = {
	.name = "SIGIO",
	.irq_disable = dummy,
	.irq_enable = dummy,
	.irq_ack = dummy,
	.irq_mask = dummy,
	.irq_unmask = dummy,
};

static struct irq_chip alarm_irq_type = {
	.name = "SIGALRM",
	.irq_disable = dummy,
	.irq_enable = dummy,
	.irq_ack = dummy,
	.irq_mask = dummy,
	.irq_unmask = dummy,
};

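/*
 * Called once at boot: give the timer IRQ its own chip, hook every
 * other IRQ up to the SIGIO chip, then create the epoll instance.
 */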
void __init init_IRQ(void)
{
	int i;

	irq_set_chip_and_handler(TIMER_IRQ, &alarm_irq_type, handle_edge_irq);

	for (i = 1; i < NR_IRQS; i++)
		irq_set_chip_and_handler(i, &normal_irq_type, handle_edge_irq);
	/* Initialize EPOLL Loop */
	os_setup_epoll();
}

/*
 * IRQ stack entry and exit:
 *
 * Unlike i386, UML doesn't receive IRQs on the normal kernel stack
 * and switch over to the IRQ stack after some preparation.  We use
 * sigaltstack to receive signals on a separate stack from the start.
 * These two functions make sure the rest of the kernel won't be too
 * upset by being on a different stack.  The IRQ stack has a
 * thread_info structure at the bottom so that current et al continue
 * to work.
 *
 * to_irq_stack copies the current task's thread_info to the IRQ stack
 * thread_info and sets the task's stack to point to the IRQ stack.
 *
 * from_irq_stack copies the thread_info struct back (flags may have
 * been modified) and resets the task's stack pointer.
 *
 * Tricky bits -
 *
 * What happens when two signals race each other?  UML doesn't block
 * signals with sigprocmask or sa_mask, so a second signal could
 * arrive while a previous one is still setting up the thread_info.
 *
 * There are three cases -
 *     The first interrupt on the stack - sets up the thread_info and
 * handles the interrupt
 *     A nested interrupt interrupting the copying of the thread_info -
 * can't handle the interrupt, as the stack is in an unknown state
 *     A nested interrupt not interrupting the copying of the
 * thread_info - doesn't do any setup, just handles the interrupt
 *
 * The first job is to figure out whether we interrupted stack setup.
 * This is done by xchging the signal mask with thread_info->pending.
 * If the value that comes back is zero, then there is no setup in
 * progress, and the interrupt can be handled.  If the value is
 * non-zero, then there is stack setup in progress.  In order to have
 * the interrupt handled, we leave our signal in the mask, and it will
 * be handled by the upper handler after it has set up the stack.
 *
 * Next is to figure out whether we are the outer handler or a nested
 * one.  As part of setting up the stack, thread_info->real_thread is
 * set to non-NULL (and is reset to NULL on exit).  This is the
 * nesting indicator.  If it is non-NULL, then the stack is already
 * set up and the handler can run.
 */

static unsigned long pending_mask;

unsigned long to_irq_stack(unsigned long *mask_out)
{
	struct thread_info *ti;
	unsigned long mask, old;
	int nested;

	mask = xchg(&pending_mask, *mask_out);
	if (mask != 0) {
		/*
		 * If any interrupts come in at this point, we want to
		 * make sure that their bits aren't lost by our
		 * putting our bit in.  So, this loop accumulates bits
		 * until xchg returns the same value that we put in.
		 * When that happens, there were no new interrupts,
		 * and pending_mask contains a bit for each interrupt
		 * that came in.
		 */
		old = *mask_out;
		do {
			old |= mask;
			mask = xchg(&pending_mask, old);
		} while (mask != old);
		return 1;
	}

	ti = current_thread_info();
	nested = (ti->real_thread != NULL);
	if (!nested) {
		struct task_struct *task;
		struct thread_info *tti;

		task = cpu_tasks[ti->cpu].task;
		tti = task_thread_info(task);

		*ti = *tti;
		ti->real_thread = tti;
		task->stack = ti;
	}

	mask = xchg(&pending_mask, 0);
	*mask_out |= mask | nested;
	return 0;
}

unsigned long from_irq_stack(int nested)
{
	struct thread_info *ti, *to;
	unsigned long mask;

	ti = current_thread_info();

	pending_mask = 1;

	to = ti->real_thread;
	current->stack = to;
	ti->real_thread = NULL;
	*to = *ti;

	mask = xchg(&pending_mask, 0);
	return mask & ~1;
}