/*
 * arch/um/kernel/irq.c
 *
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 * Derived (i.e. mostly copied) from arch/i386/kernel/irq.c:
 *	Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 */

#include <linux/cpumask.h>
#include <linux/hardirq.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <as-layout.h>
#include <kern_util.h>
#include <os.h>

/*
 * This list is accessed under irq_lock, except in sigio_handler,
 * where it cannot be modified out from under us.  IRQ handlers won't
 * change it - if an IRQ source has vanished, it is freed by free_irqs
 * just before sigio_handler returns.  free_irqs processes a separate
 * list of irqs to free, with its own locking, and comes back here to
 * remove the list elements, taking irq_lock to do so.
 */
static struct irq_fd *active_fds = NULL;
static struct irq_fd **last_irq_ptr = &active_fds;

extern void free_irqs(void);

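/*
 * The SIGIO handler: ask the host which of the registered descriptors
 * have events pending and dispatch do_IRQ for each one that does.
 */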
void sigio_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
{
	struct irq_fd *irq_fd;
	int n;

	if (smp_sigio_handler())
		return;

	while (1) {
		n = os_waiting_for_events(active_fds);
		if (n <= 0) {
			if (n == -EINTR)
				continue;
			else break;
		}

		for (irq_fd = active_fds; irq_fd != NULL;
		     irq_fd = irq_fd->next) {
			if (irq_fd->current_events != 0) {
				irq_fd->current_events = 0;
				do_IRQ(irq_fd->irq, regs);
			}
		}
	}

	free_irqs();
}

static DEFINE_SPINLOCK(irq_lock);

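/*
 * Register a descriptor with the SIGIO machinery: put the fd into
 * async mode, add an irq_fd entry to active_fds (rejecting duplicate
 * fd/type pairs), and grow the host pollfd array as needed.
 */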
static int activate_fd(int irq, int fd, int type, void *dev_id)
{
	struct pollfd *tmp_pfd;
	struct irq_fd *new_fd, *irq_fd;
	unsigned long flags;
	int events, err, n;

	err = os_set_fd_async(fd);
	if (err < 0)
		goto out;

	err = -ENOMEM;
	new_fd = kmalloc(sizeof(struct irq_fd), GFP_KERNEL);
	if (new_fd == NULL)
		goto out;

	if (type == IRQ_READ)
		events = UM_POLLIN | UM_POLLPRI;
	else events = UM_POLLOUT;
	*new_fd = ((struct irq_fd) { .next  		= NULL,
				     .id 		= dev_id,
				     .fd 		= fd,
				     .type 		= type,
				     .irq 		= irq,
				     .events 		= events,
				     .current_events 	= 0 } );

	err = -EBUSY;
	spin_lock_irqsave(&irq_lock, flags);
	for (irq_fd = active_fds; irq_fd != NULL; irq_fd = irq_fd->next) {
		if ((irq_fd->fd == fd) && (irq_fd->type == type)) {
			printk(KERN_ERR "Registering fd %d twice\n", fd);
			printk(KERN_ERR "Irqs : %d, %d\n", irq_fd->irq, irq);
			printk(KERN_ERR "Ids : 0x%p, 0x%p\n", irq_fd->id,
			       dev_id);
			goto out_unlock;
		}
	}

	if (type == IRQ_WRITE)
		fd = -1;

	tmp_pfd = NULL;
	n = 0;

	while (1) {
		n = os_create_pollfd(fd, events, tmp_pfd, n);
		if (n == 0)
			break;

		/*
		 * n > 0 means the new pollfd could not be added to the
		 * current pollfds array, and tmp_pfd is NULL or too small
		 * to hold the new array.  At least n bytes are needed.
		 *
		 * We have to drop the lock here in order to call kmalloc,
		 * which might sleep.  If something else changed the pollfds
		 * array in the meantime, so that the new pollfd still does
		 * not fit, we free tmp_pfd and try again.
		 */
		spin_unlock_irqrestore(&irq_lock, flags);
		kfree(tmp_pfd);

		tmp_pfd = kmalloc(n, GFP_KERNEL);
		if (tmp_pfd == NULL)
			goto out_kfree;

		spin_lock_irqsave(&irq_lock, flags);
	}

	*last_irq_ptr = new_fd;
	last_irq_ptr = &new_fd->next;

	spin_unlock_irqrestore(&irq_lock, flags);

	/*
	 * This calls activate_fd, so it has to be outside the critical
	 * section.
	 */
	maybe_sigio_broken(fd, (type == IRQ_READ));

	return 0;

 out_unlock:
	spin_unlock_irqrestore(&irq_lock, flags);
 out_kfree:
	kfree(new_fd);
 out:
	return err;
}

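/*
 * Drop every active_fds entry for which test() returns true; the walk
 * and unlinking happen in os_free_irq_by_cb under irq_lock.
 */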
static void free_irq_by_cb(int (*test)(struct irq_fd *, void *), void *arg)
{
	unsigned long flags;

	spin_lock_irqsave(&irq_lock, flags);
	os_free_irq_by_cb(test, arg, active_fds, &last_irq_ptr);
	spin_unlock_irqrestore(&irq_lock, flags);
}

struct irq_and_dev {
	int irq;
	void *dev;
};

static int same_irq_and_dev(struct irq_fd *irq, void *d)
{
	struct irq_and_dev *data = d;

	return ((irq->irq == data->irq) && (irq->id == data->dev));
}

static void free_irq_by_irq_and_dev(unsigned int irq, void *dev)
{
	struct irq_and_dev data = ((struct irq_and_dev) { .irq  = irq,
							  .dev  = dev });

	free_irq_by_cb(same_irq_and_dev, &data);
}

static int same_fd(struct irq_fd *irq, void *fd)
{
	return (irq->fd == *((int *)fd));
}

void free_irq_by_fd(int fd)
{
	free_irq_by_cb(same_fd, &fd);
}

/* Must be called with irq_lock held */
static struct irq_fd *find_irq_by_fd(int fd, int irqnum, int *index_out)
{
	struct irq_fd *irq;
	int i = 0;
	int fdi;

	for (irq = active_fds; irq != NULL; irq = irq->next) {
		if ((irq->fd == fd) && (irq->irq == irqnum))
			break;
		i++;
	}
	if (irq == NULL) {
		printk(KERN_ERR "find_irq_by_fd doesn't have descriptor %d\n",
		       fd);
		goto out;
	}
	fdi = os_get_pollfd(i);
	if ((fdi != -1) && (fdi != fd)) {
		printk(KERN_ERR "find_irq_by_fd - mismatch between active_fds "
		       "and pollfds, fd %d vs %d, need %d\n", irq->fd,
		       fdi, fd);
		irq = NULL;
		goto out;
	}
	*index_out = i;
 out:
	return irq;
}

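/*
 * Re-enable polling on a descriptor: restore its slot in the host
 * pollfd array and re-register it for SIGIO.
 */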
void reactivate_fd(int fd, int irqnum)
{
	struct irq_fd *irq;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&irq_lock, flags);
	irq = find_irq_by_fd(fd, irqnum, &i);
	if (irq == NULL) {
		spin_unlock_irqrestore(&irq_lock, flags);
		return;
	}
	os_set_pollfd(i, irq->fd);
	spin_unlock_irqrestore(&irq_lock, flags);

	add_sigio_fd(fd);
}

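/*
 * Stop polling a descriptor: clear its slot in the host pollfd array
 * and tell the SIGIO code to ignore it.
 */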
void deactivate_fd(int fd, int irqnum)
{
	struct irq_fd *irq;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&irq_lock, flags);
	irq = find_irq_by_fd(fd, irqnum, &i);
	if (irq == NULL) {
		spin_unlock_irqrestore(&irq_lock, flags);
		return;
	}

	os_set_pollfd(i, -1);
	spin_unlock_irqrestore(&irq_lock, flags);

	ignore_sigio_fd(fd);
}
EXPORT_SYMBOL(deactivate_fd);

/*
 * Called just before shutdown in order to provide a clean exec
 * environment in case the system is rebooting.  No locking because
 * that would cause a pointless shutdown hang if something hadn't
 * released the lock.
 */
int deactivate_all_fds(void)
{
	struct irq_fd *irq;
	int err;

	for (irq = active_fds; irq != NULL; irq = irq->next) {
		err = os_clear_fd_async(irq->fd);
		if (err)
			return err;
	}
	/* If there is a signal already queued, after unblocking ignore it */
	os_set_ioignore();

	return 0;
}

/*
 * do_IRQ handles all normal device IRQs (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
unsigned int do_IRQ(int irq, struct uml_pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs((struct pt_regs *)regs);
	irq_enter();
	generic_handle_irq(irq);
	irq_exit();
	set_irq_regs(old_regs);
	return 1;
}

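/* Unregister the fd from the SIGIO machinery, then free the irq itself. */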
void um_free_irq(unsigned int irq, void *dev)
{
	free_irq_by_irq_and_dev(irq, dev);
	free_irq(irq, dev);
}
EXPORT_SYMBOL(um_free_irq);

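/*
 * Wire an interrupt to a host file descriptor: register fd with the
 * SIGIO machinery (unless fd is -1, for IRQs with no backing fd) and
 * then install the handler via the generic request_irq.
 *
 * A minimal usage sketch - the irq number, handler, and device pointer
 * here are hypothetical, not defined in this file:
 *
 *	err = um_request_irq(MY_DEV_IRQ, fd, IRQ_READ, my_intr_handler,
 *			     0, "my_dev", dev);
 *	if (err < 0)
 *		printk(KERN_ERR "um_request_irq failed : %d\n", err);
 */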
int um_request_irq(unsigned int irq, int fd, int type,
		   irq_handler_t handler,
		   unsigned long irqflags, const char * devname,
		   void *dev_id)
{
	int err;

	if (fd != -1) {
		err = activate_fd(irq, fd, type, dev_id);
		if (err)
			return err;
	}

	return request_irq(irq, handler, irqflags, devname, dev_id);
}

EXPORT_SYMBOL(um_request_irq);
EXPORT_SYMBOL(reactivate_fd);

/*
 * irq_chip must define at least enable/disable and ack when
 * the edge handler is used.
 */
static void dummy(struct irq_data *d)
{
}

/* This is used for everything other than the timer. */
static struct irq_chip normal_irq_type = {
	.name = "SIGIO",
	.irq_disable = dummy,
	.irq_enable = dummy,
	.irq_ack = dummy,
	.irq_mask = dummy,
	.irq_unmask = dummy,
};

static struct irq_chip SIGVTALRM_irq_type = {
	.name = "SIGVTALRM",
	.irq_disable = dummy,
	.irq_enable = dummy,
	.irq_ack = dummy,
	.irq_mask = dummy,
	.irq_unmask = dummy,
};

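/*
 * TIMER_IRQ gets the SIGVTALRM chip; every other irq gets the SIGIO
 * chip.  Both use the edge handler.
 */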
void __init init_IRQ(void)
{
	int i;

	irq_set_chip_and_handler(TIMER_IRQ, &SIGVTALRM_irq_type, handle_edge_irq);

	for (i = 1; i < NR_IRQS; i++)
		irq_set_chip_and_handler(i, &normal_irq_type, handle_edge_irq);
}

/*
 * IRQ stack entry and exit:
 *
 * Unlike i386, UML doesn't receive IRQs on the normal kernel stack
 * and switch over to the IRQ stack after some preparation.  We use
 * sigaltstack to receive signals on a separate stack from the start.
 * These two functions make sure the rest of the kernel won't be too
 * upset by being on a different stack.  The IRQ stack has a
 * thread_info structure at the bottom so that current et al continue
 * to work.
 *
 * to_irq_stack copies the current task's thread_info to the IRQ stack
 * thread_info and sets the task's stack to point to the IRQ stack.
 *
 * from_irq_stack copies the thread_info struct back (flags may have
 * been modified) and resets the task's stack pointer.
 *
 * Tricky bits -
 *
 * What happens when two signals race each other?  UML doesn't block
 * signals with sigprocmask, SA_DEFER, or sa_mask, so a second signal
 * could arrive while a previous one is still setting up the
 * thread_info.
 *
 * There are three cases -
 *     The first interrupt on the stack - sets up the thread_info and
 * handles the interrupt
 *     A nested interrupt interrupting the copying of the thread_info -
 * can't handle the interrupt, as the stack is in an unknown state
 *     A nested interrupt not interrupting the copying of the
 * thread_info - doesn't do any setup, just handles the interrupt
 *
 * The first job is to figure out whether we interrupted stack setup.
 * This is done by xchging the signal mask with pending_mask.
 * If the value that comes back is zero, then there is no setup in
 * progress, and the interrupt can be handled.  If the value is
 * non-zero, then there is stack setup in progress.  In order to have
 * the interrupt handled, we leave our signal in the mask, and it will
 * be handled by the upper handler after it has set up the stack.
 *
 * Next is to figure out whether we are the outer handler or a nested
 * one.  As part of setting up the stack, thread_info->real_thread is
 * set to non-NULL (and is reset to NULL on exit).  This is the
 * nesting indicator.  If it is non-NULL, then the stack is already
 * set up and the handler can run.
 */

static unsigned long pending_mask;

unsigned long to_irq_stack(unsigned long *mask_out)
{
	struct thread_info *ti;
	unsigned long mask, old;
	int nested;

	mask = xchg(&pending_mask, *mask_out);
	if (mask != 0) {
		/*
		 * If any interrupts come in at this point, we want to
		 * make sure that their bits aren't lost by our
		 * putting our bit in.  So, this loop accumulates bits
		 * until xchg returns the same value that we put in.
		 * When that happens, there were no new interrupts,
		 * and pending_mask contains a bit for each interrupt
		 * that came in.
		 */
		old = *mask_out;
		do {
			old |= mask;
			mask = xchg(&pending_mask, old);
		} while (mask != old);
		return 1;
	}

	ti = current_thread_info();
	nested = (ti->real_thread != NULL);
	if (!nested) {
		struct task_struct *task;
		struct thread_info *tti;

		task = cpu_tasks[ti->cpu].task;
		tti = task_thread_info(task);

		*ti = *tti;
		ti->real_thread = tti;
		task->stack = ti;
	}

	mask = xchg(&pending_mask, 0);
	*mask_out |= mask | nested;
	return 0;
}

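/*
 * Undo to_irq_stack: copy the (possibly modified) thread_info back to
 * the task stack and return the mask of signals that arrived during
 * the teardown, with the guard bit cleared, so the caller can handle
 * them.
 */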
unsigned long from_irq_stack(int nested)
{
	struct thread_info *ti, *to;
	unsigned long mask;

	ti = current_thread_info();

	pending_mask = 1;

	to = ti->real_thread;
	current->stack = to;
	ti->real_thread = NULL;
	*to = *ti;

	mask = xchg(&pending_mask, 0);
	return mask & ~1;
}