// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 - Cambridge Greys Ltd
 * Copyright (C) 2011 - 2014 Cisco Systems Inc
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Derived (i.e. mostly copied) from arch/i386/kernel/irq.c:
 *	Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 */

#include <linux/cpumask.h>
#include <linux/hardirq.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <as-layout.h>
#include <kern_util.h>
#include <os.h>
#include <irq_user.h>
#include <irq_kern.h>
#include <linux/time-internal.h>

/* When epoll triggers we do not know why it did so, and we can also
 * have different IRQs for read and write on the same fd.
 * This is why we keep a small irq_reg array for each fd -
 * one entry per IRQ type.
 */
struct irq_reg {
	void *id;
	int irq;
	/* it's cheaper to store this than to query it */
	int events;
	bool active;
	bool pending;
	bool wakeup;
#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
	bool pending_event;
	void (*timetravel_handler)(int, int, void *,
				   struct time_travel_event *);
	struct time_travel_event event;
#endif
};

struct irq_entry {
	struct list_head list;
	int fd;
	struct irq_reg reg[NUM_IRQ_TYPES];
	bool suspended;
	bool sigio_workaround;
};
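
/*
 * For example, a driver that registers the same host fd once for input and
 * once for output ends up with a single irq_entry on active_fds whose
 * reg[IRQ_READ] and reg[IRQ_WRITE] slots are both populated, each carrying
 * its own IRQ number and dev_id.
 */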

static DEFINE_RAW_SPINLOCK(irq_lock);
static LIST_HEAD(active_fds);
static DECLARE_BITMAP(irqs_allocated, UM_LAST_SIGNAL_IRQ);
static bool irqs_suspended;
#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
static bool irqs_pending;
#endif

static void irq_io_loop(struct irq_reg *irq, struct uml_pt_regs *regs)
{
/*
 * irq->active guards against reentry
 * irq->pending accumulates pending requests
 * if pending is raised the irq_handler is re-run
 * until pending is cleared
 */
	if (irq->active) {
		irq->active = false;

		do {
			irq->pending = false;
			do_IRQ(irq->irq, regs);
		} while (irq->pending);

		irq->active = true;
	} else {
		irq->pending = true;
	}
}
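
/*
 * Concrete scenario for the guard above: if irq_io_loop() is re-entered for
 * the same irq_reg while do_IRQ() is still running, the nested call finds
 * irq->active == false and only sets irq->pending; the outer loop then runs
 * do_IRQ() once more before re-arming irq->active.
 */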

#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
static void irq_event_handler(struct time_travel_event *ev)
{
	struct irq_reg *reg = container_of(ev, struct irq_reg, event);

	/* do nothing if suspended; just cause a wakeup and mark as pending */
	if (irqs_suspended) {
		irqs_pending = true;
		reg->pending_event = true;
		return;
	}

	generic_handle_irq(reg->irq);
}

static bool irq_do_timetravel_handler(struct irq_entry *entry,
				      enum um_irq_type t)
{
	struct irq_reg *reg = &entry->reg[t];

	if (!reg->timetravel_handler)
		return false;

	/*
	 * Handle all messages - we might get multiple even while
	 * interrupts are already suspended, due to suspend order
	 * etc. Note that time_travel_add_irq_event() will not add
	 * an event twice; if one is pending already, "first wins".
	 */
	reg->timetravel_handler(reg->irq, entry->fd, reg->id, &reg->event);

	if (!reg->event.pending)
		return false;

	return true;
}

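/*
 * Run the IRQs that were only marked pending (reg->pending_event) while
 * interrupts were suspended; called from the SIGIO path once we are no
 * longer suspended.
 */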
static void irq_do_pending_events(bool timetravel_handlers_only)
{
	struct irq_entry *entry;

	if (!irqs_pending || timetravel_handlers_only)
		return;

	irqs_pending = false;

	list_for_each_entry(entry, &active_fds, list) {
		enum um_irq_type t;

		for (t = 0; t < NUM_IRQ_TYPES; t++) {
			struct irq_reg *reg = &entry->reg[t];

			/*
			 * Any timetravel_handler was invoked already, just
			 * directly run the IRQ.
			 */
			if (reg->pending_event) {
				irq_enter();
				generic_handle_irq(reg->irq);
				irq_exit();
				reg->pending_event = false;
			}
		}
	}
}
#else
static bool irq_do_timetravel_handler(struct irq_entry *entry,
				      enum um_irq_type t)
{
	return false;
}

static void irq_do_pending_events(bool timetravel_handlers_only)
{
}
#endif

static void sigio_reg_handler(int idx, struct irq_entry *entry, enum um_irq_type t,
			      struct uml_pt_regs *regs,
			      bool timetravel_handlers_only)
{
	struct irq_reg *reg = &entry->reg[t];

	if (!reg->events)
		return;

	if (os_epoll_triggered(idx, reg->events) <= 0)
		return;

	if (irq_do_timetravel_handler(entry, t))
		return;

	/*
	 * If we're called to only run time-travel handlers then don't
	 * actually proceed but mark sigio as pending (if applicable).
	 * For suspend/resume, timetravel_handlers_only may be true
	 * despite time-travel not being configured and used.
	 */
	if (timetravel_handlers_only) {
#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
		reg->pending_event = true;
		irqs_pending = true;
		mark_sigio_pending();
#endif
		return;
	}

	irq_io_loop(reg, regs);
}

static void _sigio_handler(struct uml_pt_regs *regs,
			   bool timetravel_handlers_only)
{
	struct irq_entry *irq_entry;
	int n, i;

	if (timetravel_handlers_only && !um_irq_timetravel_handler_used())
		return;

	/* Flush out pending events that were ignored due to time-travel. */
	if (!irqs_suspended)
		irq_do_pending_events(timetravel_handlers_only);

	while (1) {
		/* This is now lockless - epoll keeps back-references to the
		 * irqs which have triggered it, so there is no need to walk
		 * the irq list and lock it every time. We avoid locking by
		 * turning off IO for a specific fd with os_del_epoll_fd(fd)
		 * before we make any changes to the actual data structures.
		 */
		n = os_waiting_for_events_epoll();

		if (n <= 0) {
			if (n == -EINTR)
				continue;
			else
				break;
		}

		for (i = 0; i < n; i++) {
			enum um_irq_type t;

			irq_entry = os_epoll_get_data_pointer(i);

			for (t = 0; t < NUM_IRQ_TYPES; t++)
				sigio_reg_handler(i, irq_entry, t, regs,
						  timetravel_handlers_only);
		}
	}

	if (!timetravel_handlers_only)
		free_irqs();
}

void sigio_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
{
	preempt_disable();
	_sigio_handler(regs, irqs_suspended);
	preempt_enable();
}

static struct irq_entry *get_irq_entry_by_fd(int fd)
{
	struct irq_entry *walk;

	lockdep_assert_held(&irq_lock);

	list_for_each_entry(walk, &active_fds, list) {
		if (walk->fd == fd)
			return walk;
	}

	return NULL;
}

static void remove_irq_entry(struct irq_entry *to_free, bool remove)
{
	if (!to_free)
		return;

	if (remove)
		os_del_epoll_fd(to_free->fd);
	list_del(&to_free->list);
}

static bool update_irq_entry(struct irq_entry *entry)
{
	enum um_irq_type i;
	int events = 0;

	for (i = 0; i < NUM_IRQ_TYPES; i++)
		events |= entry->reg[i].events;

	if (events) {
		/* will modify (instead of add) if needed */
		os_add_epoll_fd(events, entry->fd, entry);
		return true;
	}

	os_del_epoll_fd(entry->fd);
	return false;
}

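/*
 * If no IRQ types remain registered on the fd, unlink the entry and hand it
 * back so the caller can kfree() it after dropping irq_lock; otherwise
 * refresh the epoll registration and return NULL.
 */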
static struct irq_entry *update_or_remove_irq_entry(struct irq_entry *entry)
{
	if (update_irq_entry(entry))
		return NULL;
	remove_irq_entry(entry, false);
	return entry;
}

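/*
 * Register @fd with the epoll loop for one IRQ type. Note the locking dance
 * below: irq_lock is dropped (with interrupts still off) around the
 * GFP_ATOMIC allocation, and the fd is looked up again afterwards in case
 * the same fd was registered concurrently in the meantime.
 */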
static int activate_fd(int irq, int fd, enum um_irq_type type, void *dev_id,
		       void (*timetravel_handler)(int, int, void *,
						  struct time_travel_event *))
{
	struct irq_entry *irq_entry, *to_free = NULL;
	int err, events = os_event_mask(type);
	unsigned long flags;

	err = os_set_fd_async(fd);
	if (err < 0)
		goto out;

	raw_spin_lock_irqsave(&irq_lock, flags);
	irq_entry = get_irq_entry_by_fd(fd);
	if (irq_entry) {
already:
		/* cannot register the same FD twice with the same type */
		if (WARN_ON(irq_entry->reg[type].events)) {
			err = -EALREADY;
			goto out_unlock;
		}

		/* temporarily disable to avoid IRQ-side locking */
		os_del_epoll_fd(fd);
	} else {
		struct irq_entry *new;

		/* don't restore interrupts */
		raw_spin_unlock(&irq_lock);
		new = kzalloc(sizeof(*irq_entry), GFP_ATOMIC);
		if (!new) {
			local_irq_restore(flags);
			return -ENOMEM;
		}
		raw_spin_lock(&irq_lock);
		irq_entry = get_irq_entry_by_fd(fd);
		if (irq_entry) {
			to_free = new;
			goto already;
		}
		irq_entry = new;
		irq_entry->fd = fd;
		list_add_tail(&irq_entry->list, &active_fds);
		maybe_sigio_broken(fd);
	}

	irq_entry->reg[type].id = dev_id;
	irq_entry->reg[type].irq = irq;
	irq_entry->reg[type].active = true;
	irq_entry->reg[type].events = events;

#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
	if (um_irq_timetravel_handler_used()) {
		irq_entry->reg[type].timetravel_handler = timetravel_handler;
		irq_entry->reg[type].event.fn = irq_event_handler;
	}
#endif

	WARN_ON(!update_irq_entry(irq_entry));
	err = 0;
out_unlock:
	raw_spin_unlock_irqrestore(&irq_lock, flags);
out:
	kfree(to_free);
	return err;
}

/*
 * Remove the entry or entries for a specific FD. If you don't want to
 * remove all the possible entries, use um_free_irq() or deactivate_fd()
 * instead.
 */
void free_irq_by_fd(int fd)
{
	struct irq_entry *to_free;
	unsigned long flags;

	raw_spin_lock_irqsave(&irq_lock, flags);
	to_free = get_irq_entry_by_fd(fd);
	remove_irq_entry(to_free, true);
	raw_spin_unlock_irqrestore(&irq_lock, flags);
	kfree(to_free);
}
EXPORT_SYMBOL(free_irq_by_fd);

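/*
 * Drop a single (irq, dev_id) registration. The containing irq_entry is only
 * freed if no other IRQ type is still registered on the same fd.
 */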
static void free_irq_by_irq_and_dev(unsigned int irq, void *dev)
{
	struct irq_entry *entry, *to_free = NULL;
	unsigned long flags;

	raw_spin_lock_irqsave(&irq_lock, flags);
	list_for_each_entry(entry, &active_fds, list) {
		enum um_irq_type i;

		for (i = 0; i < NUM_IRQ_TYPES; i++) {
			struct irq_reg *reg = &entry->reg[i];

			if (!reg->events)
				continue;
			if (reg->irq != irq)
				continue;
			if (reg->id != dev)
				continue;

			os_del_epoll_fd(entry->fd);
			reg->events = 0;
			to_free = update_or_remove_irq_entry(entry);
			goto out;
		}
	}
out:
	raw_spin_unlock_irqrestore(&irq_lock, flags);
	kfree(to_free);
}

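/*
 * Stop watching @fd for @irqnum: the epoll registration is dropped first so
 * the (lockless) IRQ path cannot race with the update, any matching
 * registrations are cleared, and the SIGIO workaround is told to ignore the
 * fd as well.
 */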
void deactivate_fd(int fd, int irqnum)
{
	struct irq_entry *entry;
	unsigned long flags;
	enum um_irq_type i;

	os_del_epoll_fd(fd);

	raw_spin_lock_irqsave(&irq_lock, flags);
	entry = get_irq_entry_by_fd(fd);
	if (!entry)
		goto out;

	for (i = 0; i < NUM_IRQ_TYPES; i++) {
		if (!entry->reg[i].events)
			continue;
		if (entry->reg[i].irq == irqnum)
			entry->reg[i].events = 0;
	}

	entry = update_or_remove_irq_entry(entry);
out:
	raw_spin_unlock_irqrestore(&irq_lock, flags);
	kfree(entry);

	ignore_sigio_fd(fd);
}
EXPORT_SYMBOL(deactivate_fd);

/*
 * Called just before shutdown in order to provide a clean exec
 * environment in case the system is rebooting.  No locking because
 * that would cause a pointless shutdown hang if something hadn't
 * released the lock.
 */
int deactivate_all_fds(void)
{
	struct irq_entry *entry;

	/* Stop IO. The IRQ loop has no lock so this is our
	 * only way of making sure we are safe to dispose
	 * of all IRQ handlers.
	 */
	os_set_ioignore();

	/* we can no longer call kfree() here so just deactivate */
	list_for_each_entry(entry, &active_fds, list)
		os_del_epoll_fd(entry->fd);
	os_close_epoll_fd();
	return 0;
}

/*
 * do_IRQ handles all normal device IRQs (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
unsigned int do_IRQ(int irq, struct uml_pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs((struct pt_regs *)regs);
	irq_enter();
	generic_handle_irq(irq);
	irq_exit();
	set_irq_regs(old_regs);
	return 1;
}

void um_free_irq(int irq, void *dev)
{
	if (WARN(irq < 0 || irq > UM_LAST_SIGNAL_IRQ,
		 "freeing invalid irq %d", irq))
		return;

	free_irq_by_irq_and_dev(irq, dev);
	free_irq(irq, dev);
	clear_bit(irq, irqs_allocated);
}
EXPORT_SYMBOL(um_free_irq);

static int
_um_request_irq(int irq, int fd, enum um_irq_type type,
		irq_handler_t handler, unsigned long irqflags,
		const char *devname, void *dev_id,
		void (*timetravel_handler)(int, int, void *,
					   struct time_travel_event *))
{
	int err;

	if (irq == UM_IRQ_ALLOC) {
		int i;

		for (i = UM_FIRST_DYN_IRQ; i < NR_IRQS; i++) {
			if (!test_and_set_bit(i, irqs_allocated)) {
				irq = i;
				break;
			}
		}
	}

	if (irq < 0)
		return -ENOSPC;

	if (fd != -1) {
		err = activate_fd(irq, fd, type, dev_id, timetravel_handler);
		if (err)
			goto error;
	}

	err = request_irq(irq, handler, irqflags, devname, dev_id);
	if (err < 0)
		goto error;

	return irq;
error:
	clear_bit(irq, irqs_allocated);
	return err;
}

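/*
 * um_request_irq() is the public entry point: it ties a host file descriptor
 * to a UML IRQ (allocating one when UM_IRQ_ALLOC is passed) and installs
 * @handler via request_irq(). A minimal usage sketch - "my_intr" and "priv"
 * are hypothetical driver-side names, not part of this file:
 *
 *	int irq = um_request_irq(UM_IRQ_ALLOC, fd, IRQ_READ, my_intr,
 *				 0, "my-device", priv);
 *	if (irq < 0)
 *		return irq;
 *	...
 *	um_free_irq(irq, priv);
 *
 * Passing fd == -1 skips the epoll registration. On success the (possibly
 * newly allocated) IRQ number is returned, otherwise a negative error code.
 */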
int um_request_irq(int irq, int fd, enum um_irq_type type,
		   irq_handler_t handler, unsigned long irqflags,
		   const char *devname, void *dev_id)
{
	return _um_request_irq(irq, fd, type, handler, irqflags,
			       devname, dev_id, NULL);
}
EXPORT_SYMBOL(um_request_irq);

#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
int um_request_irq_tt(int irq, int fd, enum um_irq_type type,
		      irq_handler_t handler, unsigned long irqflags,
		      const char *devname, void *dev_id,
		      void (*timetravel_handler)(int, int, void *,
						 struct time_travel_event *))
{
	return _um_request_irq(irq, fd, type, handler, irqflags,
			       devname, dev_id, timetravel_handler);
}
EXPORT_SYMBOL(um_request_irq_tt);

void sigio_run_timetravel_handlers(void)
{
	_sigio_handler(NULL, true);
}
#endif

#ifdef CONFIG_PM_SLEEP
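/*
 * On suspend, async I/O is switched off for every fd that is not a wakeup
 * source, not the SIGIO_WRITE_IRQ fd of the SIGIO workaround thread, and
 * (with time-travel) has no timetravel_handler, so that a stray SIGIO cannot
 * wake the sleeping UML instance.
 */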
void um_irqs_suspend(void)
{
	struct irq_entry *entry;
	unsigned long flags;

	irqs_suspended = true;

	raw_spin_lock_irqsave(&irq_lock, flags);
	list_for_each_entry(entry, &active_fds, list) {
		enum um_irq_type t;
		bool clear = true;

		for (t = 0; t < NUM_IRQ_TYPES; t++) {
			if (!entry->reg[t].events)
				continue;

			/*
			 * For the SIGIO_WRITE_IRQ, which is used to handle the
			 * SIGIO workaround thread, we need special handling:
			 * enable wake for it itself, but below we tell it about
			 * any FDs that should be suspended.
			 */
			if (entry->reg[t].wakeup ||
			    entry->reg[t].irq == SIGIO_WRITE_IRQ
#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
			    || entry->reg[t].timetravel_handler
#endif
			    ) {
				clear = false;
				break;
			}
		}

		if (clear) {
			entry->suspended = true;
			os_clear_fd_async(entry->fd);
			entry->sigio_workaround =
				!__ignore_sigio_fd(entry->fd);
		}
	}
	raw_spin_unlock_irqrestore(&irq_lock, flags);
}

void um_irqs_resume(void)
{
	struct irq_entry *entry;
	unsigned long flags;

	raw_spin_lock_irqsave(&irq_lock, flags);
	list_for_each_entry(entry, &active_fds, list) {
		if (entry->suspended) {
			int err = os_set_fd_async(entry->fd);

			WARN(err < 0, "os_set_fd_async returned %d\n", err);
			entry->suspended = false;

			if (entry->sigio_workaround) {
				err = __add_sigio_fd(entry->fd);
				WARN(err < 0, "__add_sigio_fd returned %d\n", err);
			}
		}
	}
	raw_spin_unlock_irqrestore(&irq_lock, flags);

	irqs_suspended = false;
	send_sigio_to_self();
}

static int normal_irq_set_wake(struct irq_data *d, unsigned int on)
{
	struct irq_entry *entry;
	unsigned long flags;

	raw_spin_lock_irqsave(&irq_lock, flags);
	list_for_each_entry(entry, &active_fds, list) {
		enum um_irq_type t;

		for (t = 0; t < NUM_IRQ_TYPES; t++) {
			if (!entry->reg[t].events)
				continue;

			if (entry->reg[t].irq != d->irq)
				continue;
			entry->reg[t].wakeup = on;
			goto unlock;
		}
	}
unlock:
	raw_spin_unlock_irqrestore(&irq_lock, flags);
	return 0;
}
#else
#define normal_irq_set_wake NULL
#endif

/*
 * irq_chip must define at least enable/disable and ack when
 * the edge handler is used.
 */
static void dummy(struct irq_data *d)
{
}

/* This is used for everything other than the timer. */
static struct irq_chip normal_irq_type = {
	.name = "SIGIO",
	.irq_disable = dummy,
	.irq_enable = dummy,
	.irq_ack = dummy,
	.irq_mask = dummy,
	.irq_unmask = dummy,
	.irq_set_wake = normal_irq_set_wake,
};

static struct irq_chip alarm_irq_type = {
	.name = "SIGALRM",
	.irq_disable = dummy,
	.irq_enable = dummy,
	.irq_ack = dummy,
	.irq_mask = dummy,
	.irq_unmask = dummy,
};

void __init init_IRQ(void)
{
	int i;

	irq_set_chip_and_handler(TIMER_IRQ, &alarm_irq_type, handle_edge_irq);

	for (i = 1; i < UM_LAST_SIGNAL_IRQ; i++)
		irq_set_chip_and_handler(i, &normal_irq_type, handle_edge_irq);
	/* Initialize EPOLL Loop */
	os_setup_epoll();
}