xref: /linux/arch/um/kernel/irq.c (revision 399ead3a6d76cbdd29a716660db5c84a314dab70)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2017 - Cambridge Greys Ltd
4  * Copyright (C) 2011 - 2014 Cisco Systems Inc
5  * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
6  * Derived (i.e. mostly copied) from arch/i386/kernel/irq.c:
7  *	Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
8  */
9 
10 #include <linux/cpumask.h>
11 #include <linux/hardirq.h>
12 #include <linux/interrupt.h>
13 #include <linux/kernel_stat.h>
14 #include <linux/module.h>
15 #include <linux/sched.h>
16 #include <linux/seq_file.h>
17 #include <linux/slab.h>
18 #include <as-layout.h>
19 #include <kern_util.h>
20 #include <os.h>
21 #include <irq_user.h>
22 #include <irq_kern.h>
23 #include <linux/time-internal.h>
24 
25 DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
26 
27 #define irq_stats(x)		(&per_cpu(irq_stat, x))
28 
29 /* When epoll triggers we do not know why it did so;
30  * we can also have different IRQs for read and write.
31  * This is why we keep a small irq_reg array for each fd -
32  * one entry per IRQ type.
33  */
34 struct irq_reg {
35 	void *id;
36 	int irq;
37 	/* it's cheaper to store this than to query it */
38 	int events;
39 	bool active;
40 	bool pending;
41 	bool wakeup;
42 #ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
43 	bool pending_event;
44 	void (*timetravel_handler)(int, int, void *,
45 				   struct time_travel_event *);
46 	struct time_travel_event event;
47 #endif
48 };
49 
50 struct irq_entry {
51 	struct list_head list;
52 	int fd;
53 	struct irq_reg reg[NUM_IRQ_TYPES];
54 	bool suspended;
55 	bool sigio_workaround;
56 };
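/*
 * Editorial sketch (not part of the original file): there is one irq_entry
 * per registered fd, holding one irq_reg slot per IRQ type.  Assuming the
 * um_irq_type enum provides IRQ_READ and IRQ_WRITE, a device that registers
 * both directions on the same fd ends up with:
 *
 *	struct irq_entry *e = get_irq_entry_by_fd(fd);	(with irq_lock held)
 *
 *	e->reg[IRQ_READ].irq	- raised when the fd becomes readable
 *	e->reg[IRQ_WRITE].irq	- raised when the fd becomes writable
 */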
57 
58 static DEFINE_RAW_SPINLOCK(irq_lock);
59 static LIST_HEAD(active_fds);
60 static DECLARE_BITMAP(irqs_allocated, UM_LAST_SIGNAL_IRQ);
61 static bool irqs_suspended;
62 #ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
63 static bool irqs_pending;
64 #endif
65 
66 static void irq_io_loop(struct irq_reg *irq, struct uml_pt_regs *regs)
67 {
68 /*
69  * irq->active guards against reentry;
70  * irq->pending accumulates pending requests.
71  * If pending is raised, the IRQ handler is re-run
72  * until pending is cleared.
73  */
74 	if (irq->active) {
75 		irq->active = false;
76 
77 		do {
78 			irq->pending = false;
79 			do_IRQ(irq->irq, regs);
80 		} while (irq->pending);
81 
82 		irq->active = true;
83 	} else {
84 		irq->pending = true;
85 	}
86 }
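/*
 * Editorial illustration of the guard above, assuming a second epoll event
 * for the same irq_reg arrives while do_IRQ() is still running:
 *
 *	outer call:  active -> false, pending -> false, do_IRQ() runs
 *	nested call: sees active == false, only sets pending = true
 *	outer call:  sees pending == true, clears it and re-runs do_IRQ()
 *	outer call:  pending stays false, loop exits, active -> true
 *
 * The nested request is therefore not lost, and the handler is never
 * actually re-entered.
 */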
87 
88 #ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
89 static void irq_event_handler(struct time_travel_event *ev)
90 {
91 	struct irq_reg *reg = container_of(ev, struct irq_reg, event);
92 
93 	/* do nothing if suspended; just cause a wakeup and mark as pending */
94 	if (irqs_suspended) {
95 		irqs_pending = true;
96 		reg->pending_event = true;
97 		return;
98 	}
99 
100 	generic_handle_irq(reg->irq);
101 }
102 
103 static bool irq_do_timetravel_handler(struct irq_entry *entry,
104 				      enum um_irq_type t)
105 {
106 	struct irq_reg *reg = &entry->reg[t];
107 
108 	if (!reg->timetravel_handler)
109 		return false;
110 
111 	/*
112 	 * Handle all messages - we might get multiple even while
113 	 * interrupts are already suspended, due to suspend order
114 	 * etc. Note that time_travel_add_irq_event() will not add
115 	 * an event twice; if it's pending already, "first wins".
116 	 */
117 	reg->timetravel_handler(reg->irq, entry->fd, reg->id, &reg->event);
118 
119 	if (!reg->event.pending)
120 		return false;
121 
122 	return true;
123 }
124 
125 static void irq_do_pending_events(bool timetravel_handlers_only)
126 {
127 	struct irq_entry *entry;
128 
129 	if (!irqs_pending || timetravel_handlers_only)
130 		return;
131 
132 	irqs_pending = false;
133 
134 	list_for_each_entry(entry, &active_fds, list) {
135 		enum um_irq_type t;
136 
137 		for (t = 0; t < NUM_IRQ_TYPES; t++) {
138 			struct irq_reg *reg = &entry->reg[t];
139 
140 			/*
141 			 * Any timetravel_handler was already invoked, so just
142 			 * run the IRQ directly.
143 			 */
144 			if (reg->pending_event) {
145 				irq_enter();
146 				generic_handle_irq(reg->irq);
147 				irq_exit();
148 				reg->pending_event = false;
149 			}
150 		}
151 	}
152 }
153 #else
154 static bool irq_do_timetravel_handler(struct irq_entry *entry,
155 				      enum um_irq_type t)
156 {
157 	return false;
158 }
159 
160 static void irq_do_pending_events(bool timetravel_handlers_only)
161 {
162 }
163 #endif
164 
165 static void sigio_reg_handler(int idx, struct irq_entry *entry, enum um_irq_type t,
166 			      struct uml_pt_regs *regs,
167 			      bool timetravel_handlers_only)
168 {
169 	struct irq_reg *reg = &entry->reg[t];
170 
171 	if (!reg->events)
172 		return;
173 
174 	if (os_epoll_triggered(idx, reg->events) <= 0)
175 		return;
176 
177 	if (irq_do_timetravel_handler(entry, t))
178 		return;
179 
180 	/*
181 	 * If we're called to only run time-travel handlers then don't
182 	 * actually proceed but mark sigio as pending (if applicable).
183 	 * For suspend/resume, timetravel_handlers_only may be true
184 	 * despite time-travel not being configured and used.
185 	 */
186 	if (timetravel_handlers_only) {
187 #ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
188 		reg->pending_event = true;
189 		irqs_pending = true;
190 		mark_sigio_pending();
191 #endif
192 		return;
193 	}
194 
195 	irq_io_loop(reg, regs);
196 }
197 
198 static void _sigio_handler(struct uml_pt_regs *regs,
199 			   bool timetravel_handlers_only)
200 {
201 	struct irq_entry *irq_entry;
202 	int n, i;
203 
204 	if (timetravel_handlers_only && !um_irq_timetravel_handler_used())
205 		return;
206 
207 	/* Flush out pending events that were ignored due to time-travel. */
208 	if (!irqs_suspended)
209 		irq_do_pending_events(timetravel_handlers_only);
210 
211 	while (1) {
212 		/* This is now lockless - epoll keeps back-references to the irqs
213 		 * which have triggered it, so there is no need to walk the irq
214 		 * list and lock it every time. We avoid locking by turning off
215 		 * IO for a specific fd by executing os_del_epoll_fd(fd) before
216 		 * we make any changes to the actual data structures.
217 		 */
218 		n = os_waiting_for_events_epoll();
219 
220 		if (n <= 0) {
221 			if (n == -EINTR)
222 				continue;
223 			else
224 				break;
225 		}
226 
227 		for (i = 0; i < n ; i++) {
228 			enum um_irq_type t;
229 
230 			irq_entry = os_epoll_get_data_pointer(i);
231 
232 			for (t = 0; t < NUM_IRQ_TYPES; t++)
233 				sigio_reg_handler(i, irq_entry, t, regs,
234 						  timetravel_handlers_only);
235 		}
236 	}
237 
238 	if (!timetravel_handlers_only)
239 		free_irqs();
240 }
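/*
 * Editorial note: the loop above is driven entirely by epoll.
 * os_waiting_for_events_epoll() returns the number of ready events, and
 * os_epoll_get_data_pointer(i) returns the struct irq_entry pointer that
 * update_irq_entry() registered as the epoll data for that fd, which is why
 * no list walk or irq_lock is needed on this path.
 */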
241 
242 void sigio_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs,
243 		   void *mc)
244 {
245 	preempt_disable();
246 	_sigio_handler(regs, irqs_suspended);
247 	preempt_enable();
248 }
249 
250 static struct irq_entry *get_irq_entry_by_fd(int fd)
251 {
252 	struct irq_entry *walk;
253 
254 	lockdep_assert_held(&irq_lock);
255 
256 	list_for_each_entry(walk, &active_fds, list) {
257 		if (walk->fd == fd)
258 			return walk;
259 	}
260 
261 	return NULL;
262 }
263 
264 static void remove_irq_entry(struct irq_entry *to_free, bool remove)
265 {
266 	if (!to_free)
267 		return;
268 
269 	if (remove)
270 		os_del_epoll_fd(to_free->fd);
271 	list_del(&to_free->list);
272 }
273 
274 static bool update_irq_entry(struct irq_entry *entry)
275 {
276 	enum um_irq_type i;
277 	int events = 0;
278 
279 	for (i = 0; i < NUM_IRQ_TYPES; i++)
280 		events |= entry->reg[i].events;
281 
282 	if (events) {
283 		/* will modify (instead of add) if needed */
284 		os_add_epoll_fd(events, entry->fd, entry);
285 		return true;
286 	}
287 
288 	os_del_epoll_fd(entry->fd);
289 	return false;
290 }
291 
292 static struct irq_entry *update_or_remove_irq_entry(struct irq_entry *entry)
293 {
294 	if (update_irq_entry(entry))
295 		return NULL;
296 	remove_irq_entry(entry, false);
297 	return entry;
298 }
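/*
 * Editorial note: update_or_remove_irq_entry() returns the entry only when
 * it has no events left and was therefore unlinked from active_fds.  Callers
 * hold irq_lock at that point, so they stash the return value and kfree() it
 * only after dropping the lock, e.g. (pattern used by deactivate_fd() and
 * free_irq_by_irq_and_dev() below):
 *
 *	raw_spin_lock_irqsave(&irq_lock, flags);
 *	...
 *	to_free = update_or_remove_irq_entry(entry);
 *	raw_spin_unlock_irqrestore(&irq_lock, flags);
 *	kfree(to_free);		(kfree(NULL) is a no-op)
 */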
299 
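/*
 * Editorial note on the allocation dance in activate_fd(): irq_lock is taken
 * with interrupts disabled; if no entry exists for the fd yet, only the
 * spinlock is dropped around the kzalloc() (interrupts stay off, hence
 * GFP_ATOMIC), and after re-locking the lookup is repeated so that a racing
 * registration of the same fd is handled via the "already" label, with the
 * now-unneeded allocation freed through to_free.
 */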
300 static int activate_fd(int irq, int fd, enum um_irq_type type, void *dev_id,
301 		       void (*timetravel_handler)(int, int, void *,
302 						  struct time_travel_event *))
303 {
304 	struct irq_entry *irq_entry, *to_free = NULL;
305 	int err, events = os_event_mask(type);
306 	unsigned long flags;
307 
308 	err = os_set_fd_async(fd);
309 	if (err < 0)
310 		goto out;
311 
312 	raw_spin_lock_irqsave(&irq_lock, flags);
313 	irq_entry = get_irq_entry_by_fd(fd);
314 	if (irq_entry) {
315 already:
316 		/* cannot register the same FD twice with the same type */
317 		if (WARN_ON(irq_entry->reg[type].events)) {
318 			err = -EALREADY;
319 			goto out_unlock;
320 		}
321 
322 		/* temporarily disable to avoid IRQ-side locking */
323 		os_del_epoll_fd(fd);
324 	} else {
325 		struct irq_entry *new;
326 
327 		/* don't restore interrupts */
328 		raw_spin_unlock(&irq_lock);
329 		new = kzalloc(sizeof(*irq_entry), GFP_ATOMIC);
330 		if (!new) {
331 			local_irq_restore(flags);
332 			return -ENOMEM;
333 		}
334 		raw_spin_lock(&irq_lock);
335 		irq_entry = get_irq_entry_by_fd(fd);
336 		if (irq_entry) {
337 			to_free = new;
338 			goto already;
339 		}
340 		irq_entry = new;
341 		irq_entry->fd = fd;
342 		list_add_tail(&irq_entry->list, &active_fds);
343 		maybe_sigio_broken(fd);
344 	}
345 
346 	irq_entry->reg[type].id = dev_id;
347 	irq_entry->reg[type].irq = irq;
348 	irq_entry->reg[type].active = true;
349 	irq_entry->reg[type].events = events;
350 
351 #ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
352 	if (um_irq_timetravel_handler_used()) {
353 		irq_entry->reg[type].timetravel_handler = timetravel_handler;
354 		irq_entry->reg[type].event.fn = irq_event_handler;
355 	}
356 #endif
357 
358 	WARN_ON(!update_irq_entry(irq_entry));
359 	err = 0;
360 out_unlock:
361 	raw_spin_unlock_irqrestore(&irq_lock, flags);
362 out:
363 	kfree(to_free);
364 	return err;
365 }
366 
367 /*
368  * Remove the entry or entries for a specific FD. If you
369  * don't want to remove all the possible entries, use
370  * um_free_irq() or deactivate_fd() instead.
371  */
372 void free_irq_by_fd(int fd)
373 {
374 	struct irq_entry *to_free;
375 	unsigned long flags;
376 
377 	raw_spin_lock_irqsave(&irq_lock, flags);
378 	to_free = get_irq_entry_by_fd(fd);
379 	remove_irq_entry(to_free, true);
380 	raw_spin_unlock_irqrestore(&irq_lock, flags);
381 	kfree(to_free);
382 }
383 EXPORT_SYMBOL(free_irq_by_fd);
384 
385 static void free_irq_by_irq_and_dev(unsigned int irq, void *dev)
386 {
387 	struct irq_entry *entry, *to_free = NULL;
388 	unsigned long flags;
389 
390 	raw_spin_lock_irqsave(&irq_lock, flags);
391 	list_for_each_entry(entry, &active_fds, list) {
392 		enum um_irq_type i;
393 
394 		for (i = 0; i < NUM_IRQ_TYPES; i++) {
395 			struct irq_reg *reg = &entry->reg[i];
396 
397 			if (!reg->events)
398 				continue;
399 			if (reg->irq != irq)
400 				continue;
401 			if (reg->id != dev)
402 				continue;
403 
404 			os_del_epoll_fd(entry->fd);
405 			reg->events = 0;
406 			to_free = update_or_remove_irq_entry(entry);
407 			goto out;
408 		}
409 	}
410 out:
411 	raw_spin_unlock_irqrestore(&irq_lock, flags);
412 	kfree(to_free);
413 }
414 
415 void deactivate_fd(int fd, int irqnum)
416 {
417 	struct irq_entry *entry;
418 	unsigned long flags;
419 	enum um_irq_type i;
420 
421 	os_del_epoll_fd(fd);
422 
423 	raw_spin_lock_irqsave(&irq_lock, flags);
424 	entry = get_irq_entry_by_fd(fd);
425 	if (!entry)
426 		goto out;
427 
428 	for (i = 0; i < NUM_IRQ_TYPES; i++) {
429 		if (!entry->reg[i].events)
430 			continue;
431 		if (entry->reg[i].irq == irqnum)
432 			entry->reg[i].events = 0;
433 	}
434 
435 	entry = update_or_remove_irq_entry(entry);
436 out:
437 	raw_spin_unlock_irqrestore(&irq_lock, flags);
438 	kfree(entry);
439 
440 	ignore_sigio_fd(fd);
441 }
442 EXPORT_SYMBOL(deactivate_fd);
443 
444 /*
445  * Called just before shutdown in order to provide a clean exec
446  * environment in case the system is rebooting.  No locking because
447  * that would cause a pointless shutdown hang if something hadn't
448  * released the lock.
449  */
450 int deactivate_all_fds(void)
451 {
452 	struct irq_entry *entry;
453 
454 	/* Stop IO. The IRQ loop has no lock so this is our
455 	 * only way of making sure we are safe to dispose
456 	 * of all IRQ handlers
457 	 */
458 	os_set_ioignore();
459 
460 	/* we can no longer call kfree() here so just deactivate */
461 	list_for_each_entry(entry, &active_fds, list)
462 		os_del_epoll_fd(entry->fd);
463 	os_close_epoll_fd();
464 	return 0;
465 }
466 
467 /*
468  * do_IRQ handles all normal device IRQs (the special
469  * SMP cross-CPU interrupts have their own specific
470  * handlers).
471  */
472 unsigned int do_IRQ(int irq, struct uml_pt_regs *regs)
473 {
474 	struct pt_regs *old_regs = set_irq_regs((struct pt_regs *)regs);
475 	irq_enter();
476 	generic_handle_irq(irq);
477 	irq_exit();
478 	set_irq_regs(old_regs);
479 	return 1;
480 }
481 
482 void um_free_irq(int irq, void *dev)
483 {
484 	if (WARN(irq < 0 || irq > UM_LAST_SIGNAL_IRQ,
485 		 "freeing invalid irq %d", irq))
486 		return;
487 
488 	free_irq_by_irq_and_dev(irq, dev);
489 	free_irq(irq, dev);
490 	clear_bit(irq, irqs_allocated);
491 }
492 EXPORT_SYMBOL(um_free_irq);
493 
494 static int
495 _um_request_irq(int irq, int fd, enum um_irq_type type,
496 		irq_handler_t handler, unsigned long irqflags,
497 		const char *devname, void *dev_id,
498 		void (*timetravel_handler)(int, int, void *,
499 					   struct time_travel_event *))
500 {
501 	int err;
502 
503 	if (irq == UM_IRQ_ALLOC) {
504 		int i;
505 
506 		for (i = UM_FIRST_DYN_IRQ; i < NR_IRQS; i++) {
507 			if (!test_and_set_bit(i, irqs_allocated)) {
508 				irq = i;
509 				break;
510 			}
511 		}
512 	}
513 
514 	if (irq < 0)
515 		return -ENOSPC;
516 
517 	if (fd != -1) {
518 		err = activate_fd(irq, fd, type, dev_id, timetravel_handler);
519 		if (err)
520 			goto error;
521 	}
522 
523 	err = request_irq(irq, handler, irqflags, devname, dev_id);
524 	if (err < 0)
525 		goto error;
526 
527 	return irq;
528 error:
529 	clear_bit(irq, irqs_allocated);
530 	return err;
531 }
532 
533 int um_request_irq(int irq, int fd, enum um_irq_type type,
534 		   irq_handler_t handler, unsigned long irqflags,
535 		   const char *devname, void *dev_id)
536 {
537 	return _um_request_irq(irq, fd, type, handler, irqflags,
538 			       devname, dev_id, NULL);
539 }
540 EXPORT_SYMBOL(um_request_irq);
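/*
 * Editorial sketch (hypothetical driver code, not part of this file): a UML
 * driver that owns a host fd usually lets the IRQ number be allocated
 * dynamically and pairs the request with um_free_irq() on teardown.  IRQ_READ
 * is assumed here to be the um_irq_type meaning "fd became readable".
 *
 *	static irqreturn_t mydev_interrupt(int irq, void *dev_id)
 *	{
 *		struct mydev *dev = dev_id;
 *
 *		mydev_drain_fd(dev);	(hypothetical helper)
 *		return IRQ_HANDLED;
 *	}
 *
 *	irq = um_request_irq(UM_IRQ_ALLOC, fd, IRQ_READ,
 *			     mydev_interrupt, 0, "mydev", dev);
 *	if (irq < 0)
 *		return irq;
 *	...
 *	um_free_irq(irq, dev);	(also drops the fd registration for this irq)
 */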
541 
542 #ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
543 int um_request_irq_tt(int irq, int fd, enum um_irq_type type,
544 		      irq_handler_t handler, unsigned long irqflags,
545 		      const char *devname, void *dev_id,
546 		      void (*timetravel_handler)(int, int, void *,
547 						 struct time_travel_event *))
548 {
549 	return _um_request_irq(irq, fd, type, handler, irqflags,
550 			       devname, dev_id, timetravel_handler);
551 }
552 EXPORT_SYMBOL(um_request_irq_tt);
553 
554 void sigio_run_timetravel_handlers(void)
555 {
556 	_sigio_handler(NULL, true);
557 }
558 #endif
559 
560 #ifdef CONFIG_PM_SLEEP
561 void um_irqs_suspend(void)
562 {
563 	struct irq_entry *entry;
564 	unsigned long flags;
565 
566 	irqs_suspended = true;
567 
568 	raw_spin_lock_irqsave(&irq_lock, flags);
569 	list_for_each_entry(entry, &active_fds, list) {
570 		enum um_irq_type t;
571 		bool clear = true;
572 
573 		for (t = 0; t < NUM_IRQ_TYPES; t++) {
574 			if (!entry->reg[t].events)
575 				continue;
576 
577 			/*
578 			 * For the SIGIO_WRITE_IRQ, which is used to handle the
579 			 * SIGIO workaround thread, we need special handling:
580 			 * enable wake for it itself, but below we tell it about
581 			 * any FDs that should be suspended.
582 			 */
583 			if (entry->reg[t].wakeup ||
584 			    entry->reg[t].irq == SIGIO_WRITE_IRQ
585 #ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
586 			    || entry->reg[t].timetravel_handler
587 #endif
588 			    ) {
589 				clear = false;
590 				break;
591 			}
592 		}
593 
594 		if (clear) {
595 			entry->suspended = true;
596 			os_clear_fd_async(entry->fd);
597 			entry->sigio_workaround =
598 				!__ignore_sigio_fd(entry->fd);
599 		}
600 	}
601 	raw_spin_unlock_irqrestore(&irq_lock, flags);
602 }
603 
604 void um_irqs_resume(void)
605 {
606 	struct irq_entry *entry;
607 	unsigned long flags;
608 
609 
610 	raw_spin_lock_irqsave(&irq_lock, flags);
611 	list_for_each_entry(entry, &active_fds, list) {
612 		if (entry->suspended) {
613 			int err = os_set_fd_async(entry->fd);
614 
615 			WARN(err < 0, "os_set_fd_async returned %d\n", err);
616 			entry->suspended = false;
617 
618 			if (entry->sigio_workaround) {
619 				err = __add_sigio_fd(entry->fd);
620 				WARN(err < 0, "__add_sigio_fd returned %d\n", err);
621 			}
622 		}
623 	}
624 	raw_spin_unlock_irqrestore(&irq_lock, flags);
625 
626 	irqs_suspended = false;
627 	send_sigio_to_self();
628 }
629 
630 static int normal_irq_set_wake(struct irq_data *d, unsigned int on)
631 {
632 	struct irq_entry *entry;
633 	unsigned long flags;
634 
635 	raw_spin_lock_irqsave(&irq_lock, flags);
636 	list_for_each_entry(entry, &active_fds, list) {
637 		enum um_irq_type t;
638 
639 		for (t = 0; t < NUM_IRQ_TYPES; t++) {
640 			if (!entry->reg[t].events)
641 				continue;
642 
643 			if (entry->reg[t].irq != d->irq)
644 				continue;
645 			entry->reg[t].wakeup = on;
646 			goto unlock;
647 		}
648 	}
649 unlock:
650 	raw_spin_unlock_irqrestore(&irq_lock, flags);
651 	return 0;
652 }
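/*
 * Editorial sketch: normal_irq_set_wake() is installed as .irq_set_wake in
 * normal_irq_type below, so a driver marks its UML interrupt as a wakeup
 * source through the generic IRQ API (hypothetical driver code):
 *
 *	enable_irq_wake(irq);	(sets reg->wakeup, honoured by um_irqs_suspend())
 *	...
 *	disable_irq_wake(irq);	(clears it again)
 */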
653 #else
654 #define normal_irq_set_wake NULL
655 #endif
656 
657 /*
658  * irq_chip must define at least enable/disable and ack when
659  * the edge handler is used.
660  */
661 static void dummy(struct irq_data *d)
662 {
663 }
664 
665 /* This is used for everything other than the timer. */
666 static struct irq_chip normal_irq_type = {
667 	.name = "SIGIO",
668 	.irq_disable = dummy,
669 	.irq_enable = dummy,
670 	.irq_ack = dummy,
671 	.irq_mask = dummy,
672 	.irq_unmask = dummy,
673 	.irq_set_wake = normal_irq_set_wake,
674 };
675 
676 static struct irq_chip alarm_irq_type = {
677 	.name = "SIGALRM",
678 	.irq_disable = dummy,
679 	.irq_enable = dummy,
680 	.irq_ack = dummy,
681 	.irq_mask = dummy,
682 	.irq_unmask = dummy,
683 };
684 
685 void __init init_IRQ(void)
686 {
687 	int i;
688 
689 	irq_set_chip_and_handler(TIMER_IRQ, &alarm_irq_type, handle_percpu_irq);
690 
691 	for (i = 1; i < UM_LAST_SIGNAL_IRQ; i++)
692 		irq_set_chip_and_handler(i, &normal_irq_type, handle_edge_irq);
693 	/* Initialize the epoll loop */
694 	os_setup_epoll();
695 }
696 
697 int __init arch_probe_nr_irqs(void)
698 {
699 	return NR_IRQS;
700 }
701 
702 void sigchld_handler(int sig, struct siginfo *unused_si,
703 		     struct uml_pt_regs *regs, void *mc)
704 {
705 	do_IRQ(SIGCHLD_IRQ, regs);
706 }
707 
708 /*
709  * /proc/interrupts printing for arch specific interrupts
710  */
711 int arch_show_interrupts(struct seq_file *p, int prec)
712 {
713 #if IS_ENABLED(CONFIG_SMP)
714 	int cpu;
715 
716 	seq_printf(p, "%*s: ", prec, "RES");
717 	for_each_online_cpu(cpu)
718 		seq_printf(p, "%10u ", irq_stats(cpu)->irq_resched_count);
719 	seq_puts(p, "  Rescheduling interrupts\n");
720 
721 	seq_printf(p, "%*s: ", prec, "CAL");
722 	for_each_online_cpu(cpu)
723 		seq_printf(p, "%10u ", irq_stats(cpu)->irq_call_count);
724 	seq_puts(p, "  Function call interrupts\n");
725 #endif
726 
727 	return 0;
728 }
729