```diff
--- irq.c (7306e83ccf5ce3a324546d274945ec1981d78f9a)
+++ irq.c (ed1cd6deb013a11959d17a94e35ce159197632da)
 /*
  * Derived from arch/i386/kernel/irq.c
  * Copyright (C) 1992 Linus Torvalds
  * Adapted from arch/i386 by Gary Thomas
  * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
  * Updated and modified by Cort Dougan <cort@fsmlabs.com>
  * Copyright (C) 1996-2001 Cort Dougan
  * Adapted for Power Macintosh by Paul Mackerras
--- 659 unchanged lines hidden ---
         sirqtp = softirq_ctx[raw_smp_processor_id()];
 
         /* Already there ? */
         if (unlikely(curtp == irqtp || curtp == sirqtp)) {
                 __do_irq(regs);
                 set_irq_regs(old_regs);
                 return;
         }
-
-        /* Prepare the thread_info in the irq stack */
-        irqtp->task = curtp->task;
-        irqtp->flags = 0;
-
-        /* Copy the preempt_count so that the [soft]irq checks work. */
-        irqtp->preempt_count = curtp->preempt_count;
-
         /* Switch stack and call */
         call_do_irq(regs, irqtp);
 
-        /* Restore stack limit */
-        irqtp->task = NULL;
-
-        /* Copy back updates to the thread_info */
-        if (irqtp->flags)
-                set_bits(irqtp->flags, &curtp->flags);
-
         set_irq_regs(old_regs);
 }
 
 void __init init_IRQ(void)
 {
         if (ppc_md.init_IRQ)
                 ppc_md.init_IRQ();
-
-        exc_lvl_ctx_init();
-
-        irq_ctx_init();
 }
 
 #if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
 struct thread_info *critirq_ctx[NR_CPUS] __read_mostly;
 struct thread_info *dbgirq_ctx[NR_CPUS] __read_mostly;
 struct thread_info *mcheckirq_ctx[NR_CPUS] __read_mostly;
-
-void exc_lvl_ctx_init(void)
-{
-        struct thread_info *tp;
-        int i, cpu_nr;
-
-        for_each_possible_cpu(i) {
-#ifdef CONFIG_PPC64
-                cpu_nr = i;
-#else
-#ifdef CONFIG_SMP
-                cpu_nr = get_hard_smp_processor_id(i);
-#else
-                cpu_nr = 0;
 #endif
-#endif
 
-                tp = critirq_ctx[cpu_nr];
-                tp->cpu = cpu_nr;
-                tp->preempt_count = 0;
-
-#ifdef CONFIG_BOOKE
-                tp = dbgirq_ctx[cpu_nr];
-                tp->cpu = cpu_nr;
-                tp->preempt_count = 0;
-
-                tp = mcheckirq_ctx[cpu_nr];
-                tp->cpu = cpu_nr;
-                tp->preempt_count = HARDIRQ_OFFSET;
-#endif
-        }
-}
-#endif
-
 struct thread_info *softirq_ctx[NR_CPUS] __read_mostly;
 struct thread_info *hardirq_ctx[NR_CPUS] __read_mostly;
 
-void irq_ctx_init(void)
-{
-        struct thread_info *tp;
-        int i;
-
-        for_each_possible_cpu(i) {
-                tp = softirq_ctx[i];
-                tp->cpu = i;
-                klp_init_thread_info(tp);
-
-                tp = hardirq_ctx[i];
-                tp->cpu = i;
-                klp_init_thread_info(tp);
-        }
-}
-
 void do_softirq_own_stack(void)
 {
-        struct thread_info *curtp, *irqtp;
+        struct thread_info *irqtp;
 
-        curtp = current_thread_info();
         irqtp = softirq_ctx[smp_processor_id()];
-        irqtp->task = curtp->task;
-        irqtp->flags = 0;
         call_do_softirq(irqtp);
-        irqtp->task = NULL;
-
-        /* Set any flag that may have been set on the
-         * alternate stack
-         */
-        if (irqtp->flags)
-                set_bits(irqtp->flags, &curtp->flags);
 }
 
 irq_hw_number_t virq_to_hw(unsigned int virq)
 {
         struct irq_data *irq_data = irq_get_irq_data(virq);
         return WARN_ON(!irq_data) ? 0 : irq_data->hwirq;
 }
 EXPORT_SYMBOL_GPL(virq_to_hw);
--- 46 unchanged lines hidden ---
```
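
Every line removed on the left existed to maintain a `struct thread_info` copy at the base of the dedicated irq/softirq stacks (`task`, `flags`, `preempt_count`, `cpu`); in the newer revision only the stack switch itself remains, performed by `call_do_irq()`/`call_do_softirq()` on the per-CPU `hardirq_ctx`/`softirq_ctx` stacks. As a rough illustration of that one remaining step, running a handler on a separate, pre-allocated stack, here is a minimal userspace sketch. It is not the kernel code (those helpers are small assembly routines elsewhere in the tree); it leans on glibc's `ucontext` API instead, and every name in it is invented for the example.

```c
/*
 * Userspace analogue only: run a function on a dedicated, statically
 * allocated stack, in the same spirit as call_do_irq()/call_do_softirq()
 * switching to the per-CPU interrupt stacks.  Uses ucontext, not the
 * kernel's assembly thunks.
 */
#include <stdio.h>
#include <ucontext.h>

#define ALT_STACK_SIZE (64 * 1024)

static ucontext_t main_ctx, handler_ctx;
static unsigned char alt_stack[ALT_STACK_SIZE];

static void handler_on_alt_stack(void)
{
	int marker;

	/* &marker lives inside alt_stack[], showing the switch happened. */
	printf("handler local at %p\n", (void *)&marker);
	printf("alt_stack spans  [%p, %p)\n",
	       (void *)alt_stack, (void *)(alt_stack + ALT_STACK_SIZE));
}

int main(void)
{
	int marker;

	printf("main local at    %p\n", (void *)&marker);

	/* Point a context at the dedicated stack, then run the handler on it. */
	getcontext(&handler_ctx);
	handler_ctx.uc_stack.ss_sp = alt_stack;
	handler_ctx.uc_stack.ss_size = sizeof(alt_stack);
	handler_ctx.uc_link = &main_ctx;	/* return here when the handler ends */
	makecontext(&handler_ctx, handler_on_alt_stack, 0);

	swapcontext(&main_ctx, &handler_ctx);	/* switch stacks, run, come back */

	printf("back on the main stack\n");
	return 0;
}
```

Built with a plain `cc` and run, the handler's local address should fall inside `alt_stack[]` while `main()`'s does not, which is the whole point of handing the handler its own stack.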