/*
 *	linux/kernel/softirq.c
 *
 *	Copyright (C) 1992 Linus Torvalds
 *
 *	Distribute under GPLv2.
 *
 *	Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 *
 *	Remote softirq infrastructure is by Jens Axboe.
 */

#include <linux/module.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/ftrace.h>
#include <linux/smp.h>
#include <linux/tick.h>

#define CREATE_TRACE_POINTS
#include <trace/events/irq.h>

#include <asm/irq.h>
/*
   - No shared variables, all the data are CPU local.
   - If a softirq needs serialization, let it serialize itself
     by its own spinlocks.
   - Even if softirq is serialized, only local cpu is marked for
     execution. Hence, we get something sort of weak cpu binding.
     Though it is still not clear whether it will result in better
     locality or not.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized wrt itself.
 */
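/*
 * Illustrative sketch of the serialization rule above (not part of this
 * file): a softirq handler that needs serialization takes its own lock
 * around its own data; the softirq core imposes none.  my_lock and
 * my_queue are hypothetical names:
 *
 *	static void my_softirq_action(struct softirq_action *h)
 *	{
 *		spin_lock(&my_lock);		<- handler serializes itself
 *		... drain my_queue ...
 *		spin_unlock(&my_lock);
 *	}
 */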
#ifndef __ARCH_IRQ_STAT
irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
EXPORT_SYMBOL(irq_stat);
#endif

static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;

static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);

char *softirq_to_name[NR_SOFTIRQS] = {
	"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK",
	"TASKLET", "SCHED", "HRTIMER", "RCU"
};

/*
 * We cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst case 1/HZ latency
 * to the pending events, so let the scheduler balance
 * the softirq load for us.
 */
void wakeup_softirqd(void)
{
	/* Interrupts are disabled: no need to stop preemption */
	struct task_struct *tsk = __get_cpu_var(ksoftirqd);

	if (tsk && tsk->state != TASK_RUNNING)
		wake_up_process(tsk);
}

/*
 * This one is for softirq.c-internal use,
 * where hardirqs are disabled legitimately:
 */
#ifdef CONFIG_TRACE_IRQFLAGS
static void __local_bh_disable(unsigned long ip)
{
	unsigned long flags;

	WARN_ON_ONCE(in_irq());

	raw_local_irq_save(flags);
	/*
	 * The preempt tracer hooks into add_preempt_count and will break
	 * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
	 * is set and before current->softirq_enabled is cleared.
	 * We must manually increment preempt_count here and manually
	 * call the trace_preempt_off later.
	 */
	preempt_count() += SOFTIRQ_OFFSET;
	/*
	 * Were softirqs turned off above:
	 */
	if (softirq_count() == SOFTIRQ_OFFSET)
		trace_softirqs_off(ip);
	raw_local_irq_restore(flags);

	if (preempt_count() == SOFTIRQ_OFFSET)
		trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
}
#else /* !CONFIG_TRACE_IRQFLAGS */
static inline void __local_bh_disable(unsigned long ip)
{
	add_preempt_count(SOFTIRQ_OFFSET);
	barrier();
}
#endif /* CONFIG_TRACE_IRQFLAGS */

void local_bh_disable(void)
{
	__local_bh_disable((unsigned long)__builtin_return_address(0));
}

EXPORT_SYMBOL(local_bh_disable);

/*
 * Special-case - softirqs can safely be enabled in
 * cond_resched_softirq(), or by __do_softirq(),
 * without processing still-pending softirqs:
 */
void _local_bh_enable(void)
{
	WARN_ON_ONCE(in_irq());
	WARN_ON_ONCE(!irqs_disabled());

	if (softirq_count() == SOFTIRQ_OFFSET)
		trace_softirqs_on((unsigned long)__builtin_return_address(0));
	sub_preempt_count(SOFTIRQ_OFFSET);
}

EXPORT_SYMBOL(_local_bh_enable);

static inline void _local_bh_enable_ip(unsigned long ip)
{
	WARN_ON_ONCE(in_irq() || irqs_disabled());
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_disable();
#endif
	/*
	 * Are softirqs going to be turned on now:
	 */
	if (softirq_count() == SOFTIRQ_OFFSET)
		trace_softirqs_on(ip);
	/*
	 * Keep preemption disabled until we are done with
	 * softirq processing:
	 */
	sub_preempt_count(SOFTIRQ_OFFSET - 1);

	if (unlikely(!in_interrupt() && local_softirq_pending()))
		do_softirq();

	dec_preempt_count();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_enable();
#endif
	preempt_check_resched();
}

void local_bh_enable(void)
{
	_local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
EXPORT_SYMBOL(local_bh_enable);

void local_bh_enable_ip(unsigned long ip)
{
	_local_bh_enable_ip(ip);
}
EXPORT_SYMBOL(local_bh_enable_ip);
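/*
 * Usage sketch (illustrative only, not part of this file): process
 * context that shares data with a softirq or tasklet handler on the
 * local CPU brackets the access with local_bh_disable()/local_bh_enable()
 * (or the combined spin_lock_bh()/spin_unlock_bh() helpers).  my_lock,
 * my_list and req are hypothetical names:
 *
 *	local_bh_disable();			<- no softirq runs on this CPU now
 *	spin_lock(&my_lock);
 *	list_add_tail(&req->list, &my_list);	<- also drained from softirq
 *	spin_unlock(&my_lock);
 *	local_bh_enable();			<- runs pending softirqs if needed
 */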
/*
 * We restart softirq processing MAX_SOFTIRQ_RESTART times,
 * and we fall back to softirqd after that.
 *
 * This number has been established via experimentation.
 * The two things to balance are latency and fairness -
 * we want to handle softirqs as soon as possible, but they
 * should not be able to lock up the box.
 */
#define MAX_SOFTIRQ_RESTART 10

asmlinkage void __do_softirq(void)
{
	struct softirq_action *h;
	__u32 pending;
	int max_restart = MAX_SOFTIRQ_RESTART;
	int cpu;

	pending = local_softirq_pending();
	account_system_vtime(current);

	__local_bh_disable((unsigned long)__builtin_return_address(0));
	lockdep_softirq_enter();

	cpu = smp_processor_id();
restart:
	/* Reset the pending bitmask before enabling irqs */
	set_softirq_pending(0);

	local_irq_enable();

	h = softirq_vec;

	do {
		if (pending & 1) {
			int prev_count = preempt_count();

			trace_softirq_entry(h, softirq_vec);
			h->action(h);
			trace_softirq_exit(h, softirq_vec);
			if (unlikely(prev_count != preempt_count())) {
				printk(KERN_ERR "huh, entered softirq %td %s %p "
				       "with preempt_count %08x,"
				       " exited with %08x?\n", h - softirq_vec,
				       softirq_to_name[h - softirq_vec],
				       h->action, prev_count, preempt_count());
				preempt_count() = prev_count;
			}

			rcu_bh_qsctr_inc(cpu);
		}
		h++;
		pending >>= 1;
	} while (pending);

	local_irq_disable();

	pending = local_softirq_pending();
	if (pending && --max_restart)
		goto restart;

	if (pending)
		wakeup_softirqd();

	lockdep_softirq_exit();

	account_system_vtime(current);
	_local_bh_enable();
}

#ifndef __ARCH_HAS_DO_SOFTIRQ

asmlinkage void do_softirq(void)
{
	__u32 pending;
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	pending = local_softirq_pending();

	if (pending)
		__do_softirq();

	local_irq_restore(flags);
}

#endif

/*
 * Enter an interrupt context.
 */
void irq_enter(void)
{
	int cpu = smp_processor_id();

	rcu_irq_enter();
	if (idle_cpu(cpu) && !in_interrupt()) {
		__irq_enter();
		tick_check_idle(cpu);
	} else
		__irq_enter();
}

#ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
# define invoke_softirq()	__do_softirq()
#else
# define invoke_softirq()	do_softirq()
#endif

/*
 * Exit an interrupt context. Process softirqs if needed and possible:
 */
void irq_exit(void)
{
	account_system_vtime(current);
	trace_hardirq_exit();
	sub_preempt_count(IRQ_EXIT_OFFSET);
	if (!in_interrupt() && local_softirq_pending())
		invoke_softirq();

#ifdef CONFIG_NO_HZ
	/* Make sure that timer wheel updates are propagated */
	rcu_irq_exit();
	if (idle_cpu(smp_processor_id()) && !in_interrupt() && !need_resched())
		tick_nohz_stop_sched_tick(0);
#endif
	preempt_enable_no_resched();
}
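/*
 * Illustrative sketch (not part of this file): an architecture's
 * interrupt entry path is expected to bracket handler dispatch with
 * irq_enter()/irq_exit(), roughly:
 *
 *	irq_enter();
 *	generic_handle_irq(irq);	<- hardirq handlers may raise softirqs
 *	irq_exit();			<- pending softirqs are run here
 *
 * generic_handle_irq() stands in for whatever dispatch call the
 * architecture actually uses.
 */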
/*
 * This function must run with irqs disabled!
 */
inline void raise_softirq_irqoff(unsigned int nr)
{
	__raise_softirq_irqoff(nr);

	/*
	 * If we're in an interrupt or softirq, we're done
	 * (this also catches softirq-disabled code). We will
	 * actually run the softirq once we return from
	 * the irq or softirq.
	 *
	 * Otherwise we wake up ksoftirqd to make sure we
	 * schedule the softirq soon.
	 */
	if (!in_interrupt())
		wakeup_softirqd();
}

void raise_softirq(unsigned int nr)
{
	unsigned long flags;

	local_irq_save(flags);
	raise_softirq_irqoff(nr);
	local_irq_restore(flags);
}

void open_softirq(int nr, void (*action)(struct softirq_action *))
{
	softirq_vec[nr].action = action;
}
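/*
 * Usage sketch (illustrative only): a subsystem owning one of the
 * NR_SOFTIRQS slots registers its handler once at init time with
 * open_softirq() and later marks it pending with raise_softirq().
 * BLOCK_SOFTIRQ and my_softirq_action below are placeholders for
 * whichever softirq the caller actually owns:
 *
 *	static void my_softirq_action(struct softirq_action *h)
 *	{
 *		... drain this CPU's pending work ...
 *	}
 *
 *	open_softirq(BLOCK_SOFTIRQ, my_softirq_action);	<- boot-time init
 *
 *	raise_softirq(BLOCK_SOFTIRQ);		<- from irq or process context
 *	raise_softirq_irqoff(BLOCK_SOFTIRQ);	<- when irqs are already off
 */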
/* Tasklets */
struct tasklet_head
{
	struct tasklet_struct *head;
	struct tasklet_struct **tail;
};

static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);

void __tasklet_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	t->next = NULL;
	*__get_cpu_var(tasklet_vec).tail = t;
	__get_cpu_var(tasklet_vec).tail = &(t->next);
	raise_softirq_irqoff(TASKLET_SOFTIRQ);
	local_irq_restore(flags);
}

EXPORT_SYMBOL(__tasklet_schedule);

void __tasklet_hi_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	t->next = NULL;
	*__get_cpu_var(tasklet_hi_vec).tail = t;
	__get_cpu_var(tasklet_hi_vec).tail = &(t->next);
	raise_softirq_irqoff(HI_SOFTIRQ);
	local_irq_restore(flags);
}

EXPORT_SYMBOL(__tasklet_hi_schedule);

void __tasklet_hi_schedule_first(struct tasklet_struct *t)
{
	BUG_ON(!irqs_disabled());

	t->next = __get_cpu_var(tasklet_hi_vec).head;
	__get_cpu_var(tasklet_hi_vec).head = t;
	__raise_softirq_irqoff(HI_SOFTIRQ);
}

EXPORT_SYMBOL(__tasklet_hi_schedule_first);

static void tasklet_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __get_cpu_var(tasklet_vec).head;
	__get_cpu_var(tasklet_vec).head = NULL;
	__get_cpu_var(tasklet_vec).tail = &__get_cpu_var(tasklet_vec).head;
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = NULL;
		*__get_cpu_var(tasklet_vec).tail = t;
		__get_cpu_var(tasklet_vec).tail = &(t->next);
		__raise_softirq_irqoff(TASKLET_SOFTIRQ);
		local_irq_enable();
	}
}

static void tasklet_hi_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __get_cpu_var(tasklet_hi_vec).head;
	__get_cpu_var(tasklet_hi_vec).head = NULL;
	__get_cpu_var(tasklet_hi_vec).tail = &__get_cpu_var(tasklet_hi_vec).head;
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = NULL;
		*__get_cpu_var(tasklet_hi_vec).tail = t;
		__get_cpu_var(tasklet_hi_vec).tail = &(t->next);
		__raise_softirq_irqoff(HI_SOFTIRQ);
		local_irq_enable();
	}
}


void tasklet_init(struct tasklet_struct *t,
		  void (*func)(unsigned long), unsigned long data)
{
	t->next = NULL;
	t->state = 0;
	atomic_set(&t->count, 0);
	t->func = func;
	t->data = data;
}

EXPORT_SYMBOL(tasklet_init);

void tasklet_kill(struct tasklet_struct *t)
{
	if (in_interrupt())
		printk("Attempt to kill tasklet from interrupt\n");

	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
		do {
			yield();
		} while (test_bit(TASKLET_STATE_SCHED, &t->state));
	}
	tasklet_unlock_wait(t);
	clear_bit(TASKLET_STATE_SCHED, &t->state);
}

EXPORT_SYMBOL(tasklet_kill);
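/*
 * Usage sketch (illustrative only, not part of this file): a typical
 * driver deferring work from its interrupt handler to a tasklet.
 * my_dev, my_dev_irq and my_dev_tasklet_fn are hypothetical names:
 *
 *	static void my_dev_tasklet_fn(unsigned long data)
 *	{
 *		struct my_dev *dev = (struct my_dev *)data;
 *		... process completed work for dev ...
 *	}
 *
 *	tasklet_init(&dev->task, my_dev_tasklet_fn, (unsigned long)dev);
 *
 *	static irqreturn_t my_dev_irq(int irq, void *arg)
 *	{
 *		struct my_dev *dev = arg;
 *		... ack hardware ...
 *		tasklet_schedule(&dev->task);	<- runs later in softirq context
 *		return IRQ_HANDLED;
 *	}
 *
 *	tasklet_kill(&dev->task);		<- on teardown, never from irq
 */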
DEFINE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);
EXPORT_PER_CPU_SYMBOL(softirq_work_list);

static void __local_trigger(struct call_single_data *cp, int softirq)
{
	struct list_head *head = &__get_cpu_var(softirq_work_list[softirq]);

	list_add_tail(&cp->list, head);

	/* Trigger the softirq only if the list was previously empty. */
	if (head->next == &cp->list)
		raise_softirq_irqoff(softirq);
}

#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
static void remote_softirq_receive(void *data)
{
	struct call_single_data *cp = data;
	unsigned long flags;
	int softirq;

	softirq = cp->priv;

	local_irq_save(flags);
	__local_trigger(cp, softirq);
	local_irq_restore(flags);
}

static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
	if (cpu_online(cpu)) {
		cp->func = remote_softirq_receive;
		cp->info = cp;
		cp->flags = 0;
		cp->priv = softirq;

		__smp_call_function_single(cpu, cp, 0);
		return 0;
	}
	return 1;
}
#else /* CONFIG_USE_GENERIC_SMP_HELPERS */
static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
	return 1;
}
#endif

/**
 * __send_remote_softirq - try to schedule softirq work on a remote cpu
 * @cp: private SMP call function data area
 * @cpu: the remote cpu
 * @this_cpu: the currently executing cpu
 * @softirq: the softirq for the work
 *
 * Attempt to schedule softirq work on a remote cpu.  If this cannot be
 * done, the work is instead queued up on the local cpu.
 *
 * Interrupts must be disabled.
 */
void __send_remote_softirq(struct call_single_data *cp, int cpu, int this_cpu, int softirq)
{
	if (cpu == this_cpu || __try_remote_softirq(cp, cpu, softirq))
		__local_trigger(cp, softirq);
}
EXPORT_SYMBOL(__send_remote_softirq);

/**
 * send_remote_softirq - try to schedule softirq work on a remote cpu
 * @cp: private SMP call function data area
 * @cpu: the remote cpu
 * @softirq: the softirq for the work
 *
 * Like __send_remote_softirq except that disabling interrupts and
 * computing the current cpu is done for the caller.
 */
void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
	unsigned long flags;
	int this_cpu;

	local_irq_save(flags);
	this_cpu = smp_processor_id();
	__send_remote_softirq(cp, cpu, this_cpu, softirq);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(send_remote_softirq);
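/*
 * Usage sketch (illustrative only): a softirq owner that wants to push a
 * unit of work to another CPU embeds a struct call_single_data in its
 * request and hands it to send_remote_softirq(); the handler on the
 * target CPU then drains the per-CPU softirq_work_list[] slot for that
 * softirq.  my_req, req and BLOCK_SOFTIRQ are placeholder names:
 *
 *	struct my_req {
 *		struct call_single_data csd;	<- must stay valid until done
 *		...
 *	};
 *
 *	send_remote_softirq(&req->csd, target_cpu, BLOCK_SOFTIRQ);
 *
 * The handler walks __get_cpu_var(softirq_work_list[BLOCK_SOFTIRQ]) and
 * uses container_of() on each call_single_data to get back to my_req.
 */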
static int __cpuinit remote_softirq_cpu_notify(struct notifier_block *self,
					       unsigned long action, void *hcpu)
{
	/*
	 * If a CPU goes away, splice its entries to the current CPU
	 * and trigger a run of the softirq
	 */
	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
		int cpu = (unsigned long) hcpu;
		int i;

		local_irq_disable();
		for (i = 0; i < NR_SOFTIRQS; i++) {
			struct list_head *head = &per_cpu(softirq_work_list[i], cpu);
			struct list_head *local_head;

			if (list_empty(head))
				continue;

			local_head = &__get_cpu_var(softirq_work_list[i]);
			list_splice_init(head, local_head);
			raise_softirq_irqoff(i);
		}
		local_irq_enable();
	}

	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata remote_softirq_cpu_notifier = {
	.notifier_call	= remote_softirq_cpu_notify,
};

void __init softirq_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		int i;

		per_cpu(tasklet_vec, cpu).tail =
			&per_cpu(tasklet_vec, cpu).head;
		per_cpu(tasklet_hi_vec, cpu).tail =
			&per_cpu(tasklet_hi_vec, cpu).head;
		for (i = 0; i < NR_SOFTIRQS; i++)
			INIT_LIST_HEAD(&per_cpu(softirq_work_list[i], cpu));
	}

	register_hotcpu_notifier(&remote_softirq_cpu_notifier);

	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}

static int ksoftirqd(void * __bind_cpu)
{
	set_current_state(TASK_INTERRUPTIBLE);

	while (!kthread_should_stop()) {
		preempt_disable();
		if (!local_softirq_pending()) {
			preempt_enable_no_resched();
			schedule();
			preempt_disable();
		}

		__set_current_state(TASK_RUNNING);

		while (local_softirq_pending()) {
			/* Preempt disable stops cpu going offline.
			   If already offline, we'll be on wrong CPU:
			   don't process */
			if (cpu_is_offline((long)__bind_cpu))
				goto wait_to_die;
			do_softirq();
			preempt_enable_no_resched();
			cond_resched();
			preempt_disable();
			rcu_qsctr_inc((long)__bind_cpu);
		}
		preempt_enable();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;

wait_to_die:
	preempt_enable();
	/* Wait for kthread_stop */
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * tasklet_kill_immediate is called to remove a tasklet which can already be
 * scheduled for execution on @cpu.
 *
 * Unlike tasklet_kill, this function removes the tasklet
 * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
 *
 * When this function is called, @cpu must be in the CPU_DEAD state.
 */
void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
{
	struct tasklet_struct **i;

	BUG_ON(cpu_online(cpu));
	BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));

	if (!test_bit(TASKLET_STATE_SCHED, &t->state))
		return;

	/* CPU is dead, so no lock needed. */
	for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
		if (*i == t) {
			*i = t->next;
			/* If this was the tail element, move the tail ptr */
			if (*i == NULL)
				per_cpu(tasklet_vec, cpu).tail = i;
			return;
		}
	}
	BUG();
}

static void takeover_tasklets(unsigned int cpu)
{
	/* CPU is dead, so no lock needed. */
	local_irq_disable();

	/* Find end, append list for that CPU. */
	if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
		*(__get_cpu_var(tasklet_vec).tail) = per_cpu(tasklet_vec, cpu).head;
		__get_cpu_var(tasklet_vec).tail = per_cpu(tasklet_vec, cpu).tail;
		per_cpu(tasklet_vec, cpu).head = NULL;
		per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
	}
	raise_softirq_irqoff(TASKLET_SOFTIRQ);

	if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
		*__get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).head;
		__get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).tail;
		per_cpu(tasklet_hi_vec, cpu).head = NULL;
		per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
	}
	raise_softirq_irqoff(HI_SOFTIRQ);

	local_irq_enable();
}
#endif /* CONFIG_HOTPLUG_CPU */

static int __cpuinit cpu_callback(struct notifier_block *nfb,
				  unsigned long action,
				  void *hcpu)
{
	int hotcpu = (unsigned long)hcpu;
	struct task_struct *p;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		p = kthread_create(ksoftirqd, hcpu, "ksoftirqd/%d", hotcpu);
		if (IS_ERR(p)) {
			printk("ksoftirqd for %i failed\n", hotcpu);
			return NOTIFY_BAD;
		}
		kthread_bind(p, hotcpu);
		per_cpu(ksoftirqd, hotcpu) = p;
		break;
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		wake_up_process(per_cpu(ksoftirqd, hotcpu));
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		if (!per_cpu(ksoftirqd, hotcpu))
			break;
		/* Unbind so it can run.  Fall thru. */
		kthread_bind(per_cpu(ksoftirqd, hotcpu),
			     cpumask_any(cpu_online_mask));
	case CPU_DEAD:
	case CPU_DEAD_FROZEN: {
		struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

		p = per_cpu(ksoftirqd, hotcpu);
		per_cpu(ksoftirqd, hotcpu) = NULL;
		sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
		kthread_stop(p);
		takeover_tasklets(hotcpu);
		break;
	}
#endif /* CONFIG_HOTPLUG_CPU */
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata cpu_nfb = {
	.notifier_call = cpu_callback
};

static __init int spawn_ksoftirqd(void)
{
	void *cpu = (void *)(long)smp_processor_id();
	int err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);

	BUG_ON(err == NOTIFY_BAD);
	cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
	register_cpu_notifier(&cpu_nfb);
	return 0;
}
early_initcall(spawn_ksoftirqd);

#ifdef CONFIG_SMP
/*
 * Call a function on all processors
 */
int on_each_cpu(void (*func) (void *info), void *info, int wait)
{
	int ret = 0;

	preempt_disable();
	ret = smp_call_function(func, info, wait);
	local_irq_disable();
	func(info);
	local_irq_enable();
	preempt_enable();
	return ret;
}
EXPORT_SYMBOL(on_each_cpu);
#endif
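/*
 * Usage sketch (illustrative only): on_each_cpu() runs @func on every
 * online CPU, including the caller's, optionally waiting for completion.
 * my_flush_cpu_state is a hypothetical callback:
 *
 *	static void my_flush_cpu_state(void *info)
 *	{
 *		... runs on each CPU with interrupts disabled ...
 *	}
 *
 *	on_each_cpu(my_flush_cpu_state, NULL, 1);	<- 1 == wait for all CPUs
 */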
/*
 * [ These __weak aliases are kept in a separate compilation unit, so that
 *   GCC does not inline them incorrectly. ]
 */

int __init __weak early_irq_init(void)
{
	return 0;
}

int __init __weak arch_probe_nr_irqs(void)
{
	return 0;
}

int __init __weak arch_early_irq_init(void)
{
	return 0;
}

int __weak arch_init_chip_data(struct irq_desc *desc, int node)
{
	return 0;
}
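/*
 * Illustrative sketch (not from this file): an architecture overrides one
 * of these hooks by providing a non-weak definition in its own code,
 * e.g. in its arch irq setup file:
 *
 *	int __init arch_probe_nr_irqs(void)
 *	{
 *		return MY_ARCH_NR_IRQS;		<- hypothetical constant
 *	}
 *
 * The linker then picks the strong symbol over the __weak stub above.
 */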