/*
 * Xen SMP support
 *
 * This file implements the Xen versions of smp_ops. SMP under Xen is
 * very straightforward. Bringing a CPU up is simply a matter of
 * loading its initial context and setting it running.
 *
 * IPIs are handled through the Xen event mechanism.
 *
 * Because virtual CPUs can be scheduled onto any real CPU, there's no
 * useful topology information for the kernel to make use of. As a
 * result, all CPUs are treated as if they're single-core and
 * single-threaded.
 */
#include <linux/sched.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/irq_work.h>
#include <linux/tick.h>

#include <asm/paravirt.h>
#include <asm/desc.h>
#include <asm/pgtable.h>
#include <asm/cpu.h>

#include <xen/interface/xen.h>
#include <xen/interface/vcpu.h>

#include <asm/xen/interface.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/page.h>
#include <xen/events.h>

#include <xen/hvc-console.h>
#include "xen-ops.h"
#include "mmu.h"

cpumask_var_t xen_cpu_initialized_map;

static DEFINE_PER_CPU(int, xen_resched_irq);
static DEFINE_PER_CPU(int, xen_callfunc_irq);
static DEFINE_PER_CPU(int, xen_callfuncsingle_irq);
static DEFINE_PER_CPU(int, xen_irq_work);
static DEFINE_PER_CPU(int, xen_debug_irq) = -1;

static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id);
static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);
static irqreturn_t xen_irq_work_interrupt(int irq, void *dev_id);

/*
 * Reschedule call back.
 */
static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
{
        inc_irq_stat(irq_resched_count);
        scheduler_ipi();

        return IRQ_HANDLED;
}

static void __cpuinit cpu_bringup(void)
{
        int cpu;

        cpu_init();
        touch_softlockup_watchdog();
        preempt_disable();

        xen_enable_sysenter();
        xen_enable_syscall();

        cpu = smp_processor_id();
        smp_store_cpu_info(cpu);
        cpu_data(cpu).x86_max_cores = 1;
        set_cpu_sibling_map(cpu);

        xen_setup_cpu_clockevents();

        notify_cpu_starting(cpu);

        set_cpu_online(cpu, true);

        this_cpu_write(cpu_state, CPU_ONLINE);

        wmb();
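        /*
         * The boot CPU waits in xen_cpu_up() for cpu_state to reach
         * CPU_ONLINE, so make sure the stores above are pushed out
         * before we start taking interrupts.
         */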
        /* We can take interrupts now: we're officially "up". */
        local_irq_enable();

        wmb();                  /* make sure everything is out */
}

static void __cpuinit cpu_bringup_and_idle(void)
{
        cpu_bringup();
        cpu_startup_entry(CPUHP_ONLINE);
}

static int xen_smp_intr_init(unsigned int cpu)
{
        int rc;
        const char *resched_name, *callfunc_name, *debug_name;

        resched_name = kasprintf(GFP_KERNEL, "resched%d", cpu);
        rc = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR,
                                    cpu,
                                    xen_reschedule_interrupt,
                                    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
                                    resched_name,
                                    NULL);
        if (rc < 0)
                goto fail;
        per_cpu(xen_resched_irq, cpu) = rc;

        callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu);
        rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR,
                                    cpu,
                                    xen_call_function_interrupt,
                                    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
                                    callfunc_name,
                                    NULL);
        if (rc < 0)
                goto fail;
        per_cpu(xen_callfunc_irq, cpu) = rc;

        debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu);
        rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, xen_debug_interrupt,
                                     IRQF_DISABLED | IRQF_PERCPU | IRQF_NOBALANCING,
                                     debug_name, NULL);
        if (rc < 0)
                goto fail;
        per_cpu(xen_debug_irq, cpu) = rc;

        callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu);
        rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR,
                                    cpu,
                                    xen_call_function_single_interrupt,
                                    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
                                    callfunc_name,
                                    NULL);
        if (rc < 0)
                goto fail;
        per_cpu(xen_callfuncsingle_irq, cpu) = rc;

        /*
         * The IRQ worker on PVHVM goes through the native path and uses the
         * IPI mechanism.
         */
        if (xen_hvm_domain())
                return 0;

        callfunc_name = kasprintf(GFP_KERNEL, "irqwork%d", cpu);
        rc = bind_ipi_to_irqhandler(XEN_IRQ_WORK_VECTOR,
                                    cpu,
                                    xen_irq_work_interrupt,
                                    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
                                    callfunc_name,
                                    NULL);
        if (rc < 0)
                goto fail;
        per_cpu(xen_irq_work, cpu) = rc;

        return 0;

fail:
        if (per_cpu(xen_resched_irq, cpu) >= 0)
                unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu), NULL);
        if (per_cpu(xen_callfunc_irq, cpu) >= 0)
                unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
        if (per_cpu(xen_debug_irq, cpu) >= 0)
                unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
        if (per_cpu(xen_callfuncsingle_irq, cpu) >= 0)
                unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu),
                                       NULL);
        if (xen_hvm_domain())
                return rc;

        if (per_cpu(xen_irq_work, cpu) >= 0)
                unbind_from_irqhandler(per_cpu(xen_irq_work, cpu), NULL);

        return rc;
}

static void __init xen_fill_possible_map(void)
{
        int i, rc;

        if (xen_initial_domain())
                return;

        for (i = 0; i < nr_cpu_ids; i++) {
                rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
                if (rc >= 0) {
                        num_processors++;
                        set_cpu_possible(i, true);
                }
        }
}

static void __init xen_filter_cpu_maps(void)
{
        int i, rc;
        unsigned int subtract = 0;

        if (!xen_initial_domain())
                return;

        num_processors = 0;
        disabled_cpus = 0;
        for (i = 0; i < nr_cpu_ids; i++) {
                rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
                if (rc >= 0) {
                        num_processors++;
                        set_cpu_possible(i, true);
                } else {
                        set_cpu_possible(i, false);
                        set_cpu_present(i, false);
                        subtract++;
                }
        }
#ifdef CONFIG_HOTPLUG_CPU
        /* This is akin to using 'nr_cpus' on the Linux command line.
         * Which is OK as when we use 'dom0_max_vcpus=X' we can only
         * have up to X, while nr_cpu_ids is greater than X. This
         * normally is not a problem, except when CPU hotplugging
         * is involved and then there might be more than X CPUs
         * in the guest - which will not work as there is no
         * hypercall to expand the max number of VCPUs an already
         * running guest has. So cap it up to X. */
        if (subtract)
                nr_cpu_ids = nr_cpu_ids - subtract;
#endif

}

static void __init xen_smp_prepare_boot_cpu(void)
{
        BUG_ON(smp_processor_id() != 0);
        native_smp_prepare_boot_cpu();

        /* We've switched to the "real" per-cpu gdt, so make sure the
           old memory can be recycled */
        make_lowmem_page_readwrite(xen_initial_gdt);

        xen_filter_cpu_maps();
        xen_setup_vcpu_info_placement();
}

static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
{
        unsigned cpu;
        unsigned int i;

        if (skip_ioapic_setup) {
                char *m = (max_cpus == 0) ?
                        "The nosmp parameter is incompatible with Xen; " \
                        "use Xen dom0_max_vcpus=1 parameter" :
                        "The noapic parameter is incompatible with Xen";

                xen_raw_printk(m);
                panic(m);
        }
        xen_init_lock_cpu(0);

        smp_store_boot_cpu_info();
        cpu_data(0).x86_max_cores = 1;

        for_each_possible_cpu(i) {
                zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
                zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
                zalloc_cpumask_var(&per_cpu(cpu_llc_shared_map, i), GFP_KERNEL);
        }
        set_cpu_sibling_map(0);

        if (xen_smp_intr_init(0))
                BUG();

        if (!alloc_cpumask_var(&xen_cpu_initialized_map, GFP_KERNEL))
                panic("could not allocate xen_cpu_initialized_map\n");

        cpumask_copy(xen_cpu_initialized_map, cpumask_of(0));

        /* Restrict the possible_map according to max_cpus. */
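        /*
         * Walk down from the highest CPU id, clearing "possible" bits
         * until the count fits within max_cpus; whatever is still
         * possible is then marked present below.
         */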
        while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) {
                for (cpu = nr_cpu_ids - 1; !cpu_possible(cpu); cpu--)
                        continue;
                set_cpu_possible(cpu, false);
        }

        for_each_possible_cpu(cpu)
                set_cpu_present(cpu, true);
}

static int __cpuinit
cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
{
        struct vcpu_guest_context *ctxt;
        struct desc_struct *gdt;
        unsigned long gdt_mfn;

        if (cpumask_test_and_set_cpu(cpu, xen_cpu_initialized_map))
                return 0;

        ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
        if (ctxt == NULL)
                return -ENOMEM;

        gdt = get_cpu_gdt_table(cpu);

        ctxt->flags = VGCF_IN_KERNEL;
        ctxt->user_regs.ss = __KERNEL_DS;
#ifdef CONFIG_X86_32
        ctxt->user_regs.fs = __KERNEL_PERCPU;
        ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
#else
        ctxt->gs_base_kernel = per_cpu_offset(cpu);
#endif
        ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;

        memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));

        {
                ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
                ctxt->user_regs.ds = __USER_DS;
                ctxt->user_regs.es = __USER_DS;

                xen_copy_trap_info(ctxt->trap_ctxt);

                ctxt->ldt_ents = 0;

                BUG_ON((unsigned long)gdt & ~PAGE_MASK);

                gdt_mfn = arbitrary_virt_to_mfn(gdt);
                make_lowmem_page_readonly(gdt);
                make_lowmem_page_readonly(mfn_to_virt(gdt_mfn));

                ctxt->gdt_frames[0] = gdt_mfn;
                ctxt->gdt_ents = GDT_ENTRIES;

                ctxt->kernel_ss = __KERNEL_DS;
                ctxt->kernel_sp = idle->thread.sp0;

#ifdef CONFIG_X86_32
                ctxt->event_callback_cs = __KERNEL_CS;
                ctxt->failsafe_callback_cs = __KERNEL_CS;
#endif
                ctxt->event_callback_eip =
                        (unsigned long)xen_hypervisor_callback;
                ctxt->failsafe_callback_eip =
                        (unsigned long)xen_failsafe_callback;
        }
        ctxt->user_regs.cs = __KERNEL_CS;
        ctxt->user_regs.esp = idle->thread.sp0 - sizeof(struct pt_regs);

        per_cpu(xen_cr3, cpu) = __pa(swapper_pg_dir);
        ctxt->ctrlreg[3] = xen_pfn_to_cr3(virt_to_mfn(swapper_pg_dir));

        if (HYPERVISOR_vcpu_op(VCPUOP_initialise, cpu, ctxt))
                BUG();

        kfree(ctxt);
        return 0;
}

static int __cpuinit xen_cpu_up(unsigned int cpu, struct task_struct *idle)
{
        int rc;

        per_cpu(current_task, cpu) = idle;
#ifdef CONFIG_X86_32
        irq_ctx_init(cpu);
#else
        clear_tsk_thread_flag(idle, TIF_FORK);
        per_cpu(kernel_stack, cpu) =
                (unsigned long)task_stack_page(idle) -
                KERNEL_STACK_OFFSET + THREAD_SIZE;
#endif
        xen_setup_runstate_info(cpu);
        xen_setup_timer(cpu);
        xen_init_lock_cpu(cpu);

        per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;

        /* make sure interrupts start blocked */
        per_cpu(xen_vcpu, cpu)->evtchn_upcall_mask = 1;

        rc = cpu_initialize_context(cpu, idle);
        if (rc)
                return rc;

        if (num_online_cpus() == 1)
                /* Just in case we booted with a single CPU. */
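                /*
                 * Going from one CPU to two: switch the kernel back to
                 * its SMP alternatives (e.g. the LOCK prefixes that are
                 * patched out while only a single CPU is online).
                 */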
                alternatives_enable_smp();

        rc = xen_smp_intr_init(cpu);
        if (rc)
                return rc;

        rc = HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL);
        BUG_ON(rc);

        while (per_cpu(cpu_state, cpu) != CPU_ONLINE) {
                HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
                barrier();
        }

        return 0;
}

static void xen_smp_cpus_done(unsigned int max_cpus)
{
}

#ifdef CONFIG_HOTPLUG_CPU
static int xen_cpu_disable(void)
{
        unsigned int cpu = smp_processor_id();
        if (cpu == 0)
                return -EBUSY;

        cpu_disable_common();

        load_cr3(swapper_pg_dir);
        return 0;
}

static void xen_cpu_die(unsigned int cpu)
{
        while (xen_pv_domain() && HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL)) {
                current->state = TASK_UNINTERRUPTIBLE;
                schedule_timeout(HZ/10);
        }
        unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu), NULL);
        unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
        unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
        unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), NULL);
        if (!xen_hvm_domain())
                unbind_from_irqhandler(per_cpu(xen_irq_work, cpu), NULL);
        xen_uninit_lock_cpu(cpu);
        xen_teardown_timer(cpu);
}

static void __cpuinit xen_play_dead(void) /* used only with HOTPLUG_CPU */
{
        play_dead_common();
        HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
        cpu_bringup();
        /*
         * commit 4b0c0f294 (tick: Cleanup NOHZ per cpu data on cpu down)
         * clears certain data that the cpu_idle loop (which called us
         * and that we return from) expects. The only way to get that
         * data back is to call:
         */
        tick_nohz_idle_enter();
}

#else /* !CONFIG_HOTPLUG_CPU */
static int xen_cpu_disable(void)
{
        return -ENOSYS;
}

static void xen_cpu_die(unsigned int cpu)
{
        BUG();
}

static void xen_play_dead(void)
{
        BUG();
}

#endif
static void stop_self(void *v)
{
        int cpu = smp_processor_id();

        /* make sure we're not pinning something down */
        load_cr3(swapper_pg_dir);
        /* should set up a minimal gdt */

        set_cpu_online(cpu, false);

        HYPERVISOR_vcpu_op(VCPUOP_down, cpu, NULL);
        BUG();
}

static void xen_stop_other_cpus(int wait)
{
        smp_call_function(stop_self, NULL, wait);
}

static void xen_smp_send_reschedule(int cpu)
{
        xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
}

static void __xen_send_IPI_mask(const struct cpumask *mask,
                                int vector)
{
        unsigned cpu;

        for_each_cpu_and(cpu, mask, cpu_online_mask)
                xen_send_IPI_one(cpu, vector);
}

static void xen_smp_send_call_function_ipi(const struct cpumask *mask)
{
        int cpu;

        __xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);

        /* Make sure other vcpus get a chance to run if they need to. */
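        /*
         * If a target vCPU has had time stolen (it is currently
         * preempted by the hypervisor), yield this vCPU once so the
         * target can be scheduled to handle the IPI promptly.
         */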
        for_each_cpu(cpu, mask) {
                if (xen_vcpu_stolen(cpu)) {
                        HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
                        break;
                }
        }
}

static void xen_smp_send_call_function_single_ipi(int cpu)
{
        __xen_send_IPI_mask(cpumask_of(cpu),
                            XEN_CALL_FUNCTION_SINGLE_VECTOR);
}

static inline int xen_map_vector(int vector)
{
        int xen_vector;

        switch (vector) {
        case RESCHEDULE_VECTOR:
                xen_vector = XEN_RESCHEDULE_VECTOR;
                break;
        case CALL_FUNCTION_VECTOR:
                xen_vector = XEN_CALL_FUNCTION_VECTOR;
                break;
        case CALL_FUNCTION_SINGLE_VECTOR:
                xen_vector = XEN_CALL_FUNCTION_SINGLE_VECTOR;
                break;
        case IRQ_WORK_VECTOR:
                xen_vector = XEN_IRQ_WORK_VECTOR;
                break;
        default:
                xen_vector = -1;
                printk(KERN_ERR "xen: vector 0x%x is not implemented\n",
                       vector);
        }

        return xen_vector;
}

void xen_send_IPI_mask(const struct cpumask *mask,
                       int vector)
{
        int xen_vector = xen_map_vector(vector);

        if (xen_vector >= 0)
                __xen_send_IPI_mask(mask, xen_vector);
}

void xen_send_IPI_all(int vector)
{
        int xen_vector = xen_map_vector(vector);

        if (xen_vector >= 0)
                __xen_send_IPI_mask(cpu_online_mask, xen_vector);
}

void xen_send_IPI_self(int vector)
{
        int xen_vector = xen_map_vector(vector);

        if (xen_vector >= 0)
                xen_send_IPI_one(smp_processor_id(), xen_vector);
}

void xen_send_IPI_mask_allbutself(const struct cpumask *mask,
                                  int vector)
{
        unsigned cpu;
        unsigned int this_cpu = smp_processor_id();
        int xen_vector = xen_map_vector(vector);

        if (!(num_online_cpus() > 1) || (xen_vector < 0))
                return;

        for_each_cpu_and(cpu, mask, cpu_online_mask) {
                if (this_cpu == cpu)
                        continue;

                xen_send_IPI_one(cpu, xen_vector);
        }
}

void xen_send_IPI_allbutself(int vector)
{
        xen_send_IPI_mask_allbutself(cpu_online_mask, vector);
}

static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
{
        irq_enter();
        generic_smp_call_function_interrupt();
        inc_irq_stat(irq_call_count);
        irq_exit();

        return IRQ_HANDLED;
}

static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
{
        irq_enter();
        generic_smp_call_function_single_interrupt();
        inc_irq_stat(irq_call_count);
        irq_exit();

        return IRQ_HANDLED;
}

static irqreturn_t xen_irq_work_interrupt(int irq, void *dev_id)
{
        irq_enter();
        irq_work_run();
        inc_irq_stat(apic_irq_work_irqs);
        irq_exit();

        return IRQ_HANDLED;
}

static const struct smp_ops xen_smp_ops __initconst = {
        .smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu,
        .smp_prepare_cpus = xen_smp_prepare_cpus,
        .smp_cpus_done = xen_smp_cpus_done,

        .cpu_up = xen_cpu_up,
        .cpu_die = xen_cpu_die,
        .cpu_disable = xen_cpu_disable,
        .play_dead = xen_play_dead,

        .stop_other_cpus = xen_stop_other_cpus,
        .smp_send_reschedule = xen_smp_send_reschedule,

        .send_call_func_ipi = xen_smp_send_call_function_ipi,
        .send_call_func_single_ipi = xen_smp_send_call_function_single_ipi,
};

void __init xen_smp_init(void)
{
        smp_ops = xen_smp_ops;
        xen_fill_possible_map();
        xen_init_spinlocks();
}

static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
{
        native_smp_prepare_cpus(max_cpus);
        WARN_ON(xen_smp_intr_init(0));

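        /*
         * Set up CPU 0's side of the Xen PV spinlock support; the PV
         * path does the same in xen_smp_prepare_cpus() above.
         */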
        xen_init_lock_cpu(0);
}

static int __cpuinit xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle)
{
        int rc;
        rc = native_cpu_up(cpu, tidle);
        WARN_ON(xen_smp_intr_init(cpu));
        return rc;
}

static void xen_hvm_cpu_die(unsigned int cpu)
{
        xen_cpu_die(cpu);
        native_cpu_die(cpu);
}

void __init xen_hvm_smp_init(void)
{
        if (!xen_have_vector_callback)
                return;
        smp_ops.smp_prepare_cpus = xen_hvm_smp_prepare_cpus;
        smp_ops.smp_send_reschedule = xen_smp_send_reschedule;
        smp_ops.cpu_up = xen_hvm_cpu_up;
        smp_ops.cpu_die = xen_hvm_cpu_die;
        smp_ops.send_call_func_ipi = xen_smp_send_call_function_ipi;
        smp_ops.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi;
}