/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licensed under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>

#ifdef CONFIG_SMP
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);

static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain);

/* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock.
 */
static int cpu_hotplug_disabled;

static struct {
	struct task_struct *active_writer;
	struct mutex lock;	/*
				 * Synchronizes accesses to refcount, and
				 * also blocks new readers during an
				 * ongoing cpu hotplug operation.
				 */
	int refcount;
} cpu_hotplug = {
	.active_writer = NULL,
	.lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
	.refcount = 0,
};

#ifdef CONFIG_HOTPLUG_CPU

void get_online_cpus(void)
{
	might_sleep();
	if (cpu_hotplug.active_writer == current)
		return;
	mutex_lock(&cpu_hotplug.lock);
	cpu_hotplug.refcount++;
	mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(get_online_cpus);

void put_online_cpus(void)
{
	if (cpu_hotplug.active_writer == current)
		return;
	mutex_lock(&cpu_hotplug.lock);
	if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer))
		wake_up_process(cpu_hotplug.active_writer);
	mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(put_online_cpus);

#endif	/* CONFIG_HOTPLUG_CPU */

/*
 * The following two APIs must be used when attempting
 * to serialize the updates to cpu_online_mask, cpu_present_mask.
 */
void cpu_maps_update_begin(void)
{
	mutex_lock(&cpu_add_remove_lock);
}

void cpu_maps_update_done(void)
{
	mutex_unlock(&cpu_add_remove_lock);
}

/*
 * This ensures that the hotplug operation can begin only when the
 * refcount goes to zero.
 *
 * Note that during a cpu-hotplug operation, any new readers will be
 * blocked by cpu_hotplug.lock.
 *
 * Since cpu_hotplug_begin() is always called after invoking
 * cpu_maps_update_begin(), we can be sure that only one writer is active.
 *
 * Note that, theoretically, there is a possibility of a livelock:
 * - Refcount goes to zero, last reader wakes up the sleeping
 *   writer.
 * - Last reader unlocks the cpu_hotplug.lock.
 * - A new reader arrives at this moment, bumps up the refcount.
 * - The writer acquires the cpu_hotplug.lock, finds the refcount
 *   non-zero and goes to sleep again.
 *
 * However, this is very difficult to achieve in practice since
 * get_online_cpus() is not an API that is called all that often.
 */
static void cpu_hotplug_begin(void)
{
	cpu_hotplug.active_writer = current;

	for (;;) {
		mutex_lock(&cpu_hotplug.lock);
		if (likely(!cpu_hotplug.refcount))
			break;
		__set_current_state(TASK_UNINTERRUPTIBLE);
		mutex_unlock(&cpu_hotplug.lock);
		schedule();
	}
}

static void cpu_hotplug_done(void)
{
	cpu_hotplug.active_writer = NULL;
	mutex_unlock(&cpu_hotplug.lock);
}

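/*
 * Example (illustrative sketch, not part of this file): a typical reader
 * section pins the current set of online CPUs with the refcount above, so
 * that a writer sleeping in cpu_hotplug_begin() cannot proceed until the
 * reader is done.  do_something() stands in for a hypothetical per-cpu
 * operation.
 *
 *	unsigned int cpu;
 *
 *	get_online_cpus();
 *	for_each_online_cpu(cpu)
 *		do_something(cpu);
 *	put_online_cpus();
 *
 * get_online_cpus() may sleep (note the might_sleep() above), so this
 * pattern is only valid in process context.
 */
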
/* Need to know about CPUs going up/down? */
int __ref register_cpu_notifier(struct notifier_block *nb)
{
	int ret;
	cpu_maps_update_begin();
	ret = raw_notifier_chain_register(&cpu_chain, nb);
	cpu_maps_update_done();
	return ret;
}

#ifdef CONFIG_HOTPLUG_CPU

EXPORT_SYMBOL(register_cpu_notifier);

void __ref unregister_cpu_notifier(struct notifier_block *nb)
{
	cpu_maps_update_begin();
	raw_notifier_chain_unregister(&cpu_chain, nb);
	cpu_maps_update_done();
}
EXPORT_SYMBOL(unregister_cpu_notifier);

static inline void check_for_tasks(int cpu)
{
	struct task_struct *p;

	write_lock_irq(&tasklist_lock);
	for_each_process(p) {
		if (task_cpu(p) == cpu && p->state == TASK_RUNNING &&
		    (!cputime_eq(p->utime, cputime_zero) ||
		     !cputime_eq(p->stime, cputime_zero)))
			printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d "
				"(state = %ld, flags = %x)\n",
				p->comm, task_pid_nr(p), cpu,
				p->state, p->flags);
	}
	write_unlock_irq(&tasklist_lock);
}

struct take_cpu_down_param {
	struct task_struct *caller;
	unsigned long mod;
	void *hcpu;
};

/* Take this CPU down. */
static int __ref take_cpu_down(void *_param)
{
	struct take_cpu_down_param *param = _param;
	unsigned int cpu = (unsigned long)param->hcpu;
	int err;

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	raw_notifier_call_chain(&cpu_chain, CPU_DYING | param->mod,
				param->hcpu);

	if (task_cpu(param->caller) == cpu)
		move_task_off_dead_cpu(cpu, param->caller);
	/* Force idle task to run as soon as we yield: it should
	   immediately notice cpu is offline and die quickly. */
	sched_idle_next();
	return 0;
}

/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
	int err, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct take_cpu_down_param tcd_param = {
		.caller = current,
		.mod = mod,
		.hcpu = hcpu,
	};

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_online(cpu))
		return -EINVAL;

	cpu_hotplug_begin();
	set_cpu_active(cpu, false);
	err = __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE | mod,
					hcpu, -1, &nr_calls);
	if (err == NOTIFY_BAD) {
		set_cpu_active(cpu, true);
		nr_calls--;
		__raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod,
					  hcpu, nr_calls, NULL);
		printk("%s: attempt to take down CPU %u failed\n",
				__func__, cpu);
		err = -EINVAL;
		goto out_release;
	}

	err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
	if (err) {
		set_cpu_active(cpu, true);
		/* CPU didn't die: tell everyone.  Can't complain. */
		if (raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod,
					    hcpu) == NOTIFY_BAD)
			BUG();
		goto out_release;
	}
	BUG_ON(cpu_online(cpu));

	/* Wait for it to sleep (leaving idle task). */
	while (!idle_cpu(cpu))
		yield();

	/* This actually kills the CPU. */
	__cpu_die(cpu);

	/* CPU is completely dead: tell everyone.  Too late to complain. */
	if (raw_notifier_call_chain(&cpu_chain, CPU_DEAD | mod,
				    hcpu) == NOTIFY_BAD)
		BUG();

	check_for_tasks(cpu);

out_release:
	cpu_hotplug_done();
	if (!err) {
		if (raw_notifier_call_chain(&cpu_chain, CPU_POST_DEAD | mod,
					    hcpu) == NOTIFY_BAD)
			BUG();
	}
	return err;
}

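/*
 * Example (illustrative sketch, not part of this file): a subsystem that
 * keeps per-cpu state typically tracks the transitions driven by
 * _cpu_up()/_cpu_down() with a notifier on cpu_chain.  The foo_* names are
 * placeholders; hcpu carries the cpu number encoded as (void *)(long)cpu,
 * and masking off CPU_TASKS_FROZEN lets the frozen (suspend/resume)
 * variants of each event share the normal handling.
 *
 *	static int __cpuinit foo_cpu_callback(struct notifier_block *nfb,
 *					      unsigned long action, void *hcpu)
 *	{
 *		unsigned int cpu = (unsigned long)hcpu;
 *
 *		switch (action & ~CPU_TASKS_FROZEN) {
 *		case CPU_UP_PREPARE:
 *			if (foo_prepare_cpu(cpu))
 *				return NOTIFY_BAD;
 *			break;
 *		case CPU_UP_CANCELED:
 *		case CPU_DEAD:
 *			foo_release_cpu(cpu);
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block __cpuinitdata foo_cpu_notifier = {
 *		.notifier_call = foo_cpu_callback,
 *	};
 *
 * The block would be registered once, e.g. from an __init function, with
 * register_cpu_notifier(&foo_cpu_notifier), and removed (if ever) with
 * unregister_cpu_notifier().
 */
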
int __ref cpu_down(unsigned int cpu)
{
	int err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_down(cpu, 0);

out:
	cpu_maps_update_done();
	return err;
}
EXPORT_SYMBOL(cpu_down);
#endif /* CONFIG_HOTPLUG_CPU */

/* Requires cpu_add_remove_lock to be held */
static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
{
	int ret, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;

	if (cpu_online(cpu) || !cpu_present(cpu))
		return -EINVAL;

	cpu_hotplug_begin();
	ret = __raw_notifier_call_chain(&cpu_chain, CPU_UP_PREPARE | mod, hcpu,
					-1, &nr_calls);
	if (ret == NOTIFY_BAD) {
		nr_calls--;
		printk("%s: attempt to bring up CPU %u failed\n",
				__func__, cpu);
		ret = -EINVAL;
		goto out_notify;
	}

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu);
	if (ret != 0)
		goto out_notify;
	BUG_ON(!cpu_online(cpu));

	set_cpu_active(cpu, true);

	/* Now call notifier in preparation. */
	raw_notifier_call_chain(&cpu_chain, CPU_ONLINE | mod, hcpu);

out_notify:
	if (ret != 0)
		__raw_notifier_call_chain(&cpu_chain,
				CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
	cpu_hotplug_done();

	return ret;
}

int __cpuinit cpu_up(unsigned int cpu)
{
	int err = 0;

#ifdef CONFIG_MEMORY_HOTPLUG
	int nid;
	pg_data_t *pgdat;
#endif

	if (!cpu_possible(cpu)) {
		printk(KERN_ERR "can't online cpu %d because it is not "
			"configured as may-hotadd at boot time\n", cpu);
#if defined(CONFIG_IA64)
		printk(KERN_ERR "please check additional_cpus= boot "
				"parameter\n");
#endif
		return -EINVAL;
	}

#ifdef CONFIG_MEMORY_HOTPLUG
	nid = cpu_to_node(cpu);
	if (!node_online(nid)) {
		err = mem_online_node(nid);
		if (err)
			return err;
	}

	pgdat = NODE_DATA(nid);
	if (!pgdat) {
		printk(KERN_ERR
			"Can't online cpu %d due to NULL pgdat\n", cpu);
		return -ENOMEM;
	}

	if (pgdat->node_zonelists->_zonerefs->zone == NULL) {
		mutex_lock(&zonelists_mutex);
		build_all_zonelists(NULL);
		mutex_unlock(&zonelists_mutex);
	}
#endif

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_up(cpu, 0);

out:
	cpu_maps_update_done();
	return err;
}

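/*
 * Illustrative note (sketch, not part of this file): cpu_up() and cpu_down()
 * are the locked entry points used by the rest of the kernel, for example
 * when the remaining CPUs are brought up at boot or when userspace toggles a
 * CPU's sysfs "online" attribute.  A caller translating an online/offline
 * request (want_online is a hypothetical flag) might do, roughly:
 *
 *	err = want_online ? cpu_up(cpu) : cpu_down(cpu);
 *	if (err)
 *		printk(KERN_ERR "CPU%u hotplug failed: %d\n", cpu, err);
 *
 * Both paths serialize on cpu_add_remove_lock via cpu_maps_update_begin(),
 * and both back off with -EBUSY while cpu_hotplug_disabled is set.
 */
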
#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;

int disable_nonboot_cpus(void)
{
	int cpu, first_cpu, error;

	cpu_maps_update_begin();
	first_cpu = cpumask_first(cpu_online_mask);
	/*
	 * We take down all of the non-boot CPUs in one shot to avoid races
	 * with userspace trying to use CPU hotplug at the same time.
	 */
	cpumask_clear(frozen_cpus);

	printk("Disabling non-boot CPUs ...\n");
	for_each_online_cpu(cpu) {
		if (cpu == first_cpu)
			continue;
		error = _cpu_down(cpu, 1);
		if (!error)
			cpumask_set_cpu(cpu, frozen_cpus);
		else {
			printk(KERN_ERR "Error taking CPU%d down: %d\n",
				cpu, error);
			break;
		}
	}

	if (!error) {
		BUG_ON(num_online_cpus() > 1);
		/* Make sure the CPUs won't be enabled by someone else */
		cpu_hotplug_disabled = 1;
	} else {
		printk(KERN_ERR "Non-boot CPUs are not disabled\n");
	}
	cpu_maps_update_done();
	return error;
}

void __weak arch_enable_nonboot_cpus_begin(void)
{
}

void __weak arch_enable_nonboot_cpus_end(void)
{
}

void __ref enable_nonboot_cpus(void)
{
	int cpu, error;

	/* Allow everyone to use the CPU hotplug again */
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 0;
	if (cpumask_empty(frozen_cpus))
		goto out;

	printk("Enabling non-boot CPUs ...\n");

	arch_enable_nonboot_cpus_begin();

	for_each_cpu(cpu, frozen_cpus) {
		error = _cpu_up(cpu, 1);
		if (!error) {
			printk("CPU%d is up\n", cpu);
			continue;
		}
		printk(KERN_WARNING "Error taking CPU%d up: %d\n", cpu, error);
	}

	arch_enable_nonboot_cpus_end();

	cpumask_clear(frozen_cpus);
out:
	cpu_maps_update_done();
}

static int alloc_frozen_cpus(void)
{
	if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
		return -ENOMEM;
	return 0;
}
core_initcall(alloc_frozen_cpus);
#endif /* CONFIG_PM_SLEEP_SMP */

/**
 * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
 * @cpu: cpu that just started
 *
 * This function calls the cpu_chain notifiers with CPU_STARTING.
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void __cpuinit notify_cpu_starting(unsigned int cpu)
{
	unsigned long val = CPU_STARTING;

#ifdef CONFIG_PM_SLEEP_SMP
	if (frozen_cpus != NULL && cpumask_test_cpu(cpu, frozen_cpus))
		val = CPU_STARTING_FROZEN;
#endif /* CONFIG_PM_SLEEP_SMP */
	raw_notifier_call_chain(&cpu_chain, val, (void *)(long)cpu);
}

#endif /* CONFIG_SMP */

/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents all of the NR_CPUS single-bit masks, i.e. the values 1<<nr
 * for every bit number nr.
 *
 * It is used by cpumask_of() to get a constant address for a CPU
 * mask value that has only a single bit set.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)	[x+1][0] = 1UL << (x)
#define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {
	MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
	MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
	MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
	MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);

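/*
 * Illustrative sketch (the real definition lives in <linux/cpumask.h>, not
 * here): cpumask_of(cpu) resolves to roughly the following.  Row
 * 1 + cpu % BITS_PER_LONG has exactly bit cpu % BITS_PER_LONG set in its
 * first word; stepping the pointer back by cpu / BITS_PER_LONG longs (the
 * all-zero row 0 is what gives it room to back into) leaves that single bit
 * at position cpu of the NR_CPUS-bit mask the returned pointer describes.
 *
 *	static inline const struct cpumask *get_cpu_mask(unsigned int cpu)
 *	{
 *		const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
 *		p -= cpu / BITS_PER_LONG;
 *		return to_cpumask(p);
 *	}
 *	#define cpumask_of(cpu) (get_cpu_mask(cpu))
 */
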
495 */ 496 497 /* cpu_bit_bitmap[0] is empty - so we can back into it */ 498 #define MASK_DECLARE_1(x) [x+1][0] = 1UL << (x) 499 #define MASK_DECLARE_2(x) MASK_DECLARE_1(x), MASK_DECLARE_1(x+1) 500 #define MASK_DECLARE_4(x) MASK_DECLARE_2(x), MASK_DECLARE_2(x+2) 501 #define MASK_DECLARE_8(x) MASK_DECLARE_4(x), MASK_DECLARE_4(x+4) 502 503 const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = { 504 505 MASK_DECLARE_8(0), MASK_DECLARE_8(8), 506 MASK_DECLARE_8(16), MASK_DECLARE_8(24), 507 #if BITS_PER_LONG > 32 508 MASK_DECLARE_8(32), MASK_DECLARE_8(40), 509 MASK_DECLARE_8(48), MASK_DECLARE_8(56), 510 #endif 511 }; 512 EXPORT_SYMBOL_GPL(cpu_bit_bitmap); 513 514 const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL; 515 EXPORT_SYMBOL(cpu_all_bits); 516 517 #ifdef CONFIG_INIT_ALL_POSSIBLE 518 static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly 519 = CPU_BITS_ALL; 520 #else 521 static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly; 522 #endif 523 const struct cpumask *const cpu_possible_mask = to_cpumask(cpu_possible_bits); 524 EXPORT_SYMBOL(cpu_possible_mask); 525 526 static DECLARE_BITMAP(cpu_online_bits, CONFIG_NR_CPUS) __read_mostly; 527 const struct cpumask *const cpu_online_mask = to_cpumask(cpu_online_bits); 528 EXPORT_SYMBOL(cpu_online_mask); 529 530 static DECLARE_BITMAP(cpu_present_bits, CONFIG_NR_CPUS) __read_mostly; 531 const struct cpumask *const cpu_present_mask = to_cpumask(cpu_present_bits); 532 EXPORT_SYMBOL(cpu_present_mask); 533 534 static DECLARE_BITMAP(cpu_active_bits, CONFIG_NR_CPUS) __read_mostly; 535 const struct cpumask *const cpu_active_mask = to_cpumask(cpu_active_bits); 536 EXPORT_SYMBOL(cpu_active_mask); 537 538 void set_cpu_possible(unsigned int cpu, bool possible) 539 { 540 if (possible) 541 cpumask_set_cpu(cpu, to_cpumask(cpu_possible_bits)); 542 else 543 cpumask_clear_cpu(cpu, to_cpumask(cpu_possible_bits)); 544 } 545 546 void set_cpu_present(unsigned int cpu, bool present) 547 { 548 if (present) 549 cpumask_set_cpu(cpu, to_cpumask(cpu_present_bits)); 550 else 551 cpumask_clear_cpu(cpu, to_cpumask(cpu_present_bits)); 552 } 553 554 void set_cpu_online(unsigned int cpu, bool online) 555 { 556 if (online) 557 cpumask_set_cpu(cpu, to_cpumask(cpu_online_bits)); 558 else 559 cpumask_clear_cpu(cpu, to_cpumask(cpu_online_bits)); 560 } 561 562 void set_cpu_active(unsigned int cpu, bool active) 563 { 564 if (active) 565 cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits)); 566 else 567 cpumask_clear_cpu(cpu, to_cpumask(cpu_active_bits)); 568 } 569 570 void init_cpu_present(const struct cpumask *src) 571 { 572 cpumask_copy(to_cpumask(cpu_present_bits), src); 573 } 574 575 void init_cpu_possible(const struct cpumask *src) 576 { 577 cpumask_copy(to_cpumask(cpu_possible_bits), src); 578 } 579 580 void init_cpu_online(const struct cpumask *src) 581 { 582 cpumask_copy(to_cpumask(cpu_online_bits), src); 583 } 584