/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licensed under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>

#ifdef CONFIG_SMP
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);

static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain);

/* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock
 */
static int cpu_hotplug_disabled;

static struct {
	struct task_struct *active_writer;
	struct mutex lock; /* Synchronizes accesses to refcount, */
	/*
	 * Also blocks the new readers during
	 * an ongoing cpu hotplug operation.
	 */
	int refcount;
} cpu_hotplug = {
	.active_writer = NULL,
	.lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
	.refcount = 0,
};

#ifdef CONFIG_HOTPLUG_CPU

void get_online_cpus(void)
{
	might_sleep();
	if (cpu_hotplug.active_writer == current)
		return;
	mutex_lock(&cpu_hotplug.lock);
	cpu_hotplug.refcount++;
	mutex_unlock(&cpu_hotplug.lock);

}
EXPORT_SYMBOL_GPL(get_online_cpus);

void put_online_cpus(void)
{
	if (cpu_hotplug.active_writer == current)
		return;
	mutex_lock(&cpu_hotplug.lock);
	if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer))
		wake_up_process(cpu_hotplug.active_writer);
	mutex_unlock(&cpu_hotplug.lock);

}
EXPORT_SYMBOL_GPL(put_online_cpus);

#endif /* CONFIG_HOTPLUG_CPU */

/*
 * The following two APIs must be used when attempting
 * to serialize the updates to cpu_online_mask, cpu_present_mask.
 */
void cpu_maps_update_begin(void)
{
	mutex_lock(&cpu_add_remove_lock);
}

void cpu_maps_update_done(void)
{
	mutex_unlock(&cpu_add_remove_lock);
}

/*
 * This ensures that the hotplug operation can begin only when the
 * refcount goes to zero.
 *
 * Note that during a cpu-hotplug operation, the new readers, if any,
 * will be blocked by the cpu_hotplug.lock
 *
 * Since cpu_hotplug_begin() is always called after invoking
 * cpu_maps_update_begin(), we can be sure that only one writer is active.
 *
 * Note that theoretically, there is a possibility of a livelock:
 * - Refcount goes to zero, last reader wakes up the sleeping
 *   writer.
 * - Last reader unlocks the cpu_hotplug.lock.
 * - A new reader arrives at this moment, bumps up the refcount.
 * - The writer acquires the cpu_hotplug.lock, finds the refcount
 *   non zero and goes to sleep again.
 *
 * However, this is very difficult to achieve in practice since
 * get_online_cpus() is not an API which is called all that often.
 *
 */
static void cpu_hotplug_begin(void)
{
	cpu_hotplug.active_writer = current;

	for (;;) {
		mutex_lock(&cpu_hotplug.lock);
		if (likely(!cpu_hotplug.refcount))
			break;
		__set_current_state(TASK_UNINTERRUPTIBLE);
		mutex_unlock(&cpu_hotplug.lock);
		schedule();
	}
}

static void cpu_hotplug_done(void)
{
	cpu_hotplug.active_writer = NULL;
	mutex_unlock(&cpu_hotplug.lock);
}
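
/*
 * Example (illustrative sketch only, not used by this file): the reader
 * side of the lock above.  Code that needs cpu_online_mask to stay stable
 * brackets the region with get_online_cpus()/put_online_cpus(); the helper
 * name below is hypothetical, the calls are the real API.
 *
 *	static void example_walk_online_cpus(void)
 *	{
 *		int cpu;
 *
 *		get_online_cpus();
 *		for_each_online_cpu(cpu)
 *			pr_info("cpu %d is online\n", cpu);
 *		put_online_cpus();
 *	}
 */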

/* Need to know about CPUs going up/down? */
int __ref register_cpu_notifier(struct notifier_block *nb)
{
	int ret;
	cpu_maps_update_begin();
	ret = raw_notifier_chain_register(&cpu_chain, nb);
	cpu_maps_update_done();
	return ret;
}

#ifdef CONFIG_HOTPLUG_CPU

EXPORT_SYMBOL(register_cpu_notifier);

void __ref unregister_cpu_notifier(struct notifier_block *nb)
{
	cpu_maps_update_begin();
	raw_notifier_chain_unregister(&cpu_chain, nb);
	cpu_maps_update_done();
}
EXPORT_SYMBOL(unregister_cpu_notifier);
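
/*
 * Example (illustrative sketch only): a typical notifier that a subsystem
 * might hook into cpu_chain with register_cpu_notifier().  The callback
 * name and the per-cpu setup/teardown helpers are hypothetical; the action
 * values and the NOTIFY_OK return are the ones this file dispatches.
 *
 *	static int example_cpu_callback(struct notifier_block *nb,
 *					unsigned long action, void *hcpu)
 *	{
 *		unsigned int cpu = (unsigned long)hcpu;
 *
 *		switch (action) {
 *		case CPU_ONLINE:
 *		case CPU_ONLINE_FROZEN:
 *			example_setup_cpu(cpu);		<-- hypothetical helper
 *			break;
 *		case CPU_DEAD:
 *		case CPU_DEAD_FROZEN:
 *			example_teardown_cpu(cpu);	<-- hypothetical helper
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block example_cpu_notifier = {
 *		.notifier_call = example_cpu_callback,
 *	};
 *
 * register_cpu_notifier(&example_cpu_notifier) adds it to the chain;
 * unregister_cpu_notifier() removes it again.
 */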

static inline void check_for_tasks(int cpu)
{
	struct task_struct *p;

	write_lock_irq(&tasklist_lock);
	for_each_process(p) {
		if (task_cpu(p) == cpu && p->state == TASK_RUNNING &&
		    (!cputime_eq(p->utime, cputime_zero) ||
		     !cputime_eq(p->stime, cputime_zero)))
			printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d "
				"(state = %ld, flags = %x)\n",
				p->comm, task_pid_nr(p), cpu,
				p->state, p->flags);
	}
	write_unlock_irq(&tasklist_lock);
}

struct take_cpu_down_param {
	struct task_struct *caller;
	unsigned long mod;
	void *hcpu;
};

/* Take this CPU down. */
static int __ref take_cpu_down(void *_param)
{
	struct take_cpu_down_param *param = _param;
	unsigned int cpu = (unsigned long)param->hcpu;
	int err;

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	raw_notifier_call_chain(&cpu_chain, CPU_DYING | param->mod,
				param->hcpu);

	if (task_cpu(param->caller) == cpu)
		move_task_off_dead_cpu(cpu, param->caller);
	/* Force idle task to run as soon as we yield: it should
	   immediately notice cpu is offline and die quickly. */
	sched_idle_next();
	return 0;
}

/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
	int err, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct take_cpu_down_param tcd_param = {
		.caller = current,
		.mod = mod,
		.hcpu = hcpu,
	};

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_online(cpu))
		return -EINVAL;

	cpu_hotplug_begin();
	set_cpu_active(cpu, false);
	err = __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE | mod,
					hcpu, -1, &nr_calls);
	if (err == NOTIFY_BAD) {
		set_cpu_active(cpu, true);

		nr_calls--;
		__raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod,
					  hcpu, nr_calls, NULL);
		printk("%s: attempt to take down CPU %u failed\n",
				__func__, cpu);
		err = -EINVAL;
		goto out_release;
	}

	err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
	if (err) {
		set_cpu_active(cpu, true);
		/* CPU didn't die: tell everyone.  Can't complain. */
		if (raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod,
					    hcpu) == NOTIFY_BAD)
			BUG();

		goto out_release;
	}
	BUG_ON(cpu_online(cpu));

	/* Wait for it to sleep (leaving idle task). */
	while (!idle_cpu(cpu))
		yield();

	/* This actually kills the CPU. */
	__cpu_die(cpu);

	/* CPU is completely dead: tell everyone.  Too late to complain. */
	if (raw_notifier_call_chain(&cpu_chain, CPU_DEAD | mod,
				    hcpu) == NOTIFY_BAD)
		BUG();

	check_for_tasks(cpu);

out_release:
	cpu_hotplug_done();
	if (!err) {
		if (raw_notifier_call_chain(&cpu_chain, CPU_POST_DEAD | mod,
					    hcpu) == NOTIFY_BAD)
			BUG();
	}
	return err;
}

int __ref cpu_down(unsigned int cpu)
{
	int err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_down(cpu, 0);

out:
	cpu_maps_update_done();
	return err;
}
EXPORT_SYMBOL(cpu_down);
#endif /*CONFIG_HOTPLUG_CPU*/

/* Requires cpu_add_remove_lock to be held */
static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
{
	int ret, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;

	if (cpu_online(cpu) || !cpu_present(cpu))
		return -EINVAL;

	cpu_hotplug_begin();
	ret = __raw_notifier_call_chain(&cpu_chain, CPU_UP_PREPARE | mod, hcpu,
					-1, &nr_calls);
	if (ret == NOTIFY_BAD) {
		nr_calls--;
		printk("%s: attempt to bring up CPU %u failed\n",
				__func__, cpu);
		ret = -EINVAL;
		goto out_notify;
	}

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu);
	if (ret != 0)
		goto out_notify;
	BUG_ON(!cpu_online(cpu));

	set_cpu_active(cpu, true);

	/* Now call notifier in preparation. */
	raw_notifier_call_chain(&cpu_chain, CPU_ONLINE | mod, hcpu);

out_notify:
	if (ret != 0)
		__raw_notifier_call_chain(&cpu_chain,
				CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
	cpu_hotplug_done();

	return ret;
}

int __cpuinit cpu_up(unsigned int cpu)
{
	int err = 0;
	if (!cpu_possible(cpu)) {
		printk(KERN_ERR "can't online cpu %d because it is not "
			"configured as may-hotadd at boot time\n", cpu);
#if defined(CONFIG_IA64)
		printk(KERN_ERR "please check additional_cpus= boot "
				"parameter\n");
#endif
		return -EINVAL;
	}

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_up(cpu, 0);

out:
	cpu_maps_update_done();
	return err;
}
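
/*
 * Example (illustrative sketch only): how other kernel code drives the two
 * high-level entry points above.  The CPU number is arbitrary; both calls
 * take cpu_add_remove_lock internally, so the caller needs no extra locking.
 *
 *	err = cpu_down(2);	fails with -EBUSY while hotplug is disabled
 *	if (!err)
 *		err = cpu_up(2);	bring the same CPU back online
 */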

#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;

int disable_nonboot_cpus(void)
{
	int cpu, first_cpu, error;

	cpu_maps_update_begin();
	first_cpu = cpumask_first(cpu_online_mask);
	/*
	 * We take down all of the non-boot CPUs in one shot to avoid races
	 * with the userspace trying to use the CPU hotplug at the same time
	 */
	cpumask_clear(frozen_cpus);

	printk("Disabling non-boot CPUs ...\n");
	for_each_online_cpu(cpu) {
		if (cpu == first_cpu)
			continue;
		error = _cpu_down(cpu, 1);
		if (!error)
			cpumask_set_cpu(cpu, frozen_cpus);
		else {
			printk(KERN_ERR "Error taking CPU%d down: %d\n",
				cpu, error);
			break;
		}
	}

	if (!error) {
		BUG_ON(num_online_cpus() > 1);
		/* Make sure the CPUs won't be enabled by someone else */
		cpu_hotplug_disabled = 1;
	} else {
		printk(KERN_ERR "Non-boot CPUs are not disabled\n");
	}
	cpu_maps_update_done();
	return error;
}

void __weak arch_enable_nonboot_cpus_begin(void)
{
}

void __weak arch_enable_nonboot_cpus_end(void)
{
}

void __ref enable_nonboot_cpus(void)
{
	int cpu, error;

	/* Allow everyone to use the CPU hotplug again */
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 0;
	if (cpumask_empty(frozen_cpus))
		goto out;

	printk("Enabling non-boot CPUs ...\n");

	arch_enable_nonboot_cpus_begin();

	for_each_cpu(cpu, frozen_cpus) {
		error = _cpu_up(cpu, 1);
		if (!error) {
			printk("CPU%d is up\n", cpu);
			continue;
		}
		printk(KERN_WARNING "Error taking CPU%d up: %d\n", cpu, error);
	}

	arch_enable_nonboot_cpus_end();

	cpumask_clear(frozen_cpus);
out:
	cpu_maps_update_done();
}

static int alloc_frozen_cpus(void)
{
	if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
		return -ENOMEM;
	return 0;
}
core_initcall(alloc_frozen_cpus);
#endif /* CONFIG_PM_SLEEP_SMP */

/**
 * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
 * @cpu: cpu that just started
 *
 * This function calls the cpu_chain notifiers with CPU_STARTING.
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void __cpuinit notify_cpu_starting(unsigned int cpu)
{
	unsigned long val = CPU_STARTING;

#ifdef CONFIG_PM_SLEEP_SMP
	if (frozen_cpus != NULL && cpumask_test_cpu(cpu, frozen_cpus))
		val = CPU_STARTING_FROZEN;
#endif /* CONFIG_PM_SLEEP_SMP */
	raw_notifier_call_chain(&cpu_chain, val, (void *)(long)cpu);
}

#endif /* CONFIG_SMP */

/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents, for each bit number nr, the NR_CPUS-bit value 1 << nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has only a single bit set.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)	[x+1][0] = 1UL << (x)
#define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

	MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
	MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
	MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
	MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
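
/*
 * Example (illustrative sketch only): roughly how cpumask_of() uses the
 * table above; the body below paraphrases get_cpu_mask() from
 * <linux/cpumask.h> of this same kernel generation.
 *
 *	const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
 *	p -= cpu / BITS_PER_LONG;
 *	return to_cpumask(p);
 *
 * Row 1 + (cpu % BITS_PER_LONG) has bit (cpu % BITS_PER_LONG) set in its
 * first word.  Stepping the pointer back by cpu / BITS_PER_LONG words
 * moves that word to position cpu / BITS_PER_LONG of the returned mask,
 * and every other word the mask covers is zero - which is why row 0 is
 * left empty, so the pointer can safely back into it.
 */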

const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);

#ifdef CONFIG_INIT_ALL_POSSIBLE
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly
	= CPU_BITS_ALL;
#else
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly;
#endif
const struct cpumask *const cpu_possible_mask = to_cpumask(cpu_possible_bits);
EXPORT_SYMBOL(cpu_possible_mask);

static DECLARE_BITMAP(cpu_online_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_online_mask = to_cpumask(cpu_online_bits);
EXPORT_SYMBOL(cpu_online_mask);

static DECLARE_BITMAP(cpu_present_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_present_mask = to_cpumask(cpu_present_bits);
EXPORT_SYMBOL(cpu_present_mask);

static DECLARE_BITMAP(cpu_active_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_active_mask = to_cpumask(cpu_active_bits);
EXPORT_SYMBOL(cpu_active_mask);

void set_cpu_possible(unsigned int cpu, bool possible)
{
	if (possible)
		cpumask_set_cpu(cpu, to_cpumask(cpu_possible_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_possible_bits));
}

void set_cpu_present(unsigned int cpu, bool present)
{
	if (present)
		cpumask_set_cpu(cpu, to_cpumask(cpu_present_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_present_bits));
}

void set_cpu_online(unsigned int cpu, bool online)
{
	if (online)
		cpumask_set_cpu(cpu, to_cpumask(cpu_online_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_online_bits));
}

void set_cpu_active(unsigned int cpu, bool active)
{
	if (active)
		cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_active_bits));
}

void init_cpu_present(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_present_bits), src);
}

void init_cpu_possible(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_possible_bits), src);
}

void init_cpu_online(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_online_bits), src);
}