/*
 * linux/kernel/time/tick-broadcast.c
 *
 * This file contains functions which emulate a local clock-event
 * device via a broadcast event source.
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
 *
 * This code is licenced under the GPL version 2. For details see
 * kernel-base/COPYING.
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/tick.h>

#include "tick-internal.h"

/*
 * Broadcast support for broken x86 hardware, where the local apic
 * timer stops in C3 state.
 */

struct tick_device tick_broadcast_device;
static cpumask_t tick_broadcast_mask;
static DEFINE_SPINLOCK(tick_broadcast_lock);
static int tick_broadcast_force;

#ifdef CONFIG_TICK_ONESHOT
static void tick_broadcast_clear_oneshot(int cpu);
#else
static inline void tick_broadcast_clear_oneshot(int cpu) { }
#endif

/*
 * Debugging: see timer_list.c
 */
struct tick_device *tick_get_broadcast_device(void)
{
	return &tick_broadcast_device;
}

cpumask_t *tick_get_broadcast_mask(void)
{
	return &tick_broadcast_mask;
}

/*
 * Start the device in periodic mode
 */
static void tick_broadcast_start_periodic(struct clock_event_device *bc)
{
	if (bc)
		tick_setup_periodic(bc, 1);
}

/*
 * Check, if the device can be utilized as broadcast device:
 */
int tick_check_broadcast_device(struct clock_event_device *dev)
{
	if ((tick_broadcast_device.evtdev &&
	     tick_broadcast_device.evtdev->rating >= dev->rating) ||
	    (dev->features & CLOCK_EVT_FEAT_C3STOP))
		return 0;

	clockevents_exchange_device(NULL, dev);
	tick_broadcast_device.evtdev = dev;
	if (!cpus_empty(tick_broadcast_mask))
		tick_broadcast_start_periodic(dev);
	return 1;
}

/*
 * Check, if the device is the broadcast device
 */
int tick_is_broadcast_device(struct clock_event_device *dev)
{
	return (dev && tick_broadcast_device.evtdev == dev);
}

/*
 * Check, if the device is dysfunctional and a placeholder, which
 * needs to be handled by the broadcast device.
 */
int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&tick_broadcast_lock, flags);

	/*
	 * Devices might be registered with both periodic and oneshot
	 * mode disabled. This signals, that the device needs to be
	 * operated from the broadcast device and is a placeholder for
	 * the cpu local device.
	 */
	if (!tick_device_is_functional(dev)) {
		dev->event_handler = tick_handle_periodic;
		cpu_set(cpu, tick_broadcast_mask);
		tick_broadcast_start_periodic(tick_broadcast_device.evtdev);
		ret = 1;
	} else {
		/*
		 * When the new device is not affected by the stop
		 * feature and the cpu is marked in the broadcast mask
		 * then clear the broadcast bit.
		 */
		if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
			int cpu = smp_processor_id();

			cpu_clear(cpu, tick_broadcast_mask);
			tick_broadcast_clear_oneshot(cpu);
		}
	}
	spin_unlock_irqrestore(&tick_broadcast_lock, flags);
	return ret;
}
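/*
 * Illustrative only: a minimal placeholder device, roughly as an
 * architecture might register one when the real per-cpu timer is
 * unusable. With CLOCK_EVT_FEAT_DUMMY set (and neither periodic nor
 * oneshot mode usable), tick_device_is_functional() fails and
 * tick_device_uses_broadcast() wires the cpu up to the broadcast
 * device. The name and rating below are made up for this sketch:
 *
 *	static struct clock_event_device tick_dummy_dev = {
 *		.name		= "dummy",
 *		.features	= CLOCK_EVT_FEAT_DUMMY,
 *		.rating		= 100,
 *	};
 */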
/*
 * Broadcast the event to the cpus, which are set in the mask
 */
static void tick_do_broadcast(cpumask_t mask)
{
	int cpu = smp_processor_id();
	struct tick_device *td;

	/*
	 * Check, if the current cpu is in the mask
	 */
	if (cpu_isset(cpu, mask)) {
		cpu_clear(cpu, mask);
		td = &per_cpu(tick_cpu_device, cpu);
		td->evtdev->event_handler(td->evtdev);
	}

	if (!cpus_empty(mask)) {
		/*
		 * It might be necessary to actually check whether the devices
		 * have different broadcast functions. For now, just use the
		 * broadcast function of the first device. This works as long
		 * as we have this misfeature only on x86 (lapic).
		 */
		cpu = first_cpu(mask);
		td = &per_cpu(tick_cpu_device, cpu);
		td->evtdev->broadcast(mask);
	}
}

/*
 * Periodic broadcast:
 * - invoke the broadcast handlers
 */
static void tick_do_periodic_broadcast(void)
{
	cpumask_t mask;

	spin_lock(&tick_broadcast_lock);

	cpus_and(mask, cpu_online_map, tick_broadcast_mask);
	tick_do_broadcast(mask);

	spin_unlock(&tick_broadcast_lock);
}

/*
 * Event handler for periodic broadcast ticks
 */
static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
{
	ktime_t next;

	tick_do_periodic_broadcast();

	/*
	 * The device is in periodic mode. No reprogramming necessary:
	 */
	if (dev->mode == CLOCK_EVT_MODE_PERIODIC)
		return;

	/*
	 * Setup the next period for devices, which do not have
	 * periodic mode. We read dev->next_event first and add to it
	 * when the event already expired. clockevents_program_event()
	 * sets dev->next_event only when the event is really
	 * programmed to the device.
	 */
	for (next = dev->next_event; ;) {
		next = ktime_add(next, tick_period);

		if (!clockevents_program_event(dev, next, ktime_get()))
			return;
		tick_do_periodic_broadcast();
	}
}
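/*
 * Worked example for the catch-up loop above, with made-up numbers:
 * HZ=100 gives a tick_period of 10ms. Assume dev->next_event was
 * 500ms and the handler runs late, at 523ms. The loop first tries
 * 510ms (already in the past, so clockevents_program_event() fails),
 * delivers the missed broadcast, tries 520ms (fails again, one more
 * broadcast), and finally programs 530ms successfully, which also
 * updates dev->next_event and terminates the loop.
 */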
/*
 * Powerstate information: The system enters/leaves a state, where
 * affected devices might stop
 */
static void tick_do_broadcast_on_off(void *why)
{
	struct clock_event_device *bc, *dev;
	struct tick_device *td;
	unsigned long flags, *reason = why;
	int cpu, bc_stopped;

	spin_lock_irqsave(&tick_broadcast_lock, flags);

	cpu = smp_processor_id();
	td = &per_cpu(tick_cpu_device, cpu);
	dev = td->evtdev;
	bc = tick_broadcast_device.evtdev;

	/*
	 * Is the device not affected by the powerstate ?
	 */
	if (!dev || !(dev->features & CLOCK_EVT_FEAT_C3STOP))
		goto out;

	if (!tick_device_is_functional(dev))
		goto out;

	bc_stopped = cpus_empty(tick_broadcast_mask);

	switch (*reason) {
	case CLOCK_EVT_NOTIFY_BROADCAST_ON:
	case CLOCK_EVT_NOTIFY_BROADCAST_FORCE:
		if (!cpu_isset(cpu, tick_broadcast_mask)) {
			cpu_set(cpu, tick_broadcast_mask);
			if (tick_broadcast_device.mode ==
			    TICKDEV_MODE_PERIODIC)
				clockevents_shutdown(dev);
		}
		if (*reason == CLOCK_EVT_NOTIFY_BROADCAST_FORCE)
			tick_broadcast_force = 1;
		break;
	case CLOCK_EVT_NOTIFY_BROADCAST_OFF:
		if (!tick_broadcast_force &&
		    cpu_isset(cpu, tick_broadcast_mask)) {
			cpu_clear(cpu, tick_broadcast_mask);
			if (tick_broadcast_device.mode ==
			    TICKDEV_MODE_PERIODIC)
				tick_setup_periodic(dev, 0);
		}
		break;
	}

	if (cpus_empty(tick_broadcast_mask)) {
		if (!bc_stopped)
			clockevents_shutdown(bc);
	} else if (bc_stopped) {
		if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
			tick_broadcast_start_periodic(bc);
		else
			tick_broadcast_setup_oneshot(bc);
	}
out:
	spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * Powerstate information: The system enters/leaves a state, where
 * affected devices might stop.
 */
void tick_broadcast_on_off(unsigned long reason, int *oncpu)
{
	if (!cpu_isset(*oncpu, cpu_online_map))
		printk(KERN_ERR "tick-broadcast: ignoring broadcast for "
		       "offline CPU #%d\n", *oncpu);
	else
		smp_call_function_single(*oncpu, tick_do_broadcast_on_off,
					 &reason, 1);
}

/*
 * Set the periodic handler depending on broadcast on/off
 */
void tick_set_periodic_handler(struct clock_event_device *dev, int broadcast)
{
	if (!broadcast)
		dev->event_handler = tick_handle_periodic;
	else
		dev->event_handler = tick_handle_periodic_broadcast;
}

/*
 * Remove a CPU from broadcasting
 */
void tick_shutdown_broadcast(unsigned int *cpup)
{
	struct clock_event_device *bc;
	unsigned long flags;
	unsigned int cpu = *cpup;

	spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;
	cpu_clear(cpu, tick_broadcast_mask);

	if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) {
		if (bc && cpus_empty(tick_broadcast_mask))
			clockevents_shutdown(bc);
	}

	spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * Shut the broadcast device down on system suspend.
 */
void tick_suspend_broadcast(void)
{
	struct clock_event_device *bc;
	unsigned long flags;

	spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;
	if (bc)
		clockevents_shutdown(bc);

	spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * Restore the broadcast device on resume. Returns non-zero when the
 * resuming cpu's periodic tick is served by the broadcast device, so
 * the caller can skip restarting the cpu local device.
 */
int tick_resume_broadcast(void)
{
	struct clock_event_device *bc;
	unsigned long flags;
	int broadcast = 0;

	spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;

	if (bc) {
		clockevents_set_mode(bc, CLOCK_EVT_MODE_RESUME);

		switch (tick_broadcast_device.mode) {
		case TICKDEV_MODE_PERIODIC:
			if (!cpus_empty(tick_broadcast_mask))
				tick_broadcast_start_periodic(bc);
			broadcast = cpu_isset(smp_processor_id(),
					      tick_broadcast_mask);
			break;
		case TICKDEV_MODE_ONESHOT:
			broadcast = tick_resume_broadcast_oneshot(bc);
			break;
		}
	}
	spin_unlock_irqrestore(&tick_broadcast_lock, flags);

	return broadcast;
}
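/*
 * Illustrative only: callers do not invoke tick_broadcast_on_off()
 * directly. An idle driver which discovers that the cpu local timer
 * stops in a deep C-state announces this through the clockevents
 * notification interface, roughly:
 *
 *	int cpu = smp_processor_id();
 *
 *	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ON, &cpu);
 *
 * The tick notifier routes this to tick_broadcast_on_off() above.
 * The per-idle-entry transitions use CLOCK_EVT_NOTIFY_BROADCAST_ENTER
 * and _EXIT instead; see tick_broadcast_oneshot_control() below.
 */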
#ifdef CONFIG_TICK_ONESHOT

static cpumask_t tick_broadcast_oneshot_mask;

/*
 * Debugging: see timer_list.c
 */
cpumask_t *tick_get_broadcast_oneshot_mask(void)
{
	return &tick_broadcast_oneshot_mask;
}

/*
 * Program the broadcast device to expire at @expires.
 */
static int tick_broadcast_set_event(ktime_t expires, int force)
{
	struct clock_event_device *bc = tick_broadcast_device.evtdev;

	return tick_dev_program_event(bc, expires, force);
}

/*
 * Switch the broadcast device back to oneshot mode after resume.
 */
int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
{
	clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
	return 0;
}

/*
 * Called from irq_enter() when idle was interrupted to reenable the
 * per cpu device.
 */
void tick_check_oneshot_broadcast(int cpu)
{
	if (cpu_isset(cpu, tick_broadcast_oneshot_mask)) {
		struct tick_device *td = &per_cpu(tick_cpu_device, cpu);

		clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_ONESHOT);
	}
}

/*
 * Handle oneshot mode broadcasting
 */
static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
{
	struct tick_device *td;
	cpumask_t mask;
	ktime_t now, next_event;
	int cpu;

	spin_lock(&tick_broadcast_lock);
again:
	dev->next_event.tv64 = KTIME_MAX;
	next_event.tv64 = KTIME_MAX;
	mask = CPU_MASK_NONE;
	now = ktime_get();
	/* Find all expired events */
	for_each_cpu_mask_nr(cpu, tick_broadcast_oneshot_mask) {
		td = &per_cpu(tick_cpu_device, cpu);
		if (td->evtdev->next_event.tv64 <= now.tv64)
			cpu_set(cpu, mask);
		else if (td->evtdev->next_event.tv64 < next_event.tv64)
			next_event.tv64 = td->evtdev->next_event.tv64;
	}

	/*
	 * Wakeup the cpus which have an expired event.
	 */
	tick_do_broadcast(mask);

	/*
	 * Two reasons for reprogramming:
	 *
	 * - The global event did not expire any CPU local
	 *   events. This happens in dyntick mode, as the maximum PIT
	 *   delta is quite small.
	 *
	 * - There are pending events on sleeping CPUs which were not
	 *   in the event mask
	 */
	if (next_event.tv64 != KTIME_MAX) {
		/*
		 * Rearm the broadcast device. If event expired,
		 * repeat the above
		 */
		if (tick_broadcast_set_event(next_event, 0))
			goto again;
	}
	spin_unlock(&tick_broadcast_lock);
}
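/*
 * Illustrative timeline for the handler above, with made-up values:
 * CPU1 sleeps with next_event = 100ms and CPU2 with next_event =
 * 150ms, both set in tick_broadcast_oneshot_mask. When the broadcast
 * device fires at 100ms, the scan finds CPU1 expired and wakes it via
 * tick_do_broadcast(); CPU2 is still pending, so next_event becomes
 * 150ms and the broadcast device is rearmed for that point. Should
 * 150ms have already passed by the time we reprogram,
 * tick_broadcast_set_event() fails and the 'again' loop rescans.
 */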
/*
 * Powerstate information: The system enters/leaves a state, where
 * affected devices might stop
 */
void tick_broadcast_oneshot_control(unsigned long reason)
{
	struct clock_event_device *bc, *dev;
	struct tick_device *td;
	unsigned long flags;
	int cpu;

	spin_lock_irqsave(&tick_broadcast_lock, flags);

	/*
	 * Periodic mode does not care about the enter/exit of power
	 * states
	 */
	if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
		goto out;

	bc = tick_broadcast_device.evtdev;
	cpu = smp_processor_id();
	td = &per_cpu(tick_cpu_device, cpu);
	dev = td->evtdev;

	if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
		goto out;

	if (reason == CLOCK_EVT_NOTIFY_BROADCAST_ENTER) {
		if (!cpu_isset(cpu, tick_broadcast_oneshot_mask)) {
			cpu_set(cpu, tick_broadcast_oneshot_mask);
			clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN);
			if (dev->next_event.tv64 < bc->next_event.tv64)
				tick_broadcast_set_event(dev->next_event, 1);
		}
	} else {
		if (cpu_isset(cpu, tick_broadcast_oneshot_mask)) {
			cpu_clear(cpu, tick_broadcast_oneshot_mask);
			clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);
			if (dev->next_event.tv64 != KTIME_MAX)
				tick_program_event(dev->next_event, 1);
		}
	}

out:
	spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * Reset the one shot broadcast for a cpu
 *
 * Called with tick_broadcast_lock held
 */
static void tick_broadcast_clear_oneshot(int cpu)
{
	cpu_clear(cpu, tick_broadcast_oneshot_mask);
}

/*
 * Set the expiry time of the cpu local devices in @mask to @expires.
 */
static void tick_broadcast_init_next_event(cpumask_t *mask, ktime_t expires)
{
	struct tick_device *td;
	int cpu;

	for_each_cpu_mask_nr(cpu, *mask) {
		td = &per_cpu(tick_cpu_device, cpu);
		if (td->evtdev)
			td->evtdev->next_event = expires;
	}
}

/**
 * tick_broadcast_setup_oneshot - setup the broadcast device
 */
void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
{
	/* Set it up only once ! */
	if (bc->event_handler != tick_handle_oneshot_broadcast) {
		int was_periodic = bc->mode == CLOCK_EVT_MODE_PERIODIC;
		int cpu = smp_processor_id();
		cpumask_t mask;

		bc->event_handler = tick_handle_oneshot_broadcast;
		clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);

		/* Take the do_timer update */
		tick_do_timer_cpu = cpu;

		/*
		 * We must be careful here. There might be other CPUs
		 * waiting for periodic broadcast. We need to set the
		 * oneshot_mask bits for those and program the
		 * broadcast device to fire.
		 */
		mask = tick_broadcast_mask;
		cpu_clear(cpu, mask);
		cpus_or(tick_broadcast_oneshot_mask,
			tick_broadcast_oneshot_mask, mask);

		if (was_periodic && !cpus_empty(mask)) {
			tick_broadcast_init_next_event(&mask, tick_next_period);
			tick_broadcast_set_event(tick_next_period, 1);
		} else
			bc->next_event.tv64 = KTIME_MAX;
	}
}
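/*
 * Illustrative only: if CPUs 1 and 2 were parked waiting for the
 * periodic broadcast when the mode switch above happens, they are
 * moved from tick_broadcast_mask into tick_broadcast_oneshot_mask,
 * their per-cpu next_event is initialized to tick_next_period and the
 * broadcast device is armed for that point, so neither cpu misses its
 * pending wakeup across the periodic to oneshot transition.
 */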
/*
 * Select oneshot operating mode for the broadcast device
 */
void tick_broadcast_switch_to_oneshot(void)
{
	struct clock_event_device *bc;
	unsigned long flags;

	spin_lock_irqsave(&tick_broadcast_lock, flags);

	tick_broadcast_device.mode = TICKDEV_MODE_ONESHOT;
	bc = tick_broadcast_device.evtdev;
	if (bc)
		tick_broadcast_setup_oneshot(bc);
	spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * Remove a dead CPU from broadcasting
 */
void tick_shutdown_broadcast_oneshot(unsigned int *cpup)
{
	unsigned long flags;
	unsigned int cpu = *cpup;

	spin_lock_irqsave(&tick_broadcast_lock, flags);

	/*
	 * Clear the broadcast mask flag for the dead cpu, but do not
	 * stop the broadcast device!
	 */
	cpu_clear(cpu, tick_broadcast_oneshot_mask);

	spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * Check, whether the broadcast device is in one shot mode
 */
int tick_broadcast_oneshot_active(void)
{
	return tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT;
}

#endif