/*
 *  arch/s390/kernel/vtime.c
 *    Virtual cpu timer based timer functions.
 *
 *  S390 version
 *    Copyright (C) 2004 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Jan Glauber <jan.glauber@de.ibm.com>
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/time.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/types.h>
#include <linux/timex.h>
#include <linux/notifier.h>
#include <linux/kernel_stat.h>
#include <linux/rcupdate.h>
#include <linux/posix-timers.h>

#include <asm/s390_ext.h>
#include <asm/timer.h>

static ext_int_info_t ext_int_info_timer;
DEFINE_PER_CPU(struct vtimer_queue, virt_cpu_timer);

#ifdef CONFIG_VIRT_CPU_ACCOUNTING
/*
 * Update process times based on virtual cpu times stored by entry.S
 * to the lowcore fields user_timer, system_timer & steal_clock.
 */
void account_tick_vtime(struct task_struct *tsk)
{
        cputime_t cputime;
        __u64 timer, clock;
        int rcu_user_flag;

        timer = S390_lowcore.last_update_timer;
        clock = S390_lowcore.last_update_clock;
        asm volatile (" STPT %0\n"    /* Store current cpu timer value */
                      " STCK %1"      /* Store current tod clock value */
                      : "=m" (S390_lowcore.last_update_timer),
                        "=m" (S390_lowcore.last_update_clock));
        S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
        S390_lowcore.steal_clock += S390_lowcore.last_update_clock - clock;

        cputime = S390_lowcore.user_timer >> 12;
        rcu_user_flag = cputime != 0;
        S390_lowcore.user_timer -= cputime << 12;
        S390_lowcore.steal_clock -= cputime << 12;
        account_user_time(tsk, cputime);

        cputime = S390_lowcore.system_timer >> 12;
        S390_lowcore.system_timer -= cputime << 12;
        S390_lowcore.steal_clock -= cputime << 12;
        account_system_time(tsk, HARDIRQ_OFFSET, cputime);

        cputime = S390_lowcore.steal_clock;
        if ((__s64) cputime > 0) {
                cputime >>= 12;
                S390_lowcore.steal_clock -= cputime << 12;
                account_steal_time(tsk, cputime);
        }

        run_local_timers();
        if (rcu_pending(smp_processor_id()))
                rcu_check_callbacks(smp_processor_id(), rcu_user_flag);
        scheduler_tick();
        run_posix_cpu_timers(tsk);
}

/*
 * Update process times based on virtual cpu times stored by entry.S
 * to the lowcore fields user_timer, system_timer & steal_clock.
 */
void account_vtime(struct task_struct *tsk)
{
        cputime_t cputime;
        __u64 timer;

        timer = S390_lowcore.last_update_timer;
        asm volatile (" STPT %0"    /* Store current cpu timer value */
                      : "=m" (S390_lowcore.last_update_timer));
        S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;

        cputime = S390_lowcore.user_timer >> 12;
        S390_lowcore.user_timer -= cputime << 12;
        S390_lowcore.steal_clock -= cputime << 12;
        account_user_time(tsk, cputime);

        cputime = S390_lowcore.system_timer >> 12;
        S390_lowcore.system_timer -= cputime << 12;
        S390_lowcore.steal_clock -= cputime << 12;
        account_system_time(tsk, 0, cputime);
}
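
/*
 * Note on units, added for clarity: the CPU timer and the TOD clock use
 * the same format, in which bit 51 corresponds to one microsecond.  The
 * ">> 12" in the accounting functions therefore reduces the accumulated
 * lowcore values to microsecond resolution before they are handed to
 * account_user_time()/account_system_time(), while "-= cputime << 12"
 * keeps the sub-microsecond remainder for the next update.
 */
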
/*
 * Update process times based on virtual cpu times stored by entry.S
 * to the lowcore fields user_timer, system_timer & steal_clock.
 */
void account_system_vtime(struct task_struct *tsk)
{
        cputime_t cputime;
        __u64 timer;

        timer = S390_lowcore.last_update_timer;
        asm volatile (" STPT %0"    /* Store current cpu timer value */
                      : "=m" (S390_lowcore.last_update_timer));
        S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;

        cputime = S390_lowcore.system_timer >> 12;
        S390_lowcore.system_timer -= cputime << 12;
        S390_lowcore.steal_clock -= cputime << 12;
        account_system_time(tsk, 0, cputime);
}

static inline void set_vtimer(__u64 expires)
{
        __u64 timer;

        asm volatile (" STPT %0\n"  /* Store current cpu timer value */
                      " SPT %1"     /* Set new value immediately afterwards */
                      : "=m" (timer) : "m" (expires));
        S390_lowcore.system_timer += S390_lowcore.last_update_timer - timer;
        S390_lowcore.last_update_timer = expires;

        /* store expire time for this CPU timer */
        per_cpu(virt_cpu_timer, smp_processor_id()).to_expire = expires;
}
#else
static inline void set_vtimer(__u64 expires)
{
        S390_lowcore.last_update_timer = expires;
        asm volatile ("SPT %0" : : "m" (S390_lowcore.last_update_timer));

        /* store expire time for this CPU timer */
        per_cpu(virt_cpu_timer, smp_processor_id()).to_expire = expires;
}
#endif

static void start_cpu_timer(void)
{
        struct vtimer_queue *vt_list;

        vt_list = &per_cpu(virt_cpu_timer, smp_processor_id());

        /* CPU timer interrupt is pending, don't reprogram it */
        if (vt_list->idle & 1LL<<63)
                return;

        if (!list_empty(&vt_list->list))
                set_vtimer(vt_list->idle);
}

static void stop_cpu_timer(void)
{
        struct vtimer_queue *vt_list;

        vt_list = &per_cpu(virt_cpu_timer, smp_processor_id());

        /* nothing to do */
        if (list_empty(&vt_list->list)) {
                vt_list->idle = VTIMER_MAX_SLICE;
                goto fire;
        }

        /* store the actual expire value */
        asm volatile ("STPT %0" : "=m" (vt_list->idle));

        /*
         * If the CPU timer is negative we don't reprogram it
         * because we would get an interrupt almost instantly.
         */
        if (vt_list->idle & 1LL<<63)
                return;

        vt_list->offset += vt_list->to_expire - vt_list->idle;

        /*
         * We cannot halt the CPU timer, we just write a value that
         * nearly never expires (only after 71 years) and re-write
         * the stored expire value if we continue the timer.
         */
fire:
        set_vtimer(VTIMER_MAX_SLICE);
}

/*
 * Sorted add to a list. The list is searched linearly until the first
 * element with a larger expire value is found.
 */
static void list_add_sorted(struct vtimer_list *timer, struct list_head *head)
{
        struct vtimer_list *event;

        list_for_each_entry(event, head, entry) {
                if (event->expires > timer->expires) {
                        list_add_tail(&timer->entry, &event->entry);
                        return;
                }
        }
        list_add_tail(&timer->entry, head);
}
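
/*
 * Worked example, illustrative only: inserting a timer with expires == 30
 * into a queue that currently holds timers with expires 10, 20 and 40
 * places it between 20 and 40, so the first list entry is always the
 * timer that is due next.
 */
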
/*
 * Do the callback functions of expired vtimer events.
 * Called from within the interrupt handler.
 */
static void do_callbacks(struct list_head *cb_list, struct pt_regs *regs)
{
        struct vtimer_queue *vt_list;
        struct vtimer_list *event, *tmp;
        void (*fn)(unsigned long, struct pt_regs*);
        unsigned long data;

        if (list_empty(cb_list))
                return;

        vt_list = &per_cpu(virt_cpu_timer, smp_processor_id());

        list_for_each_entry_safe(event, tmp, cb_list, entry) {
                fn = event->function;
                data = event->data;
                fn(data, regs);

                if (!event->interval)
                        /* delete one shot timer */
                        list_del_init(&event->entry);
                else {
                        /* move interval timer back to list */
                        spin_lock(&vt_list->lock);
                        list_del_init(&event->entry);
                        list_add_sorted(event, &vt_list->list);
                        spin_unlock(&vt_list->lock);
                }
        }
}

/*
 * Handler for the virtual CPU timer.
 */
static void do_cpu_timer_interrupt(struct pt_regs *regs, __u16 error_code)
{
        int cpu;
        __u64 next, delta;
        struct vtimer_queue *vt_list;
        struct vtimer_list *event, *tmp;
        struct list_head *ptr;
        /* the callback queue */
        struct list_head cb_list;

        INIT_LIST_HEAD(&cb_list);
        cpu = smp_processor_id();
        vt_list = &per_cpu(virt_cpu_timer, cpu);

        /* walk timer list, fire all expired events */
        spin_lock(&vt_list->lock);

        if (vt_list->to_expire < VTIMER_MAX_SLICE)
                vt_list->offset += vt_list->to_expire;

        list_for_each_entry_safe(event, tmp, &vt_list->list, entry) {
                if (event->expires > vt_list->offset)
                        /* found first unexpired event, leave */
                        break;

                /* recharge interval timer, we have to add the offset */
                if (event->interval)
                        event->expires = event->interval + vt_list->offset;

                /* move expired timer to the callback queue */
                list_move_tail(&event->entry, &cb_list);
        }
        spin_unlock(&vt_list->lock);
        do_callbacks(&cb_list, regs);

        /* next event is first in list */
        spin_lock(&vt_list->lock);
        if (!list_empty(&vt_list->list)) {
                ptr = vt_list->list.next;
                event = list_entry(ptr, struct vtimer_list, entry);
                next = event->expires - vt_list->offset;

                /* add the time spent in this interrupt handler
                 * and in the callback functions
                 */
                asm volatile ("STPT %0" : "=m" (delta));
                delta = 0xffffffffffffffffLL - delta + 1;
                vt_list->offset += delta;
                next -= delta;
        } else {
                vt_list->offset = 0;
                next = VTIMER_MAX_SLICE;
        }
        spin_unlock(&vt_list->lock);
        set_vtimer(next);
}

void init_virt_timer(struct vtimer_list *timer)
{
        timer->function = NULL;
        INIT_LIST_HEAD(&timer->entry);
        spin_lock_init(&timer->lock);
}
EXPORT_SYMBOL(init_virt_timer);

static inline int vtimer_pending(struct vtimer_list *timer)
{
        return (!list_empty(&timer->entry));
}

/*
 * This function should only run on the CPU specified in timer->cpu.
 */
static void internal_add_vtimer(struct vtimer_list *timer)
{
        unsigned long flags;
        __u64 done;
        struct vtimer_list *event;
        struct vtimer_queue *vt_list;

        vt_list = &per_cpu(virt_cpu_timer, timer->cpu);
        spin_lock_irqsave(&vt_list->lock, flags);

        if (timer->cpu != smp_processor_id())
                printk("internal_add_vtimer: BUG, running on wrong CPU\n");

        /* if list is empty we only have to set the timer */
        if (list_empty(&vt_list->list)) {
                /* reset the offset, this may happen if the last timer was
                 * just deleted by mod_virt_timer and the interrupt
                 * didn't happen until here
                 */
                vt_list->offset = 0;
                goto fire;
        }

        /* save progress */
        asm volatile ("STPT %0" : "=m" (done));

        /* calculate completed work */
        done = vt_list->to_expire - done + vt_list->offset;
        vt_list->offset = 0;

        list_for_each_entry(event, &vt_list->list, entry)
                event->expires -= done;

fire:
        list_add_sorted(timer, &vt_list->list);

        /* get first element, which is the next vtimer slice */
        event = list_entry(vt_list->list.next, struct vtimer_list, entry);

        set_vtimer(event->expires);
        spin_unlock_irqrestore(&vt_list->lock, flags);
        /* release CPU acquired in prepare_vtimer or mod_virt_timer() */
        put_cpu();
}

static inline int prepare_vtimer(struct vtimer_list *timer)
{
        if (!timer->function) {
                printk("add_virt_timer: uninitialized timer\n");
                return -EINVAL;
        }

        if (!timer->expires || timer->expires > VTIMER_MAX_SLICE) {
                printk("add_virt_timer: invalid timer expire value!\n");
                return -EINVAL;
        }

        if (vtimer_pending(timer)) {
                printk("add_virt_timer: timer pending\n");
                return -EBUSY;
        }

        timer->cpu = get_cpu();
        return 0;
}

/*
 * add_virt_timer - add a one-shot virtual CPU timer
 */
void add_virt_timer(void *new)
{
        struct vtimer_list *timer;

        timer = (struct vtimer_list *)new;

        if (prepare_vtimer(timer) < 0)
                return;

        timer->interval = 0;
        internal_add_vtimer(timer);
}
EXPORT_SYMBOL(add_virt_timer);

/*
 * add_virt_timer_periodic - add an interval virtual CPU timer
 */
void add_virt_timer_periodic(void *new)
{
        struct vtimer_list *timer;

        timer = (struct vtimer_list *)new;

        if (prepare_vtimer(timer) < 0)
                return;

        timer->interval = timer->expires;
        internal_add_vtimer(timer);
}
EXPORT_SYMBOL(add_virt_timer_periodic);
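
/*
 * Illustrative usage sketch (hypothetical caller, not part of this file):
 * set up a struct vtimer_list, fill in the callback and an expiry value
 * in CPU timer units (1 microsecond corresponds to 1 << 12 units) and
 * hand it to add_virt_timer() or add_virt_timer_periodic():
 *
 *      static void my_vtimer_fn(unsigned long data, struct pt_regs *regs)
 *      {
 *              ... runs in interrupt context on the owning CPU ...
 *      }
 *
 *      static struct vtimer_list my_vtimer;
 *
 *      init_virt_timer(&my_vtimer);
 *      my_vtimer.function = my_vtimer_fn;
 *      my_vtimer.data = 0;
 *      my_vtimer.expires = 5000000ULL << 12;   (roughly 5 seconds of cpu time)
 *      add_virt_timer(&my_vtimer);
 */
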
/*
 * If we change a pending timer, this function must be called on the CPU
 * the timer runs on, e.g. by using smp_call_function_on().
 *
 * The original mod_timer adds the timer if it is not pending. For
 * compatibility we do the same; in that case the timer is added on the
 * current CPU as a one-shot timer.
 *
 * returns whether it has modified a pending timer (1) or not (0)
 */
int mod_virt_timer(struct vtimer_list *timer, __u64 expires)
{
        struct vtimer_queue *vt_list;
        unsigned long flags;
        int cpu;

        if (!timer->function) {
                printk("mod_virt_timer: uninitialized timer\n");
                return -EINVAL;
        }

        if (!expires || expires > VTIMER_MAX_SLICE) {
                printk("mod_virt_timer: invalid expire range\n");
                return -EINVAL;
        }

        /*
         * This is a common optimization triggered by the
         * networking code - if the timer is re-modified
         * to have the same expiry time then just return:
         */
        if (timer->expires == expires && vtimer_pending(timer))
                return 1;

        cpu = get_cpu();
        vt_list = &per_cpu(virt_cpu_timer, cpu);

        /* disable interrupts before testing if the timer is pending */
        spin_lock_irqsave(&vt_list->lock, flags);

        /* if the timer isn't pending, add it on the current CPU */
        if (!vtimer_pending(timer)) {
                spin_unlock_irqrestore(&vt_list->lock, flags);
                /* we do not activate an interval timer with mod_virt_timer */
                timer->interval = 0;
                timer->expires = expires;
                timer->cpu = cpu;
                internal_add_vtimer(timer);
                return 0;
        }

        /* check if we run on the right CPU */
        if (timer->cpu != cpu) {
                printk("mod_virt_timer: running on wrong CPU, check your code\n");
                spin_unlock_irqrestore(&vt_list->lock, flags);
                put_cpu();
                return -EINVAL;
        }

        list_del_init(&timer->entry);
        timer->expires = expires;

        /* also change the interval if we have an interval timer */
        if (timer->interval)
                timer->interval = expires;

        /* the timer can't expire anymore so we can release the lock */
        spin_unlock_irqrestore(&vt_list->lock, flags);
        internal_add_vtimer(timer);
        return 1;
}
EXPORT_SYMBOL(mod_virt_timer);

/*
 * delete a virtual timer
 *
 * returns whether the deleted timer was pending (1) or not (0)
 */
int del_virt_timer(struct vtimer_list *timer)
{
        unsigned long flags;
        struct vtimer_queue *vt_list;

        /* check if timer is pending */
        if (!vtimer_pending(timer))
                return 0;

        vt_list = &per_cpu(virt_cpu_timer, timer->cpu);
        spin_lock_irqsave(&vt_list->lock, flags);

        /* we don't interrupt a running timer, just let it expire! */
        list_del_init(&timer->entry);

        /* last timer removed */
        if (list_empty(&vt_list->list)) {
                vt_list->to_expire = 0;
                vt_list->offset = 0;
        }

        spin_unlock_irqrestore(&vt_list->lock, flags);
        return 1;
}
EXPORT_SYMBOL(del_virt_timer);
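
/*
 * Continuing the illustrative sketch (hypothetical my_vtimer from the
 * example above):
 *
 *      mod_virt_timer(&my_vtimer, 1000000ULL << 12);   re-arm for ~1 second
 *      ...
 *      del_virt_timer(&my_vtimer);                     remove it again
 *
 * As with add_virt_timer(), the expiry value is in CPU timer units and
 * must not exceed VTIMER_MAX_SLICE.
 */
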
/*
 * Start the virtual CPU timer on the current CPU.
 */
void init_cpu_vtimer(void)
{
        struct vtimer_queue *vt_list;
        unsigned long cr0;

        /* kick the virtual timer */
        S390_lowcore.exit_timer = VTIMER_MAX_SLICE;
        S390_lowcore.last_update_timer = VTIMER_MAX_SLICE;
        asm volatile ("SPT %0" : : "m" (S390_lowcore.last_update_timer));
        asm volatile ("STCK %0" : "=m" (S390_lowcore.last_update_clock));

        /* enable cpu timer external interrupts: set the CPU-timer
         * subclass mask bit in control register 0 */
        __ctl_store(cr0, 0, 0);
        cr0 |= 0x400;
        __ctl_load(cr0, 0, 0);

        vt_list = &per_cpu(virt_cpu_timer, smp_processor_id());
        INIT_LIST_HEAD(&vt_list->list);
        spin_lock_init(&vt_list->lock);
        vt_list->to_expire = 0;
        vt_list->offset = 0;
        vt_list->idle = 0;
}

static int vtimer_idle_notify(struct notifier_block *self,
                              unsigned long action, void *hcpu)
{
        switch (action) {
        case CPU_IDLE:
                stop_cpu_timer();
                break;
        case CPU_NOT_IDLE:
                start_cpu_timer();
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block vtimer_idle_nb = {
        .notifier_call = vtimer_idle_notify,
};

void __init vtime_init(void)
{
        /* request the cpu timer external interrupt */
        if (register_early_external_interrupt(0x1005, do_cpu_timer_interrupt,
                                              &ext_int_info_timer) != 0)
                panic("Couldn't request external interrupt 0x1005");

        if (register_idle_notifier(&vtimer_idle_nb))
                panic("Couldn't register idle notifier");

        init_cpu_vtimer();
}