/*
 * Virtual cpu timer based timer functions.
 *
 * Copyright IBM Corp. 2004, 2012
 * Author(s): Jan Glauber <jan.glauber@de.ibm.com>
 */

#include <linux/kernel_stat.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/timex.h>
#include <linux/types.h>
#include <linux/time.h>

#include <asm/cputime.h>
#include <asm/vtimer.h>
#include <asm/vtime.h>
#include <asm/cpu_mf.h>
#include <asm/smp.h>

static void virt_timer_expire(void);

static LIST_HEAD(virt_timer_list);
static DEFINE_SPINLOCK(virt_timer_lock);
static atomic64_t virt_timer_current;
static atomic64_t virt_timer_elapsed;

DEFINE_PER_CPU(u64, mt_cycles[8]);
static DEFINE_PER_CPU(u64, mt_scaling_mult) = { 1 };
static DEFINE_PER_CPU(u64, mt_scaling_div) = { 1 };
static DEFINE_PER_CPU(u64, mt_scaling_jiffies);

static inline u64 get_vtimer(void)
{
	u64 timer;

	asm volatile("stpt %0" : "=m" (timer));
	return timer;
}

static inline void set_vtimer(u64 expires)
{
	u64 timer;

	asm volatile(
		"	stpt	%0\n"	/* Store current cpu timer value */
		"	spt	%1"	/* Set new value imm. afterwards */
		: "=m" (timer) : "m" (expires));
	S390_lowcore.system_timer += S390_lowcore.last_update_timer - timer;
	S390_lowcore.last_update_timer = expires;
}

static inline int virt_timer_forward(u64 elapsed)
{
	BUG_ON(!irqs_disabled());

	if (list_empty(&virt_timer_list))
		return 0;
	elapsed = atomic64_add_return(elapsed, &virt_timer_elapsed);
	return elapsed >= atomic64_read(&virt_timer_current);
}

static void update_mt_scaling(void)
{
	u64 cycles_new[8], *cycles_old;
	u64 delta, fac, mult, div;
	int i;

	stcctm5(smp_cpu_mtid + 1, cycles_new);
	cycles_old = this_cpu_ptr(mt_cycles);
	fac = 1;
	mult = div = 0;
	for (i = 0; i <= smp_cpu_mtid; i++) {
		delta = cycles_new[i] - cycles_old[i];
		div += delta;
		mult *= i + 1;
		mult += delta * fac;
		fac *= i + 1;
	}
	div *= fac;
	if (div > 0) {
		/* Update scaling factor */
		__this_cpu_write(mt_scaling_mult, mult);
		__this_cpu_write(mt_scaling_div, div);
		memcpy(cycles_old, cycles_new,
		       sizeof(u64) * (smp_cpu_mtid + 1));
	}
	__this_cpu_write(mt_scaling_jiffies, jiffies_64);
}
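
/*
 * Worked example for the scaling factor computed above (the concrete
 * numbers are made up purely for illustration): with smp_cpu_mtid == 1,
 * i.e. two threads per core, and cycle deltas delta0 = 6000 and
 * delta1 = 2000, the loop yields mult = 2 * 6000 + 2000 = 14000 and
 * div = 2 * (6000 + 2000) = 16000. The resulting factor
 * mult / div = 0.875 is the delta-weighted average of 1 / (i + 1);
 * the accounting functions below multiply raw cpu timer deltas by it
 * to derive the scaled user and system times.
 */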

/*
 * Update process times based on virtual cpu times stored by entry.S
 * to the lowcore fields user_timer, system_timer & steal_timer.
 */
static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
{
	struct thread_info *ti = task_thread_info(tsk);
	u64 timer, clock, user, system, steal;
	u64 user_scaled, system_scaled;

	timer = S390_lowcore.last_update_timer;
	clock = S390_lowcore.last_update_clock;
	asm volatile(
		"	stpt	%0\n"	/* Store current cpu timer value */
#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
		"	stckf	%1"	/* Store current tod clock value */
#else
		"	stck	%1"	/* Store current tod clock value */
#endif
		: "=m" (S390_lowcore.last_update_timer),
		  "=m" (S390_lowcore.last_update_clock));
	S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
	S390_lowcore.steal_timer += S390_lowcore.last_update_clock - clock;

	/* Update MT utilization calculation */
	if (smp_cpu_mtid &&
	    time_after64(jiffies_64, this_cpu_read(mt_scaling_jiffies)))
		update_mt_scaling();

	user = S390_lowcore.user_timer - ti->user_timer;
	S390_lowcore.steal_timer -= user;
	ti->user_timer = S390_lowcore.user_timer;

	system = S390_lowcore.system_timer - ti->system_timer;
	S390_lowcore.steal_timer -= system;
	ti->system_timer = S390_lowcore.system_timer;

	user_scaled = user;
	system_scaled = system;
	/* Do MT utilization scaling */
	if (smp_cpu_mtid) {
		u64 mult = __this_cpu_read(mt_scaling_mult);
		u64 div = __this_cpu_read(mt_scaling_div);

		user_scaled = (user_scaled * mult) / div;
		system_scaled = (system_scaled * mult) / div;
	}
	account_user_time(tsk, user, user_scaled);
	account_system_time(tsk, hardirq_offset, system, system_scaled);

	steal = S390_lowcore.steal_timer;
	if ((s64) steal > 0) {
		S390_lowcore.steal_timer = 0;
		account_steal_time(steal);
	}

	return virt_timer_forward(user + system);
}
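
/*
 * Worked example for the split done above (numbers invented for
 * illustration only, steal_timer assumed to start at zero): if the TOD
 * clock advanced by 12ms since the last update while the lowcore deltas
 * show 5ms of user and 4ms of system cpu timer consumption, steal_timer
 * ends up at 12 - 5 - 4 = 3ms, which is handed to account_steal_time()
 * as time the hypervisor did not give to this virtual cpu.
 */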

void vtime_task_switch(struct task_struct *prev)
{
	struct thread_info *ti;

	do_account_vtime(prev, 0);
	ti = task_thread_info(prev);
	ti->user_timer = S390_lowcore.user_timer;
	ti->system_timer = S390_lowcore.system_timer;
	ti = task_thread_info(current);
	S390_lowcore.user_timer = ti->user_timer;
	S390_lowcore.system_timer = ti->system_timer;
}

/*
 * In s390, accounting pending user time also implies
 * accounting system time in order to correctly compute
 * the stolen time accounting.
 */
void vtime_account_user(struct task_struct *tsk)
{
	if (do_account_vtime(tsk, HARDIRQ_OFFSET))
		virt_timer_expire();
}

/*
 * Update process times based on virtual cpu times stored by entry.S
 * to the lowcore field system_timer. Only system time is accounted
 * here; user time is handled by do_account_vtime().
 */
void vtime_account_irq_enter(struct task_struct *tsk)
{
	struct thread_info *ti = task_thread_info(tsk);
	u64 timer, system, system_scaled;

	timer = S390_lowcore.last_update_timer;
	S390_lowcore.last_update_timer = get_vtimer();
	S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;

	/* Update MT utilization calculation */
	if (smp_cpu_mtid &&
	    time_after64(jiffies_64, this_cpu_read(mt_scaling_jiffies)))
		update_mt_scaling();

	system = S390_lowcore.system_timer - ti->system_timer;
	S390_lowcore.steal_timer -= system;
	ti->system_timer = S390_lowcore.system_timer;
	system_scaled = system;
	/* Do MT utilization scaling */
	if (smp_cpu_mtid) {
		u64 mult = __this_cpu_read(mt_scaling_mult);
		u64 div = __this_cpu_read(mt_scaling_div);

		system_scaled = (system_scaled * mult) / div;
	}
	account_system_time(tsk, 0, system, system_scaled);

	virt_timer_forward(system);
}
EXPORT_SYMBOL_GPL(vtime_account_irq_enter);

void vtime_account_system(struct task_struct *tsk)
__attribute__((alias("vtime_account_irq_enter")));
EXPORT_SYMBOL_GPL(vtime_account_system);

/*
 * Sorted add to a list. The list is searched linearly until the first
 * element with a larger expiry value is found.
 */
static void list_add_sorted(struct vtimer_list *timer, struct list_head *head)
{
	struct vtimer_list *tmp;

	list_for_each_entry(tmp, head, entry) {
		if (tmp->expires > timer->expires) {
			list_add_tail(&timer->entry, &tmp->entry);
			return;
		}
	}
	list_add_tail(&timer->entry, head);
}

/*
 * Handler for expired virtual CPU timer.
 */
static void virt_timer_expire(void)
{
	struct vtimer_list *timer, *tmp;
	unsigned long elapsed;
	LIST_HEAD(cb_list);

	/* walk timer list, fire all expired timers */
	spin_lock(&virt_timer_lock);
	elapsed = atomic64_read(&virt_timer_elapsed);
	list_for_each_entry_safe(timer, tmp, &virt_timer_list, entry) {
		if (timer->expires < elapsed)
			/* move expired timer to the callback queue */
			list_move_tail(&timer->entry, &cb_list);
		else
			timer->expires -= elapsed;
	}
	if (!list_empty(&virt_timer_list)) {
		timer = list_first_entry(&virt_timer_list,
					 struct vtimer_list, entry);
		atomic64_set(&virt_timer_current, timer->expires);
	}
	atomic64_sub(elapsed, &virt_timer_elapsed);
	spin_unlock(&virt_timer_lock);

	/* Do callbacks and recharge periodic timers */
	list_for_each_entry_safe(timer, tmp, &cb_list, entry) {
		list_del_init(&timer->entry);
		timer->function(timer->data);
		if (timer->interval) {
			/* Recharge interval timer */
			timer->expires = timer->interval +
				atomic64_read(&virt_timer_elapsed);
			spin_lock(&virt_timer_lock);
			list_add_sorted(timer, &virt_timer_list);
			spin_unlock(&virt_timer_lock);
		}
	}
}

void init_virt_timer(struct vtimer_list *timer)
{
	timer->function = NULL;
	INIT_LIST_HEAD(&timer->entry);
}
EXPORT_SYMBOL(init_virt_timer);
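
/*
 * Example of a one-shot virtual timer (illustrative sketch only; the names
 * my_vtimer, my_vtimer_fn and my_slice are made up and not part of this
 * file):
 *
 *	static struct vtimer_list my_vtimer;
 *
 *	static void my_vtimer_fn(unsigned long data)
 *	{
 *		// called once roughly my_slice of cpu time has been
 *		// consumed on this cpu
 *	}
 *
 *	init_virt_timer(&my_vtimer);
 *	my_vtimer.function = my_vtimer_fn;
 *	my_vtimer.data = 0;
 *	my_vtimer.expires = my_slice;	// expressed in cpu timer units
 *	add_virt_timer(&my_vtimer);
 */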

static inline int vtimer_pending(struct vtimer_list *timer)
{
	return !list_empty(&timer->entry);
}

static void internal_add_vtimer(struct vtimer_list *timer)
{
	if (list_empty(&virt_timer_list)) {
		/* First timer, just program it. */
		atomic64_set(&virt_timer_current, timer->expires);
		atomic64_set(&virt_timer_elapsed, 0);
		list_add(&timer->entry, &virt_timer_list);
	} else {
		/* Update timer against current base. */
		timer->expires += atomic64_read(&virt_timer_elapsed);
		if (likely((s64) timer->expires <
			   (s64) atomic64_read(&virt_timer_current)))
			/* The new timer expires before the current timer. */
			atomic64_set(&virt_timer_current, timer->expires);
		/* Insert new timer into the list. */
		list_add_sorted(timer, &virt_timer_list);
	}
}

static void __add_vtimer(struct vtimer_list *timer, int periodic)
{
	unsigned long flags;

	timer->interval = periodic ? timer->expires : 0;
	spin_lock_irqsave(&virt_timer_lock, flags);
	internal_add_vtimer(timer);
	spin_unlock_irqrestore(&virt_timer_lock, flags);
}

/*
 * add_virt_timer - add a one-shot virtual CPU timer
 */
void add_virt_timer(struct vtimer_list *timer)
{
	__add_vtimer(timer, 0);
}
EXPORT_SYMBOL(add_virt_timer);

/*
 * add_virt_timer_periodic - add a periodic virtual CPU timer
 */
void add_virt_timer_periodic(struct vtimer_list *timer)
{
	__add_vtimer(timer, 1);
}
EXPORT_SYMBOL(add_virt_timer_periodic);

static int __mod_vtimer(struct vtimer_list *timer, u64 expires, int periodic)
{
	unsigned long flags;
	int rc;

	BUG_ON(!timer->function);

	if (timer->expires == expires && vtimer_pending(timer))
		return 1;
	spin_lock_irqsave(&virt_timer_lock, flags);
	rc = vtimer_pending(timer);
	if (rc)
		list_del_init(&timer->entry);
	timer->interval = periodic ? expires : 0;
	timer->expires = expires;
	internal_add_vtimer(timer);
	spin_unlock_irqrestore(&virt_timer_lock, flags);
	return rc;
}

/*
 * returns whether it has modified a pending timer (1) or not (0)
 */
int mod_virt_timer(struct vtimer_list *timer, u64 expires)
{
	return __mod_vtimer(timer, expires, 0);
}
EXPORT_SYMBOL(mod_virt_timer);

/*
 * returns whether it has modified a pending timer (1) or not (0)
 */
int mod_virt_timer_periodic(struct vtimer_list *timer, u64 expires)
{
	return __mod_vtimer(timer, expires, 1);
}
EXPORT_SYMBOL(mod_virt_timer_periodic);

/*
 * Delete a virtual timer.
 *
 * returns whether the deleted timer was pending (1) or not (0)
 */
int del_virt_timer(struct vtimer_list *timer)
{
	unsigned long flags;

	if (!vtimer_pending(timer))
		return 0;
	spin_lock_irqsave(&virt_timer_lock, flags);
	list_del_init(&timer->entry);
	spin_unlock_irqrestore(&virt_timer_lock, flags);
	return 1;
}
EXPORT_SYMBOL(del_virt_timer);

/*
 * Start the virtual CPU timer on the current CPU.
 */
void vtime_init(void)
{
	/* set initial cpu timer */
	set_vtimer(VTIMER_MAX_SLICE);
	/* Setup initial MT scaling values */
	if (smp_cpu_mtid) {
		__this_cpu_write(mt_scaling_jiffies, jiffies);
		__this_cpu_write(mt_scaling_mult, 1);
		__this_cpu_write(mt_scaling_div, 1);
		stcctm5(smp_cpu_mtid + 1, this_cpu_ptr(mt_cycles));
	}
}
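
/*
 * Example of a periodic virtual timer (illustrative sketch only; the names
 * my_poll_timer, my_poll_fn and my_interval are made up and not part of
 * this file):
 *
 *	init_virt_timer(&my_poll_timer);
 *	my_poll_timer.function = my_poll_fn;
 *	my_poll_timer.data = 0;
 *	my_poll_timer.expires = my_interval;	// also used as recharge interval
 *	add_virt_timer_periodic(&my_poll_timer);
 *
 *	// later: change the interval, or cancel the timer
 *	mod_virt_timer_periodic(&my_poll_timer, 2 * my_interval);
 *	del_virt_timer(&my_poll_timer);
 */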