/*
 * Virtual cpu timer based timer functions.
 *
 * Copyright IBM Corp. 2004, 2012
 * Author(s): Jan Glauber <jan.glauber@de.ibm.com>
 */

#include <linux/kernel_stat.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/timex.h>
#include <linux/types.h>
#include <linux/time.h>

#include <asm/cputime.h>
#include <asm/vtimer.h>
#include <asm/vtime.h>

static void virt_timer_expire(void);

static LIST_HEAD(virt_timer_list);
static DEFINE_SPINLOCK(virt_timer_lock);
static atomic64_t virt_timer_current;
static atomic64_t virt_timer_elapsed;

static inline u64 get_vtimer(void)
{
	u64 timer;

	asm volatile("stpt %0" : "=m" (timer));
	return timer;
}

static inline void set_vtimer(u64 expires)
{
	u64 timer;

	asm volatile(
		"	stpt	%0\n"	/* Store current cpu timer value */
		"	spt	%1"	/* Set new value imm. afterwards */
		: "=m" (timer) : "m" (expires));
	S390_lowcore.system_timer += S390_lowcore.last_update_timer - timer;
	S390_lowcore.last_update_timer = expires;
}

static inline int virt_timer_forward(u64 elapsed)
{
	BUG_ON(!irqs_disabled());

	if (list_empty(&virt_timer_list))
		return 0;
	elapsed = atomic64_add_return(elapsed, &virt_timer_elapsed);
	return elapsed >= atomic64_read(&virt_timer_current);
}

/*
 * Update process times based on virtual cpu times stored by entry.S
 * to the lowcore fields user_timer, system_timer & steal_timer.
 */
static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
{
	struct thread_info *ti = task_thread_info(tsk);
	u64 timer, clock, user, system, steal;

	timer = S390_lowcore.last_update_timer;
	clock = S390_lowcore.last_update_clock;
	asm volatile(
		"	stpt	%0\n"	/* Store current cpu timer value */
		"	stck	%1"	/* Store current tod clock value */
		: "=m" (S390_lowcore.last_update_timer),
		  "=m" (S390_lowcore.last_update_clock));
	S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
	S390_lowcore.steal_timer += S390_lowcore.last_update_clock - clock;

	user = S390_lowcore.user_timer - ti->user_timer;
	S390_lowcore.steal_timer -= user;
	ti->user_timer = S390_lowcore.user_timer;
	account_user_time(tsk, user, user);

	system = S390_lowcore.system_timer - ti->system_timer;
	S390_lowcore.steal_timer -= system;
	ti->system_timer = S390_lowcore.system_timer;
	account_system_time(tsk, hardirq_offset, system, system);

	steal = S390_lowcore.steal_timer;
	if ((s64) steal > 0) {
		S390_lowcore.steal_timer = 0;
		account_steal_time(steal);
	}

	return virt_timer_forward(user + system);
}

void vtime_task_switch(struct task_struct *prev)
{
	struct thread_info *ti;

	do_account_vtime(prev, 0);
	ti = task_thread_info(prev);
	ti->user_timer = S390_lowcore.user_timer;
	ti->system_timer = S390_lowcore.system_timer;
	ti = task_thread_info(current);
	S390_lowcore.user_timer = ti->user_timer;
	S390_lowcore.system_timer = ti->system_timer;
}

/*
 * On s390, accounting pending user time also implies
 * accounting system time in order to correctly compute
 * steal time.
 */
void vtime_account_user(struct task_struct *tsk)
{
	if (do_account_vtime(tsk, HARDIRQ_OFFSET))
		virt_timer_expire();
}
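
/*
 * Illustrative sketch, not part of the original file; the numbers are
 * made up. do_account_vtime() derives steal time from the gap between
 * wall-clock progress (TOD clock) and CPU-timer progress:
 *
 *	tod_delta  = last_update_clock_new - last_update_clock_old;
 *	cpu_delta  = user_delta + system_delta;
 *	steal_timer += tod_delta;	// e.g. 12 units of wall-clock time
 *	steal_timer -= cpu_delta;	// e.g. 10 units actually executed
 *					// -> ~2 units spent elsewhere by the
 *					//    hypervisor count as steal time
 *
 * Only a positive remainder is reported via account_steal_time().
 */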

/*
 * Update process times based on virtual cpu times stored by entry.S
 * to the lowcore fields user_timer, system_timer & steal_timer.
 */
void vtime_account_irq_enter(struct task_struct *tsk)
{
	struct thread_info *ti = task_thread_info(tsk);
	u64 timer, system;

	WARN_ON_ONCE(!irqs_disabled());

	timer = S390_lowcore.last_update_timer;
	S390_lowcore.last_update_timer = get_vtimer();
	S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;

	system = S390_lowcore.system_timer - ti->system_timer;
	S390_lowcore.steal_timer -= system;
	ti->system_timer = S390_lowcore.system_timer;
	account_system_time(tsk, 0, system, system);

	virt_timer_forward(system);
}
EXPORT_SYMBOL_GPL(vtime_account_irq_enter);

void vtime_account_system(struct task_struct *tsk)
__attribute__((alias("vtime_account_irq_enter")));
EXPORT_SYMBOL_GPL(vtime_account_system);

/*
 * Sorted add to a list. The list is searched linearly until the first
 * element with a larger expiry value is found.
 */
static void list_add_sorted(struct vtimer_list *timer, struct list_head *head)
{
	struct vtimer_list *tmp;

	list_for_each_entry(tmp, head, entry) {
		if (tmp->expires > timer->expires) {
			list_add_tail(&timer->entry, &tmp->entry);
			return;
		}
	}
	list_add_tail(&timer->entry, head);
}

/*
 * Handler for expired virtual CPU timer.
 */
static void virt_timer_expire(void)
{
	struct vtimer_list *timer, *tmp;
	unsigned long elapsed;
	LIST_HEAD(cb_list);

	/* walk timer list, fire all expired timers */
	spin_lock(&virt_timer_lock);
	elapsed = atomic64_read(&virt_timer_elapsed);
	list_for_each_entry_safe(timer, tmp, &virt_timer_list, entry) {
		if (timer->expires < elapsed)
			/* move expired timer to the callback queue */
			list_move_tail(&timer->entry, &cb_list);
		else
			timer->expires -= elapsed;
	}
	if (!list_empty(&virt_timer_list)) {
		timer = list_first_entry(&virt_timer_list,
					 struct vtimer_list, entry);
		atomic64_set(&virt_timer_current, timer->expires);
	}
	atomic64_sub(elapsed, &virt_timer_elapsed);
	spin_unlock(&virt_timer_lock);

	/* Do callbacks and recharge periodic timers */
	list_for_each_entry_safe(timer, tmp, &cb_list, entry) {
		list_del_init(&timer->entry);
		timer->function(timer->data);
		if (timer->interval) {
			/* Recharge interval timer */
			timer->expires = timer->interval +
				atomic64_read(&virt_timer_elapsed);
			spin_lock(&virt_timer_lock);
			list_add_sorted(timer, &virt_timer_list);
			spin_unlock(&virt_timer_lock);
		}
	}
}

void init_virt_timer(struct vtimer_list *timer)
{
	timer->function = NULL;
	INIT_LIST_HEAD(&timer->entry);
}
EXPORT_SYMBOL(init_virt_timer);

static inline int vtimer_pending(struct vtimer_list *timer)
{
	return !list_empty(&timer->entry);
}
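
/*
 * Illustrative sketch, not from the original source. Expiry values on
 * virt_timer_list are relative to the CPU time already accumulated in
 * virt_timer_elapsed against the currently programmed base, so the helper
 * below rebases a new timer before queueing it. With hypothetical values:
 *
 *	virt_timer_elapsed = 300;	// consumed against the current base
 *	timer->expires     = 500;	// caller wants 500 units from now
 *	timer->expires    += 300;	// -> 800 on the shared base
 *
 * virt_timer_expire() later subtracts the elapsed amount from all queued
 * timers and from virt_timer_elapsed again, keeping both views consistent.
 */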

static void internal_add_vtimer(struct vtimer_list *timer)
{
	if (list_empty(&virt_timer_list)) {
		/* First timer, just program it. */
		atomic64_set(&virt_timer_current, timer->expires);
		atomic64_set(&virt_timer_elapsed, 0);
		list_add(&timer->entry, &virt_timer_list);
	} else {
		/* Update timer against current base. */
		timer->expires += atomic64_read(&virt_timer_elapsed);
		if (likely((s64) timer->expires <
			   (s64) atomic64_read(&virt_timer_current)))
			/* The new timer expires before the current timer. */
			atomic64_set(&virt_timer_current, timer->expires);
		/* Insert new timer into the list. */
		list_add_sorted(timer, &virt_timer_list);
	}
}

static void __add_vtimer(struct vtimer_list *timer, int periodic)
{
	unsigned long flags;

	timer->interval = periodic ? timer->expires : 0;
	spin_lock_irqsave(&virt_timer_lock, flags);
	internal_add_vtimer(timer);
	spin_unlock_irqrestore(&virt_timer_lock, flags);
}

/*
 * add_virt_timer - add a one-shot virtual CPU timer
 */
void add_virt_timer(struct vtimer_list *timer)
{
	__add_vtimer(timer, 0);
}
EXPORT_SYMBOL(add_virt_timer);

/*
 * add_virt_timer_periodic - add an interval virtual CPU timer
 */
void add_virt_timer_periodic(struct vtimer_list *timer)
{
	__add_vtimer(timer, 1);
}
EXPORT_SYMBOL(add_virt_timer_periodic);

static int __mod_vtimer(struct vtimer_list *timer, u64 expires, int periodic)
{
	unsigned long flags;
	int rc;

	BUG_ON(!timer->function);

	if (timer->expires == expires && vtimer_pending(timer))
		return 1;
	spin_lock_irqsave(&virt_timer_lock, flags);
	rc = vtimer_pending(timer);
	if (rc)
		list_del_init(&timer->entry);
	timer->interval = periodic ? expires : 0;
	timer->expires = expires;
	internal_add_vtimer(timer);
	spin_unlock_irqrestore(&virt_timer_lock, flags);
	return rc;
}

/*
 * returns whether it has modified a pending timer (1) or not (0)
 */
int mod_virt_timer(struct vtimer_list *timer, u64 expires)
{
	return __mod_vtimer(timer, expires, 0);
}
EXPORT_SYMBOL(mod_virt_timer);

/*
 * returns whether it has modified a pending timer (1) or not (0)
 */
int mod_virt_timer_periodic(struct vtimer_list *timer, u64 expires)
{
	return __mod_vtimer(timer, expires, 1);
}
EXPORT_SYMBOL(mod_virt_timer_periodic);

/*
 * Delete a virtual timer.
 *
 * returns whether the deleted timer was pending (1) or not (0)
 */
int del_virt_timer(struct vtimer_list *timer)
{
	unsigned long flags;

	if (!vtimer_pending(timer))
		return 0;
	spin_lock_irqsave(&virt_timer_lock, flags);
	list_del_init(&timer->entry);
	spin_unlock_irqrestore(&virt_timer_lock, flags);
	return 1;
}
EXPORT_SYMBOL(del_virt_timer);

/*
 * Start the virtual CPU timer on the current CPU.
 */
void vtime_init(void)
{
	/* set initial cpu timer */
	set_vtimer(VTIMER_MAX_SLICE);
}
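
/*
 * Illustrative usage sketch (hypothetical caller, not part of this file):
 * a one-shot virtual CPU timer that fires once the task has consumed a
 * chosen amount of CPU time.
 *
 *	static struct vtimer_list my_vtimer;
 *
 *	static void my_vtimer_fn(unsigned long data)
 *	{
 *		// called from virt_timer_expire() when the time is used up
 *	}
 *
 *	init_virt_timer(&my_vtimer);
 *	my_vtimer.function = my_vtimer_fn;
 *	my_vtimer.data = 0;
 *	my_vtimer.expires = expiry_in_cpu_timer_units;	// caller-chosen value
 *	add_virt_timer(&my_vtimer);
 *
 * add_virt_timer_periodic() re-arms the timer with the same interval after
 * each callback; mod_virt_timer() and del_virt_timer() adjust or cancel it.
 */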