/*
 * Virtual cpu timer based timer functions.
 *
 * Copyright IBM Corp. 2004, 2012
 * Author(s): Jan Glauber <jan.glauber@de.ibm.com>
 */

#include <linux/kernel_stat.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/timex.h>
#include <linux/types.h>
#include <linux/time.h>

#include <asm/cputime.h>
#include <asm/vtimer.h>
#include <asm/vtime.h>

static void virt_timer_expire(void);

static LIST_HEAD(virt_timer_list);
static DEFINE_SPINLOCK(virt_timer_lock);
static atomic64_t virt_timer_current;
static atomic64_t virt_timer_elapsed;

static inline u64 get_vtimer(void)
{
        u64 timer;

        asm volatile("stpt %0" : "=m" (timer));
        return timer;
}

static inline void set_vtimer(u64 expires)
{
        u64 timer;

        asm volatile(
                "       stpt    %0\n"   /* Store current cpu timer value */
                "       spt     %1"     /* Set new value immediately afterwards */
                : "=m" (timer) : "m" (expires));
        S390_lowcore.system_timer += S390_lowcore.last_update_timer - timer;
        S390_lowcore.last_update_timer = expires;
}

/*
 * Add the elapsed virtual cpu time to the global counter and check
 * whether the first timer on the list is due. Returns nonzero if
 * virt_timer_expire() needs to be called.
 */
static inline int virt_timer_forward(u64 elapsed)
{
        BUG_ON(!irqs_disabled());

        if (list_empty(&virt_timer_list))
                return 0;
        elapsed = atomic64_add_return(elapsed, &virt_timer_elapsed);
        return elapsed >= atomic64_read(&virt_timer_current);
}

/*
 * Update process times based on virtual cpu times stored by entry.S
 * to the lowcore fields user_timer, system_timer & steal_timer.
 */
static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
{
        struct thread_info *ti = task_thread_info(tsk);
        u64 timer, clock, user, system, steal;

        timer = S390_lowcore.last_update_timer;
        clock = S390_lowcore.last_update_clock;
        asm volatile(
                "       stpt    %0\n"   /* Store current cpu timer value */
#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
                "       stckf   %1"     /* Store current tod clock value */
#else
                "       stck    %1"     /* Store current tod clock value */
#endif
                : "=m" (S390_lowcore.last_update_timer),
                  "=m" (S390_lowcore.last_update_clock));
        S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
        S390_lowcore.steal_timer += S390_lowcore.last_update_clock - clock;

        user = S390_lowcore.user_timer - ti->user_timer;
        S390_lowcore.steal_timer -= user;
        ti->user_timer = S390_lowcore.user_timer;
        account_user_time(tsk, user, user);

        system = S390_lowcore.system_timer - ti->system_timer;
        S390_lowcore.steal_timer -= system;
        ti->system_timer = S390_lowcore.system_timer;
        account_system_time(tsk, hardirq_offset, system, system);

        steal = S390_lowcore.steal_timer;
        if ((s64) steal > 0) {
                S390_lowcore.steal_timer = 0;
                account_steal_time(steal);
        }

        return virt_timer_forward(user + system);
}
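/*
 * Called by the scheduler on every task switch: account the time of
 * the outgoing task, save the lowcore timer snapshots into its
 * thread_info and reload the lowcore fields from the incoming task.
 */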
void vtime_task_switch(struct task_struct *prev)
{
        struct thread_info *ti;

        do_account_vtime(prev, 0);
        ti = task_thread_info(prev);
        ti->user_timer = S390_lowcore.user_timer;
        ti->system_timer = S390_lowcore.system_timer;
        ti = task_thread_info(current);
        S390_lowcore.user_timer = ti->user_timer;
        S390_lowcore.system_timer = ti->system_timer;
}

/*
 * On s390, accounting pending user time also implies accounting
 * system time, in order to correctly compute steal time.
 */
void vtime_account_user(struct task_struct *tsk)
{
        if (do_account_vtime(tsk, HARDIRQ_OFFSET))
                virt_timer_expire();
}

/*
 * Update the current task's system time based on the virtual cpu
 * timer. Called on interrupt entry and, via the vtime_account_system
 * alias, for generic system time accounting.
 */
void vtime_account_irq_enter(struct task_struct *tsk)
{
        struct thread_info *ti = task_thread_info(tsk);
        u64 timer, system;

        timer = S390_lowcore.last_update_timer;
        S390_lowcore.last_update_timer = get_vtimer();
        S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;

        system = S390_lowcore.system_timer - ti->system_timer;
        S390_lowcore.steal_timer -= system;
        ti->system_timer = S390_lowcore.system_timer;
        account_system_time(tsk, 0, system, system);

        virt_timer_forward(system);
}
EXPORT_SYMBOL_GPL(vtime_account_irq_enter);

void vtime_account_system(struct task_struct *tsk)
__attribute__((alias("vtime_account_irq_enter")));
EXPORT_SYMBOL_GPL(vtime_account_system);

/*
 * Sorted add to a list. The list is searched linearly until the first
 * element with a later expiry time is found.
 */
static void list_add_sorted(struct vtimer_list *timer, struct list_head *head)
{
        struct vtimer_list *tmp;

        list_for_each_entry(tmp, head, entry) {
                if (tmp->expires > timer->expires) {
                        list_add_tail(&timer->entry, &tmp->entry);
                        return;
                }
        }
        list_add_tail(&timer->entry, head);
}

/*
 * Handler for expired virtual CPU timer.
 */
static void virt_timer_expire(void)
{
        struct vtimer_list *timer, *tmp;
        unsigned long elapsed;
        LIST_HEAD(cb_list);

        /* walk timer list, fire all expired timers */
        spin_lock(&virt_timer_lock);
        elapsed = atomic64_read(&virt_timer_elapsed);
        list_for_each_entry_safe(timer, tmp, &virt_timer_list, entry) {
                if (timer->expires < elapsed)
                        /* move expired timer to the callback queue */
                        list_move_tail(&timer->entry, &cb_list);
                else
                        timer->expires -= elapsed;
        }
        if (!list_empty(&virt_timer_list)) {
                timer = list_first_entry(&virt_timer_list,
                                         struct vtimer_list, entry);
                atomic64_set(&virt_timer_current, timer->expires);
        }
        atomic64_sub(elapsed, &virt_timer_elapsed);
        spin_unlock(&virt_timer_lock);

        /* Do callbacks and recharge periodic timers */
        list_for_each_entry_safe(timer, tmp, &cb_list, entry) {
                list_del_init(&timer->entry);
                timer->function(timer->data);
                if (timer->interval) {
                        /* Recharge interval timer */
                        timer->expires = timer->interval +
                                atomic64_read(&virt_timer_elapsed);
                        spin_lock(&virt_timer_lock);
                        list_add_sorted(timer, &virt_timer_list);
                        spin_unlock(&virt_timer_lock);
                }
        }
}

void init_virt_timer(struct vtimer_list *timer)
{
        timer->function = NULL;
        INIT_LIST_HEAD(&timer->entry);
}
EXPORT_SYMBOL(init_virt_timer);

static inline int vtimer_pending(struct vtimer_list *timer)
{
        return !list_empty(&timer->entry);
}
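/*
 * Enqueue a timer on the virtual timer list. Expiry values on the
 * list are kept against the same base as virt_timer_elapsed, so a
 * newly added timer is shifted by the virtual time that has already
 * elapsed. Must be called with virt_timer_lock held and interrupts
 * disabled.
 */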
static void internal_add_vtimer(struct vtimer_list *timer)
{
        if (list_empty(&virt_timer_list)) {
                /* First timer, just program it. */
                atomic64_set(&virt_timer_current, timer->expires);
                atomic64_set(&virt_timer_elapsed, 0);
                list_add(&timer->entry, &virt_timer_list);
        } else {
                /* Update timer against current base. */
                timer->expires += atomic64_read(&virt_timer_elapsed);
                if (likely((s64) timer->expires <
                           (s64) atomic64_read(&virt_timer_current)))
                        /* The new timer expires before the current timer. */
                        atomic64_set(&virt_timer_current, timer->expires);
                /* Insert new timer into the list. */
                list_add_sorted(timer, &virt_timer_list);
        }
}

static void __add_vtimer(struct vtimer_list *timer, int periodic)
{
        unsigned long flags;

        timer->interval = periodic ? timer->expires : 0;
        spin_lock_irqsave(&virt_timer_lock, flags);
        internal_add_vtimer(timer);
        spin_unlock_irqrestore(&virt_timer_lock, flags);
}

/*
 * add_virt_timer - add a one-shot virtual CPU timer
 */
void add_virt_timer(struct vtimer_list *timer)
{
        __add_vtimer(timer, 0);
}
EXPORT_SYMBOL(add_virt_timer);

/*
 * add_virt_timer_periodic - add an interval virtual CPU timer
 */
void add_virt_timer_periodic(struct vtimer_list *timer)
{
        __add_vtimer(timer, 1);
}
EXPORT_SYMBOL(add_virt_timer_periodic);

static int __mod_vtimer(struct vtimer_list *timer, u64 expires, int periodic)
{
        unsigned long flags;
        int rc;

        BUG_ON(!timer->function);

        if (timer->expires == expires && vtimer_pending(timer))
                return 1;
        spin_lock_irqsave(&virt_timer_lock, flags);
        rc = vtimer_pending(timer);
        if (rc)
                list_del_init(&timer->entry);
        timer->interval = periodic ? expires : 0;
        timer->expires = expires;
        internal_add_vtimer(timer);
        spin_unlock_irqrestore(&virt_timer_lock, flags);
        return rc;
}

/*
 * Returns whether it has modified a pending timer (1) or not (0).
 */
int mod_virt_timer(struct vtimer_list *timer, u64 expires)
{
        return __mod_vtimer(timer, expires, 0);
}
EXPORT_SYMBOL(mod_virt_timer);

/*
 * Returns whether it has modified a pending timer (1) or not (0).
 */
int mod_virt_timer_periodic(struct vtimer_list *timer, u64 expires)
{
        return __mod_vtimer(timer, expires, 1);
}
EXPORT_SYMBOL(mod_virt_timer_periodic);

/*
 * Delete a virtual timer.
 *
 * Returns whether the deleted timer was pending (1) or not (0).
 */
int del_virt_timer(struct vtimer_list *timer)
{
        unsigned long flags;

        if (!vtimer_pending(timer))
                return 0;
        spin_lock_irqsave(&virt_timer_lock, flags);
        list_del_init(&timer->entry);
        spin_unlock_irqrestore(&virt_timer_lock, flags);
        return 1;
}
EXPORT_SYMBOL(del_virt_timer);

/*
 * Start the virtual CPU timer on the current CPU.
 */
void vtime_init(void)
{
        /* set initial cpu timer */
        set_vtimer(VTIMER_MAX_SLICE);
}
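#ifdef VTIME_USAGE_EXAMPLE      /* illustrative sketch only, never defined */
/*
 * Usage sketch, not part of the original file: arm a periodic virtual
 * CPU timer. init_virt_timer(), add_virt_timer_periodic() and the
 * vtimer_list fields (function, data, expires, interval) are taken
 * from this file; the example_* identifiers and the chosen expiry
 * value are made up for illustration. The callback prototype follows
 * the way virt_timer_expire() above invokes timer->function(timer->data).
 */
static void example_vtimer_fn(unsigned long data)
{
        /*
         * Runs from virt_timer_expire() after the cpu has consumed the
         * requested amount of virtual cpu time (user + system); because
         * ->interval is set, the timer is re-armed afterwards.
         */
}

static struct vtimer_list example_vtimer;

static void example_setup_vtimer(void)
{
        init_virt_timer(&example_vtimer);
        example_vtimer.function = example_vtimer_fn;
        example_vtimer.data = 0;
        /* cpu timer units: 4096 units per microsecond, so ~5 seconds here */
        example_vtimer.expires = 5ULL * 4096000000ULL;
        add_virt_timer_periodic(&example_vtimer);
}
#endif /* VTIME_USAGE_EXAMPLE */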