// SPDX-License-Identifier: GPL-2.0
/*
 * Idle functions for s390.
 *
 * Copyright IBM Corp. 2014
 *
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/kprobes.h>
#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/sched/cputime.h>
#include <trace/events/power.h>
#include <asm/nmi.h>
#include <asm/smp.h>
#include "entry.h"

static DEFINE_PER_CPU(struct s390_idle_data, s390_idle);

void enabled_wait(void)
{
	struct s390_idle_data *idle = this_cpu_ptr(&s390_idle);
	unsigned long long idle_time;
	unsigned long psw_mask, flags;

	/* Wait for external, I/O or machine check interrupt. */
	psw_mask = PSW_KERNEL_BITS | PSW_MASK_WAIT | PSW_MASK_DAT |
		PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
	clear_cpu_flag(CIF_NOHZ_DELAY);

	local_irq_save(flags);
	/* Call the assembler magic in entry.S */
	psw_idle(idle, psw_mask);
	local_irq_restore(flags);

	/* Account time spent with enabled wait psw loaded as idle time. */
	/* XXX seqcount has tracepoints that require RCU */
	write_seqcount_begin(&idle->seqcount);
	idle_time = idle->clock_idle_exit - idle->clock_idle_enter;
	idle->clock_idle_enter = idle->clock_idle_exit = 0ULL;
	idle->idle_time += idle_time;
	idle->idle_count++;
	account_idle_time(cputime_to_nsecs(idle_time));
	write_seqcount_end(&idle->seqcount);
}
NOKPROBE_SYMBOL(enabled_wait);

/*
 * Sysfs attribute: number of idle periods entered on this CPU. An idle
 * period that is currently in progress (clock_idle_enter is set) is
 * counted as well.
 */
static ssize_t show_idle_count(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id);
	unsigned long long idle_count;
	unsigned int seq;

	do {
		seq = read_seqcount_begin(&idle->seqcount);
		idle_count = READ_ONCE(idle->idle_count);
		if (READ_ONCE(idle->clock_idle_enter))
			idle_count++;
	} while (read_seqcount_retry(&idle->seqcount, seq));
	return sprintf(buf, "%llu\n", idle_count);
}
DEVICE_ATTR(idle_count, 0444, show_idle_count, NULL);

/*
 * Sysfs attribute: accumulated idle time of this CPU in microseconds,
 * including the idle period that is currently in progress. The TOD
 * clock delta is shifted right by 12 to convert it to microseconds.
 */
static ssize_t show_idle_time(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	unsigned long long now, idle_time, idle_enter, idle_exit, in_idle;
	struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id);
	unsigned int seq;

	do {
		seq = read_seqcount_begin(&idle->seqcount);
		idle_time = READ_ONCE(idle->idle_time);
		idle_enter = READ_ONCE(idle->clock_idle_enter);
		idle_exit = READ_ONCE(idle->clock_idle_exit);
	} while (read_seqcount_retry(&idle->seqcount, seq));
	in_idle = 0;
	now = get_tod_clock();
	if (idle_enter) {
		if (idle_exit) {
			in_idle = idle_exit - idle_enter;
		} else if (now > idle_enter) {
			in_idle = now - idle_enter;
		}
	}
	idle_time += in_idle;
	return sprintf(buf, "%llu\n", idle_time >> 12);
}
DEVICE_ATTR(idle_time_us, 0444, show_idle_time, NULL);

/*
 * Return the time spent in the current, not yet accounted idle period
 * of @cpu in nanoseconds (zero if the CPU is not in enabled wait).
 */
u64 arch_cpu_idle_time(int cpu)
{
	struct s390_idle_data *idle = &per_cpu(s390_idle, cpu);
	unsigned long long now, idle_enter, idle_exit, in_idle;
	unsigned int seq;

	do {
		seq = read_seqcount_begin(&idle->seqcount);
		idle_enter = READ_ONCE(idle->clock_idle_enter);
		idle_exit = READ_ONCE(idle->clock_idle_exit);
	} while (read_seqcount_retry(&idle->seqcount, seq));
	in_idle = 0;
	now = get_tod_clock();
	if (idle_enter) {
		if (idle_exit) {
			in_idle = idle_exit - idle_enter;
		} else if (now > idle_enter) {
			in_idle = now - idle_enter;
		}
	}
	return cputime_to_nsecs(in_idle);
}

void arch_cpu_idle_enter(void)
{
}

void arch_cpu_idle(void)
{
	/* Halt the cpu and keep track of cpu time accounting. */
	enabled_wait();
	local_irq_enable();
}

void arch_cpu_idle_exit(void)
{
}

void arch_cpu_idle_dead(void)
{
	cpu_die();
}