/*
 * drivers/base/cpu.c - basic CPU class support
 */

#include <linux/sysdev.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/cpu.h>
#include <linux/topology.h>
#include <linux/device.h>
#include <linux/node.h>
#include <linux/gfp.h>

#include "base.h"

static struct sysdev_class_attribute *cpu_sysdev_class_attrs[];

struct sysdev_class cpu_sysdev_class = {
        .name = "cpu",
        .attrs = cpu_sysdev_class_attrs,
};
EXPORT_SYMBOL(cpu_sysdev_class);

static DEFINE_PER_CPU(struct sys_device *, cpu_sys_devices);

#ifdef CONFIG_HOTPLUG_CPU
static ssize_t show_online(struct sys_device *dev, struct sysdev_attribute *attr,
                           char *buf)
{
        struct cpu *cpu = container_of(dev, struct cpu, sysdev);

        return sprintf(buf, "%u\n", !!cpu_online(cpu->sysdev.id));
}

static ssize_t __ref store_online(struct sys_device *dev, struct sysdev_attribute *attr,
                                  const char *buf, size_t count)
{
        struct cpu *cpu = container_of(dev, struct cpu, sysdev);
        ssize_t ret;

        cpu_hotplug_driver_lock();
        switch (buf[0]) {
        case '0':
                ret = cpu_down(cpu->sysdev.id);
                if (!ret)
                        kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
                break;
        case '1':
                ret = cpu_up(cpu->sysdev.id);
                if (!ret)
                        kobject_uevent(&dev->kobj, KOBJ_ONLINE);
                break;
        default:
                ret = -EINVAL;
        }
        cpu_hotplug_driver_unlock();

        if (ret >= 0)
                ret = count;
        return ret;
}
static SYSDEV_ATTR(online, 0644, show_online, store_online);

static void __cpuinit register_cpu_control(struct cpu *cpu)
{
        sysdev_create_file(&cpu->sysdev, &attr_online);
}

void unregister_cpu(struct cpu *cpu)
{
        int logical_cpu = cpu->sysdev.id;

        unregister_cpu_under_node(logical_cpu, cpu_to_node(logical_cpu));

        sysdev_remove_file(&cpu->sysdev, &attr_online);

        sysdev_unregister(&cpu->sysdev);
        per_cpu(cpu_sys_devices, logical_cpu) = NULL;
        return;
}

#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
static ssize_t cpu_probe_store(struct sysdev_class *class,
                               struct sysdev_class_attribute *attr,
                               const char *buf,
                               size_t count)
{
        return arch_cpu_probe(buf, count);
}

static ssize_t cpu_release_store(struct sysdev_class *class,
                                 struct sysdev_class_attribute *attr,
                                 const char *buf,
                                 size_t count)
{
        return arch_cpu_release(buf, count);
}

static SYSDEV_CLASS_ATTR(probe, S_IWUSR, NULL, cpu_probe_store);
static SYSDEV_CLASS_ATTR(release, S_IWUSR, NULL, cpu_release_store);
#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */

#else /* ... !CONFIG_HOTPLUG_CPU */
static inline void register_cpu_control(struct cpu *cpu)
{
}
#endif /* CONFIG_HOTPLUG_CPU */

#ifdef CONFIG_KEXEC
#include <linux/kexec.h>
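/*
 * crash_notes: export the physical address of this CPU's crash_notes
 * buffer so that kdump userspace can locate the per-cpu register state
 * notes saved at crash time.
 */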
static ssize_t show_crash_notes(struct sys_device *dev, struct sysdev_attribute *attr,
                                char *buf)
{
        struct cpu *cpu = container_of(dev, struct cpu, sysdev);
        ssize_t rc;
        unsigned long long addr;
        int cpunum;

        cpunum = cpu->sysdev.id;

        /*
         * Might be reading another cpu's data based on which cpu the read
         * thread has been scheduled on. But cpu data (memory) is allocated
         * once during boot up and this data does not change thereafter.
         * Hence this operation should be safe. No locking required.
         */
        addr = per_cpu_ptr_to_phys(per_cpu_ptr(crash_notes, cpunum));
        rc = sprintf(buf, "%Lx\n", addr);
        return rc;
}
static SYSDEV_ATTR(crash_notes, 0400, show_crash_notes, NULL);
#endif

/*
 * Print cpu online, possible, present, and system maps
 */

struct cpu_attr {
        struct sysdev_class_attribute attr;
        const struct cpumask *const * const map;
};

static ssize_t show_cpus_attr(struct sysdev_class *class,
                              struct sysdev_class_attribute *attr,
                              char *buf)
{
        struct cpu_attr *ca = container_of(attr, struct cpu_attr, attr);
        int n = cpulist_scnprintf(buf, PAGE_SIZE-2, *(ca->map));

        buf[n++] = '\n';
        buf[n] = '\0';
        return n;
}

#define _CPU_ATTR(name, map) \
        { _SYSDEV_CLASS_ATTR(name, 0444, show_cpus_attr, NULL), map }

/* Keep in sync with cpu_sysdev_class_attrs */
static struct cpu_attr cpu_attrs[] = {
        _CPU_ATTR(online, &cpu_online_mask),
        _CPU_ATTR(possible, &cpu_possible_mask),
        _CPU_ATTR(present, &cpu_present_mask),
};

/*
 * Print values for NR_CPUS and offlined cpus
 */
static ssize_t print_cpus_kernel_max(struct sysdev_class *class,
                                     struct sysdev_class_attribute *attr, char *buf)
{
        int n = snprintf(buf, PAGE_SIZE-2, "%d\n", NR_CPUS - 1);
        return n;
}
static SYSDEV_CLASS_ATTR(kernel_max, 0444, print_cpus_kernel_max, NULL);

/* arch-optional setting to enable display of offline cpus >= nr_cpu_ids */
unsigned int total_cpus;

static ssize_t print_cpus_offline(struct sysdev_class *class,
                                  struct sysdev_class_attribute *attr, char *buf)
{
        int n = 0, len = PAGE_SIZE-2;
        cpumask_var_t offline;

        /* display offline cpus < nr_cpu_ids */
        if (!alloc_cpumask_var(&offline, GFP_KERNEL))
                return -ENOMEM;
        cpumask_andnot(offline, cpu_possible_mask, cpu_online_mask);
        n = cpulist_scnprintf(buf, len, offline);
        free_cpumask_var(offline);

        /* display offline cpus >= nr_cpu_ids */
        if (total_cpus && nr_cpu_ids < total_cpus) {
                if (n && n < len)
                        buf[n++] = ',';

                if (nr_cpu_ids == total_cpus-1)
                        n += snprintf(&buf[n], len - n, "%d", nr_cpu_ids);
                else
                        n += snprintf(&buf[n], len - n, "%d-%d",
                                      nr_cpu_ids, total_cpus-1);
        }

        n += snprintf(&buf[n], len - n, "\n");
        return n;
}
static SYSDEV_CLASS_ATTR(offline, 0444, print_cpus_offline, NULL);

/*
 * register_cpu - Setup a sysfs device for a CPU.
 * @cpu - cpu->hotpluggable field set to 1 will generate a control file in
 *        sysfs for this CPU.
 * @num - CPU number to use when creating the device.
 *
 * Initialize and register the CPU device.
 */
int __cpuinit register_cpu(struct cpu *cpu, int num)
{
        int error;

        cpu->node_id = cpu_to_node(num);
        cpu->sysdev.id = num;
        cpu->sysdev.cls = &cpu_sysdev_class;

        error = sysdev_register(&cpu->sysdev);

        if (!error && cpu->hotpluggable)
                register_cpu_control(cpu);
        if (!error)
                per_cpu(cpu_sys_devices, num) = &cpu->sysdev;
        if (!error)
                register_cpu_under_node(num, cpu_to_node(num));

#ifdef CONFIG_KEXEC
        if (!error)
                error = sysdev_create_file(&cpu->sysdev, &attr_crash_notes);
#endif
        return error;
}
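/*
 * get_cpu_sysdev - return the sys_device registered for a CPU number,
 * or NULL if @cpu is out of range, not possible, or not yet registered.
 */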
struct sys_device *get_cpu_sysdev(unsigned cpu)
{
        if (cpu < nr_cpu_ids && cpu_possible(cpu))
                return per_cpu(cpu_sys_devices, cpu);
        else
                return NULL;
}
EXPORT_SYMBOL_GPL(get_cpu_sysdev);

int __init cpu_dev_init(void)
{
        int err;

        err = sysdev_class_register(&cpu_sysdev_class);
#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
        if (!err)
                err = sched_create_sysfs_power_savings_entries(&cpu_sysdev_class);
#endif

        return err;
}

static struct sysdev_class_attribute *cpu_sysdev_class_attrs[] = {
#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
        &attr_probe,
        &attr_release,
#endif
        &cpu_attrs[0].attr,
        &cpu_attrs[1].attr,
        &cpu_attrs[2].attr,
        &attr_kernel_max,
        &attr_offline,
        NULL
};