// SPDX-License-Identifier: GPL-2.0-only
/*
 * kernel/ksysfs.c - sysfs attributes in /sys/kernel, which
 * are not related to any other subsystem
 *
 * Copyright (C) 2004 Kay Sievers <kay.sievers@vrfy.org>
 */

#include <asm/byteorder.h>
#include <linux/kobject.h>
#include <linux/string.h>
#include <linux/sysfs.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/profile.h>
#include <linux/stat.h>
#include <linux/sched.h>
#include <linux/capability.h>
#include <linux/compiler.h>

#include <linux/rcupdate.h>	/* rcu_expedited and rcu_normal */

/* Resolve build-time CPU endianness to the string shown by /sys/kernel/cpu_byteorder. */
#if defined(__LITTLE_ENDIAN)
#define CPU_BYTEORDER_STRING "little"
#elif defined(__BIG_ENDIAN)
#define CPU_BYTEORDER_STRING "big"
#else
#error Unknown byteorder
#endif

/*
 * Expand to a static kobj_attribute named <name>_attr, bound to
 * <name>_show() (and <name>_store() for the RW variant) via __ATTR_*.
 */
#define KERNEL_ATTR_RO(_name) \
static struct kobj_attribute _name##_attr = __ATTR_RO(_name)

#define KERNEL_ATTR_RW(_name) \
static struct kobj_attribute _name##_attr = __ATTR_RW(_name)

/* current uevent sequence number */
static ssize_t uevent_seqnum_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%llu\n", (u64)atomic64_read(&uevent_seqnum));
}
KERNEL_ATTR_RO(uevent_seqnum);

/* cpu byteorder */
static ssize_t cpu_byteorder_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%s\n", CPU_BYTEORDER_STRING);
}
KERNEL_ATTR_RO(cpu_byteorder);

/* address bits: pointer width of this kernel build, in bits */
static ssize_t address_bits_show(struct kobject *kobj,
				 struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%zu\n", sizeof(void *) * 8 /* CHAR_BIT */);
}
KERNEL_ATTR_RO(address_bits);

#ifdef CONFIG_UEVENT_HELPER
/* uevent helper program, used during early boot */
static ssize_t uevent_helper_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%s\n", uevent_helper);
}

/*
 * Replace the uevent helper path.  A single trailing newline (as written
 * by "echo") is stripped after copying.
 */
static ssize_t uevent_helper_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	/*
	 * NOTE(review): -ENOENT is an odd errno for "path too long", but it
	 * is long-standing user-visible behavior here — confirm before
	 * changing.  The +1 reserves room for the NUL terminator below.
	 */
	if (count+1 > UEVENT_HELPER_PATH_LEN)
		return -ENOENT;
	memcpy(uevent_helper, buf, count);
	uevent_helper[count] = '\0';
	if (count && uevent_helper[count-1] == '\n')
		uevent_helper[count-1] = '\0';
	return count;
}
KERNEL_ATTR_RW(uevent_helper);
#endif

#ifdef CONFIG_PROFILING
/* whether kernel profiling is active (prof_on is set by profile_setup()) */
static ssize_t profiling_show(struct kobject *kobj,
			      struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", prof_on);
}

/*
 * One-shot enable of kernel profiling: parse the written string as a
 * profile= option, then allocate the buffer and create /proc/profile.
 * Returns -EEXIST if profiling was already enabled.
 */
static ssize_t profiling_store(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       const char *buf, size_t count)
{
	int ret;
	static DEFINE_MUTEX(lock);

	/*
	 * We need serialization, for profile_setup() initializes prof_on
	 * value and profile_init() must not reallocate prof_buffer after
	 * once allocated.
	 */
	guard(mutex)(&lock);
	if (prof_on)
		return -EEXIST;
	/*
	 * This eventually calls into get_option() which
	 * has a ton of callers and is not const. It is
	 * easiest to cast it away here.
	 */
	profile_setup((char *)buf);
	ret = profile_init();
	if (ret)
		return ret;
	ret = create_proc_profile();
	if (ret)
		return ret;
	return count;
}
KERNEL_ATTR_RW(profiling);
#endif

#ifdef CONFIG_KEXEC_CORE
/* whether a kexec kernel image is currently loaded (0 or 1) */
static ssize_t kexec_loaded_show(struct kobject *kobj,
				 struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", !!kexec_image);
}
KERNEL_ATTR_RO(kexec_loaded);

#ifdef CONFIG_CRASH_DUMP
/* whether a crash (kdump) kernel is currently loaded */
static ssize_t kexec_crash_loaded_show(struct kobject *kobj,
				       struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", kexec_crash_loaded());
}
KERNEL_ATTR_RO(kexec_crash_loaded);

#ifdef CONFIG_CRASH_RESERVE
/* one "start-end" line per reserved CMA crash range */
static ssize_t kexec_crash_cma_ranges_show(struct kobject *kobj,
					   struct kobj_attribute *attr, char *buf)
{
	ssize_t len = 0;
	int i;

	for (i = 0; i < crashk_cma_cnt; ++i) {
		len += sysfs_emit_at(buf, len, "%08llx-%08llx\n",
				     crashk_cma_ranges[i].start,
				     crashk_cma_ranges[i].end);
	}
	return len;
}
KERNEL_ATTR_RO(kexec_crash_cma_ranges);
#endif /* CONFIG_CRASH_RESERVE */

/* size of the crash kernel memory reservation, in bytes */
static ssize_t kexec_crash_size_show(struct kobject *kobj,
				     struct kobj_attribute *attr, char *buf)
{
	ssize_t size = crash_get_memory_size();

	/* propagate a negative errno from crash_get_memory_size() */
	if (size < 0)
		return size;

	return sysfs_emit(buf, "%zd\n", size);
}

/* shrink the crash kernel reservation to the written byte count */
static ssize_t kexec_crash_size_store(struct kobject *kobj,
				      struct kobj_attribute *attr,
				      const char *buf, size_t count)
{
	unsigned long cnt;
	int ret;

	if (kstrtoul(buf, 0, &cnt))
		return -EINVAL;

	ret = crash_shrink_memory(cnt);
	return ret < 0 ? ret : count;
}
KERNEL_ATTR_RW(kexec_crash_size);

#endif /* CONFIG_CRASH_DUMP*/
#endif /* CONFIG_KEXEC_CORE */

#ifdef CONFIG_VMCORE_INFO

/* physical address and size of the vmcoreinfo ELF note, for kdump tooling */
static ssize_t vmcoreinfo_show(struct kobject *kobj,
			       struct kobj_attribute *attr, char *buf)
{
	phys_addr_t vmcore_base = paddr_vmcoreinfo_note();

	return sysfs_emit(buf, "%pa %x\n", &vmcore_base,
			  (unsigned int)VMCOREINFO_NOTE_SIZE);
}
KERNEL_ATTR_RO(vmcoreinfo);

#ifdef CONFIG_CRASH_HOTPLUG
/* size of the preallocated elfcorehdr used on crash-resource hotplug */
static ssize_t crash_elfcorehdr_size_show(struct kobject *kobj,
					  struct kobj_attribute *attr, char *buf)
{
	unsigned int sz = crash_get_elfcorehdr_size();

	return sysfs_emit(buf, "%u\n", sz);
}
KERNEL_ATTR_RO(crash_elfcorehdr_size);

#endif

#endif /* CONFIG_VMCORE_INFO */

/* whether file capabilities are enabled */
static ssize_t fscaps_show(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", file_caps_enabled);
}
KERNEL_ATTR_RO(fscaps);

#ifndef CONFIG_TINY_RCU
int rcu_expedited;
static ssize_t rcu_expedited_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", READ_ONCE(rcu_expedited));
}

/*
 * NOTE(review): the store writes rcu_expedited via kstrtoint() without
 * WRITE_ONCE() while readers use READ_ONCE() — presumably tolerated for
 * this int flag; verify against the kernel's data-race policy.
 */
static ssize_t rcu_expedited_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	if (kstrtoint(buf, 0, &rcu_expedited))
		return -EINVAL;

	return count;
}
KERNEL_ATTR_RW(rcu_expedited);

int rcu_normal;
static ssize_t rcu_normal_show(struct kobject *kobj,
			       struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", READ_ONCE(rcu_normal));
}

/* same plain-store pattern as rcu_expedited_store() above */
static ssize_t rcu_normal_store(struct kobject *kobj,
				struct kobj_attribute *attr,
				const char *buf, size_t count)
{
	if (kstrtoint(buf, 0, &rcu_normal))
		return -EINVAL;

	return count;
}
KERNEL_ATTR_RW(rcu_normal);
#endif /* #ifndef CONFIG_TINY_RCU */

/*
 * Make /sys/kernel/notes give the raw contents of our kernel .notes section.
 */
extern const void __start_notes;
extern const void __stop_notes;
/* byte size of the .notes section, from the linker-provided bounds */
#define notes_size (&__stop_notes - &__start_notes)

static __ro_after_init BIN_ATTR_SIMPLE_RO(notes);

/* /sys/kernel — parent kobject for the attributes below and other subsystems */
struct kobject *kernel_kobj;
EXPORT_SYMBOL_GPL(kernel_kobj);

/*
 * Attribute table for /sys/kernel.  The #ifdef structure must mirror the
 * conditional definitions of the corresponding *_attr objects above.
 */
static struct attribute * kernel_attrs[] = {
	&fscaps_attr.attr,
	&uevent_seqnum_attr.attr,
	&cpu_byteorder_attr.attr,
	&address_bits_attr.attr,
#ifdef CONFIG_UEVENT_HELPER
	&uevent_helper_attr.attr,
#endif
#ifdef CONFIG_PROFILING
	&profiling_attr.attr,
#endif
#ifdef CONFIG_KEXEC_CORE
	&kexec_loaded_attr.attr,
#ifdef CONFIG_CRASH_DUMP
	&kexec_crash_loaded_attr.attr,
	&kexec_crash_size_attr.attr,
#ifdef CONFIG_CRASH_RESERVE
	&kexec_crash_cma_ranges_attr.attr,
#endif
#endif
#endif
#ifdef CONFIG_VMCORE_INFO
	&vmcoreinfo_attr.attr,
#ifdef CONFIG_CRASH_HOTPLUG
	&crash_elfcorehdr_size_attr.attr,
#endif
#endif
#ifndef CONFIG_TINY_RCU
	&rcu_expedited_attr.attr,
	&rcu_normal_attr.attr,
#endif
	NULL
};

static const struct attribute_group kernel_attr_group = {
	.attrs = kernel_attrs,
};

/*
 * Create /sys/kernel, populate it with the attribute group, and expose
 * the .notes binary file when the section is non-empty.  On any failure
 * the labels below unwind in reverse order of setup.
 */
static int __init ksysfs_init(void)
{
	int error;

	kernel_kobj = kobject_create_and_add("kernel", NULL);
	if (!kernel_kobj) {
		error = -ENOMEM;
		goto exit;
	}
	error = sysfs_create_group(kernel_kobj, &kernel_attr_group);
	if (error)
		goto kset_exit;

	if (notes_size > 0) {
		bin_attr_notes.private = (void *)&__start_notes;
		bin_attr_notes.size = notes_size;
		error = sysfs_create_bin_file(kernel_kobj, &bin_attr_notes);
		if (error)
			goto group_exit;
	}

	return 0;

group_exit:
	sysfs_remove_group(kernel_kobj, &kernel_attr_group);
kset_exit:
	kobject_put(kernel_kobj);
exit:
	return error;
}

core_initcall(ksysfs_init);