1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * kernel/ksysfs.c - sysfs attributes in /sys/kernel, which
4 * are not related to any other subsystem
5 *
6 * Copyright (C) 2004 Kay Sievers <kay.sievers@vrfy.org>
7 */
8
9 #include <asm/byteorder.h>
10 #include <linux/kobject.h>
11 #include <linux/string.h>
12 #include <linux/sysfs.h>
13 #include <linux/export.h>
14 #include <linux/init.h>
15 #include <linux/vmcore_info.h>
16 #include <linux/profile.h>
17 #include <linux/stat.h>
18 #include <linux/sched.h>
19 #include <linux/capability.h>
20 #include <linux/compiler.h>
21
22 #include <linux/rcupdate.h> /* rcu_expedited and rcu_normal */
23
/*
 * Resolve the CPU byte order to a fixed string at build time; the
 * architecture's <asm/byteorder.h> defines exactly one of these macros.
 */
#if defined(__LITTLE_ENDIAN)
#define CPU_BYTEORDER_STRING "little"
#elif defined(__BIG_ENDIAN)
#define CPU_BYTEORDER_STRING "big"
#else
#error Unknown byteorder
#endif

/* Declare a read-only /sys/kernel attribute named <_name>_attr. */
#define KERNEL_ATTR_RO(_name) \
static struct kobj_attribute _name##_attr = __ATTR_RO(_name)

/* Declare a read-write /sys/kernel attribute named <_name>_attr. */
#define KERNEL_ATTR_RW(_name) \
static struct kobj_attribute _name##_attr = __ATTR_RW(_name)
37
/* /sys/kernel/uevent_seqnum: current uevent sequence number. */
static ssize_t uevent_seqnum_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
{
	/* Cast to u64 so "%llu" matches atomic64_read()'s return on all arches. */
	return sysfs_emit(buf, "%llu\n", (u64)atomic64_read(&uevent_seqnum));
}
KERNEL_ATTR_RO(uevent_seqnum);
45
/* /sys/kernel/cpu_byteorder: "little" or "big", fixed at build time. */
static ssize_t cpu_byteorder_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%s\n", CPU_BYTEORDER_STRING);
}
KERNEL_ATTR_RO(cpu_byteorder);
53
/* /sys/kernel/address_bits: native pointer width in bits (e.g. 64). */
static ssize_t address_bits_show(struct kobject *kobj,
				 struct kobj_attribute *attr, char *buf)
{
	/* Multiply by 8 rather than CHAR_BIT; the kernel assumes 8-bit bytes. */
	return sysfs_emit(buf, "%zu\n", sizeof(void *) * 8 /* CHAR_BIT */);
}
KERNEL_ATTR_RO(address_bits);
61
62 #ifdef CONFIG_UEVENT_HELPER
/* /sys/kernel/uevent_helper: path of the helper run for uevents
 * during early boot (before udev takes over). */
static ssize_t uevent_helper_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%s\n", uevent_helper);
}
uevent_helper_store(struct kobject * kobj,struct kobj_attribute * attr,const char * buf,size_t count)69 static ssize_t uevent_helper_store(struct kobject *kobj,
70 struct kobj_attribute *attr,
71 const char *buf, size_t count)
72 {
73 if (count+1 > UEVENT_HELPER_PATH_LEN)
74 return -ENOENT;
75 memcpy(uevent_helper, buf, count);
76 uevent_helper[count] = '\0';
77 if (count && uevent_helper[count-1] == '\n')
78 uevent_helper[count-1] = '\0';
79 return count;
80 }
81 KERNEL_ATTR_RW(uevent_helper);
82 #endif
83
84 #ifdef CONFIG_PROFILING
/* /sys/kernel/profiling: current value of the global prof_on flag. */
static ssize_t profiling_show(struct kobject *kobj,
			      struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", prof_on);
}
/*
 * Enable kernel profiling at runtime.  The written string is parsed by
 * profile_setup() exactly like the "profile=" boot parameter.  Profiling
 * can be enabled only once; subsequent writes fail with -EEXIST.
 */
static ssize_t profiling_store(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       const char *buf, size_t count)
{
	int ret;
	static DEFINE_MUTEX(lock);

	/*
	 * We need serialization, for profile_setup() initializes prof_on
	 * value and profile_init() must not reallocate prof_buffer after
	 * once allocated.
	 */
	guard(mutex)(&lock);		/* released automatically on every return path */
	if (prof_on)
		return -EEXIST;
	/*
	 * This eventually calls into get_option() which
	 * has a ton of callers and is not const. It is
	 * easiest to cast it away here.
	 */
	profile_setup((char *)buf);
	ret = profile_init();
	if (ret)
		return ret;
	ret = create_proc_profile();
	if (ret)
		return ret;
	return count;
}
KERNEL_ATTR_RW(profiling);
120 #endif
121
122 #ifdef CONFIG_VMCORE_INFO
123
/* /sys/kernel/vmcoreinfo: "<physical address> <size>" of the vmcoreinfo
 * ELF note, consumed by kdump/crash tooling. */
static ssize_t vmcoreinfo_show(struct kobject *kobj,
			       struct kobj_attribute *attr, char *buf)
{
	phys_addr_t vmcore_base = paddr_vmcoreinfo_note();
	/* %pa prints a phys_addr_t portably regardless of its width. */
	return sysfs_emit(buf, "%pa %x\n", &vmcore_base,
			  (unsigned int)VMCOREINFO_NOTE_SIZE);
}
KERNEL_ATTR_RO(vmcoreinfo);
132
133 #endif /* CONFIG_VMCORE_INFO */
134
/* /sys/kernel/fscaps: whether file capabilities are enabled. */
static ssize_t fscaps_show(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", file_caps_enabled);
}
KERNEL_ATTR_RO(fscaps);
142
143 #ifndef CONFIG_TINY_RCU
/* Non-zero requests expedited RCU grace periods; read by the RCU core
 * (see the rcupdate.h include above). */
int rcu_expedited;
static ssize_t rcu_expedited_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
{
	/* READ_ONCE() pairs with concurrent updates via the store handler. */
	return sysfs_emit(buf, "%d\n", READ_ONCE(rcu_expedited));
}
rcu_expedited_store(struct kobject * kobj,struct kobj_attribute * attr,const char * buf,size_t count)150 static ssize_t rcu_expedited_store(struct kobject *kobj,
151 struct kobj_attribute *attr,
152 const char *buf, size_t count)
153 {
154 if (kstrtoint(buf, 0, &rcu_expedited))
155 return -EINVAL;
156
157 return count;
158 }
159 KERNEL_ATTR_RW(rcu_expedited);
160
/* Non-zero requests normal (non-expedited) RCU grace periods; read by
 * the RCU core (see the rcupdate.h include above). */
int rcu_normal;
static ssize_t rcu_normal_show(struct kobject *kobj,
			       struct kobj_attribute *attr, char *buf)
{
	/* READ_ONCE() pairs with concurrent updates via the store handler. */
	return sysfs_emit(buf, "%d\n", READ_ONCE(rcu_normal));
}
rcu_normal_store(struct kobject * kobj,struct kobj_attribute * attr,const char * buf,size_t count)167 static ssize_t rcu_normal_store(struct kobject *kobj,
168 struct kobj_attribute *attr,
169 const char *buf, size_t count)
170 {
171 if (kstrtoint(buf, 0, &rcu_normal))
172 return -EINVAL;
173
174 return count;
175 }
176 KERNEL_ATTR_RW(rcu_normal);
177 #endif /* #ifndef CONFIG_TINY_RCU */
178
/*
 * Make /sys/kernel/notes give the raw contents of our kernel .notes section.
 */
/* Linker-provided boundary markers of the .notes section. */
extern const void __start_notes;
extern const void __stop_notes;
#define notes_size (&__stop_notes - &__start_notes)

/* Backing attribute for /sys/kernel/notes; pointer and size are filled
 * in by ksysfs_init() below, then never written again (__ro_after_init). */
static __ro_after_init BIN_ATTR_SIMPLE_RO(notes);

/* Parent kobject of /sys/kernel; exported so other code can attach
 * its own entries underneath. */
struct kobject *kernel_kobj;
EXPORT_SYMBOL_GPL(kernel_kobj);

/* All plain-text attributes exported under /sys/kernel.  Optional
 * entries are compiled in only with their respective config options. */
static struct attribute * kernel_attrs[] = {
	&fscaps_attr.attr,
	&uevent_seqnum_attr.attr,
	&cpu_byteorder_attr.attr,
	&address_bits_attr.attr,
#ifdef CONFIG_UEVENT_HELPER
	&uevent_helper_attr.attr,
#endif
#ifdef CONFIG_PROFILING
	&profiling_attr.attr,
#endif
#ifdef CONFIG_VMCORE_INFO
	&vmcoreinfo_attr.attr,
#endif
#ifndef CONFIG_TINY_RCU
	&rcu_expedited_attr.attr,
	&rcu_normal_attr.attr,
#endif
	NULL
};

static const struct attribute_group kernel_attr_group = {
	.attrs = kernel_attrs,
};
215
/*
 * Create /sys/kernel, populate it with the attribute group above and,
 * if the image has a non-empty .notes section, expose it as the
 * "notes" binary file.  Registered as a core_initcall so it runs
 * before later initcalls that hang entries off kernel_kobj.
 * Returns 0 on success or a negative errno, unwinding any partial setup.
 */
static int __init ksysfs_init(void)
{
	int error;

	kernel_kobj = kobject_create_and_add("kernel", NULL);
	if (!kernel_kobj) {
		error = -ENOMEM;
		goto exit;
	}
	error = sysfs_create_group(kernel_kobj, &kernel_attr_group);
	if (error)
		goto kset_exit;

	if (notes_size > 0) {
		bin_attr_notes.private = (void *)&__start_notes;
		bin_attr_notes.size = notes_size;
		error = sysfs_create_bin_file(kernel_kobj, &bin_attr_notes);
		if (error)
			goto group_exit;
	}

	return 0;

	/* Error unwind: undo completed steps in reverse order. */
group_exit:
	sysfs_remove_group(kernel_kobj, &kernel_attr_group);
kset_exit:
	kobject_put(kernel_kobj);
exit:
	return error;
}

core_initcall(ksysfs_init);
248