xref: /linux/drivers/firmware/dmi-sysfs.c (revision cdd5b5a9761fd66d17586e4f4ba6588c70e640ea)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * dmi-sysfs.c
4  *
5  * This module exports the DMI tables read-only to userspace through the
6  * sysfs file system.
7  *
8  * Data is currently found below
9  *    /sys/firmware/dmi/...
10  *
11  * Each DMI entry is exported as a directory whose name is formatted
12  * using %d-%d, where the first integer is the structure type (0-255)
13  * and the second is the instance of that entry type.  The entry's
14  * fields appear as attribute files within that directory.
15  *
16  * Copyright 2011 Google, Inc.
17  */
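/*
 * For illustration only (layout inferred from the code below): on a
 * system with DMI tables this module produces directories such as
 *
 *    /sys/firmware/dmi/entries/0-0/         typically the first BIOS
 *                                           Information (type 0) entry
 *    /sys/firmware/dmi/entries/0-0/raw      raw table data for that entry
 *    /sys/firmware/dmi/entries/15-0/system_event_log/raw_event_log
 *
 * where "15-0" is the first instance of a type 15 (System Event Log)
 * structure.
 */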
18 
19 #include <linux/kernel.h>
20 #include <linux/init.h>
21 #include <linux/module.h>
22 #include <linux/types.h>
23 #include <linux/kobject.h>
24 #include <linux/dmi.h>
25 #include <linux/capability.h>
26 #include <linux/slab.h>
27 #include <linux/list.h>
28 #include <linux/io.h>
29 #include <asm/dmi.h>
30 
31 #define MAX_ENTRY_TYPE 255 /* Most of these aren't used, but the entry
32 			      type field is only 8 bits wide */
33 
34 struct dmi_sysfs_entry {
35 	struct dmi_header dh;
36 	struct kobject kobj;
37 	int instance;
38 	int position;
39 	struct list_head list;
40 	struct kobject *child;
41 };
42 
43 /*
44  * Global list of dmi_sysfs_entry.  Even though this should only be
45  * manipulated at setup and teardown, the lazy nature of the kobject
46  * system means we get lazy removes.
47  */
48 static LIST_HEAD(entry_list);
49 static DEFINE_SPINLOCK(entry_list_lock);
50 
51 /* dmi_sysfs_attribute - Top level attribute. Used by all entries. */
52 struct dmi_sysfs_attribute {
53 	struct attribute attr;
54 	ssize_t (*show)(struct dmi_sysfs_entry *entry, char *buf);
55 };
56 
57 #define DMI_SYSFS_ATTR(_entry, _name) \
58 struct dmi_sysfs_attribute dmi_sysfs_attr_##_entry##_##_name = { \
59 	.attr = {.name = __stringify(_name), .mode = 0400}, \
60 	.show = dmi_sysfs_##_entry##_##_name, \
61 }
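/*
 * Sketch of how the macro above is used (the expansion shown is
 * illustrative, not generated output): DMI_SYSFS_ATTR(entry, length)
 * declares roughly
 *
 *    struct dmi_sysfs_attribute dmi_sysfs_attr_entry_length = {
 *        .attr = { .name = "length", .mode = 0400 },
 *        .show = dmi_sysfs_entry_length,
 *    };
 *
 * so each attribute file is backed by a show handler named after the
 * entry kind and the attribute.
 */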
62 
63 /*
64  * dmi_sysfs_mapped_attribute - Attribute where we require the entry be
65  * mapped in.  Use in conjunction with dmi_sysfs_specialize_attr_ops.
66  */
67 struct dmi_sysfs_mapped_attribute {
68 	struct attribute attr;
69 	ssize_t (*show)(struct dmi_sysfs_entry *entry,
70 			const struct dmi_header *dh,
71 			char *buf);
72 };
73 
74 #define DMI_SYSFS_MAPPED_ATTR(_entry, _name) \
75 struct dmi_sysfs_mapped_attribute dmi_sysfs_attr_##_entry##_##_name = { \
76 	.attr = {.name = __stringify(_name), .mode = 0400}, \
77 	.show = dmi_sysfs_##_entry##_##_name, \
78 }
79 
80 /*************************************************
81  * Generic DMI entry support.
82  *************************************************/
83 static void dmi_entry_free(struct kobject *kobj)
84 {
85 	kfree(kobj);
86 }
87 
88 static struct dmi_sysfs_entry *to_entry(struct kobject *kobj)
89 {
90 	return container_of(kobj, struct dmi_sysfs_entry, kobj);
91 }
92 
93 static struct dmi_sysfs_attribute *to_attr(struct attribute *attr)
94 {
95 	return container_of(attr, struct dmi_sysfs_attribute, attr);
96 }
97 
98 static ssize_t dmi_sysfs_attr_show(struct kobject *kobj,
99 				   struct attribute *_attr, char *buf)
100 {
101 	struct dmi_sysfs_entry *entry = to_entry(kobj);
102 	struct dmi_sysfs_attribute *attr = to_attr(_attr);
103 
104 	/* DMI stuff is only ever admin visible */
105 	if (!capable(CAP_SYS_ADMIN))
106 		return -EACCES;
107 
108 	return attr->show(entry, buf);
109 }
110 
111 static const struct sysfs_ops dmi_sysfs_attr_ops = {
112 	.show = dmi_sysfs_attr_show,
113 };
114 
115 typedef ssize_t (*dmi_callback)(struct dmi_sysfs_entry *,
116 				const struct dmi_header *dh, void *);
117 
118 struct find_dmi_data {
119 	struct dmi_sysfs_entry	*entry;
120 	dmi_callback		callback;
121 	void			*private;
122 	int			instance_countdown;
123 	ssize_t			ret;
124 };
125 
126 static void find_dmi_entry_helper(const struct dmi_header *dh,
127 				  void *_data)
128 {
129 	struct find_dmi_data *data = _data;
130 	struct dmi_sysfs_entry *entry = data->entry;
131 
132 	/* Is this the entry we want? */
133 	if (dh->type != entry->dh.type)
134 		return;
135 
136 	if (data->instance_countdown != 0) {
137 		/* try the next instance? */
138 		data->instance_countdown--;
139 		return;
140 	}
141 
142 	/*
143 	 * Don't ever revisit the instance.  Short circuit later
144 	 * instances by letting the instance_countdown run negative
145 	 */
146 	data->instance_countdown--;
147 
148 	/* Found the entry */
149 	data->ret = data->callback(entry, dh, data->private);
150 }
151 
152 /* State for passing the read parameters through dmi_find_entry() */
153 struct dmi_read_state {
154 	char *buf;
155 	loff_t pos;
156 	size_t count;
157 };
158 
159 static ssize_t find_dmi_entry(struct dmi_sysfs_entry *entry,
160 			      dmi_callback callback, void *private)
161 {
162 	struct find_dmi_data data = {
163 		.entry = entry,
164 		.callback = callback,
165 		.private = private,
166 		.instance_countdown = entry->instance,
167 		.ret = -EIO,  /* To signal the entry disappeared */
168 	};
169 	int ret;
170 
171 	ret = dmi_walk(find_dmi_entry_helper, &data);
172 	/* This shouldn't happen, but just in case. */
173 	if (ret)
174 		return -EINVAL;
175 	return data.ret;
176 }
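/*
 * Illustrative example (not from the original source): reading an
 * attribute of entry "4-2" re-walks the DMI table with
 * instance_countdown = 2, so the first two type 4 records merely
 * decrement the countdown and the third one invokes the callback.  If
 * fewer matching records are found than when the entry was registered,
 * the -EIO default is returned.
 */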
177 
178 /*
179  * Calculate and return the byte length of the dmi entry identified by
180  * dh.  This includes both the formatted portion as well as the
181  * unformatted string space, including the two trailing nul characters.
182  */
183 static size_t dmi_entry_length(const struct dmi_header *dh)
184 {
185 	const char *p = (const char *)dh;
186 
187 	p += dh->length;
188 
189 	while (p[0] || p[1])
190 		p++;
191 
192 	return 2 + p - (const char *)dh;
193 }
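/*
 * Worked example (illustrative): for a record whose formatted area is
 * dh->length == 0x0d bytes and whose string-set is "Foo\0Bar\0\0", the
 * loop stops with p at the first of the two trailing nuls, giving
 * 0x0d + 7 + 2 = 22 bytes in total.
 */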
194 
195 /*************************************************
196  * Support bits for specialized DMI entry support
197  *************************************************/
198 struct dmi_entry_attr_show_data {
199 	struct attribute *attr;
200 	char *buf;
201 };
202 
203 static ssize_t dmi_entry_attr_show_helper(struct dmi_sysfs_entry *entry,
204 					  const struct dmi_header *dh,
205 					  void *_data)
206 {
207 	struct dmi_entry_attr_show_data *data = _data;
208 	struct dmi_sysfs_mapped_attribute *attr;
209 
210 	attr = container_of(data->attr,
211 			    struct dmi_sysfs_mapped_attribute, attr);
212 	return attr->show(entry, dh, data->buf);
213 }
214 
215 static ssize_t dmi_entry_attr_show(struct kobject *kobj,
216 				   struct attribute *attr,
217 				   char *buf)
218 {
219 	struct dmi_entry_attr_show_data data = {
220 		.attr = attr,
221 		.buf  = buf,
222 	};
223 	/* Find the entry according to our parent and call the
224 	 * normalized show method hanging off of the attribute */
225 	return find_dmi_entry(to_entry(kobj->parent),
226 			      dmi_entry_attr_show_helper, &data);
227 }
228 
229 static const struct sysfs_ops dmi_sysfs_specialize_attr_ops = {
230 	.show = dmi_entry_attr_show,
231 };
232 
233 /*************************************************
234  * Specialized DMI entry support.
235  *************************************************/
236 
237 /*** Type 15 - System Event Log ***/
238 
239 #define DMI_SEL_ACCESS_METHOD_IO8	0x00
240 #define DMI_SEL_ACCESS_METHOD_IO2x8	0x01
241 #define DMI_SEL_ACCESS_METHOD_IO16	0x02
242 #define DMI_SEL_ACCESS_METHOD_PHYS32	0x03
243 #define DMI_SEL_ACCESS_METHOD_GPNV	0x04
244 
245 struct dmi_system_event_log {
246 	struct dmi_header header;
247 	u16	area_length;
248 	u16	header_start_offset;
249 	u16	data_start_offset;
250 	u8	access_method;
251 	u8	status;
252 	u32	change_token;
253 	union {
254 		struct {
255 			u16 index_addr;
256 			u16 data_addr;
257 		} io;
258 		u32	phys_addr32;
259 		u16	gpnv_handle;
260 		u32	access_method_address;
261 	};
262 	u8	header_format;
263 	u8	type_descriptors_supported_count;
264 	u8	per_log_type_descriptor_length;
265 	u8	supported_log_type_descriptors[];
266 } __packed;
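/*
 * Note (a summary of the structure above, following the SMBIOS type 15
 * layout): which union member is meaningful depends on access_method,
 * i.e. the io index/data port pair for the indexed I/O methods
 * (0x00-0x02), phys_addr32 for method 0x03 and gpnv_handle for method
 * 0x04; access_method_address overlays all of them as the raw 32-bit
 * field.
 */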
267 
268 #define DMI_SYSFS_SEL_FIELD(_field) \
269 static ssize_t dmi_sysfs_sel_##_field(struct dmi_sysfs_entry *entry, \
270 				      const struct dmi_header *dh, \
271 				      char *buf) \
272 { \
273 	struct dmi_system_event_log sel; \
274 	if (sizeof(sel) > dmi_entry_length(dh)) \
275 		return -EIO; \
276 	memcpy(&sel, dh, sizeof(sel)); \
277 	return sprintf(buf, "%u\n", sel._field); \
278 } \
279 static DMI_SYSFS_MAPPED_ATTR(sel, _field)
280 
281 DMI_SYSFS_SEL_FIELD(area_length);
282 DMI_SYSFS_SEL_FIELD(header_start_offset);
283 DMI_SYSFS_SEL_FIELD(data_start_offset);
284 DMI_SYSFS_SEL_FIELD(access_method);
285 DMI_SYSFS_SEL_FIELD(status);
286 DMI_SYSFS_SEL_FIELD(change_token);
287 DMI_SYSFS_SEL_FIELD(access_method_address);
288 DMI_SYSFS_SEL_FIELD(header_format);
289 DMI_SYSFS_SEL_FIELD(type_descriptors_supported_count);
290 DMI_SYSFS_SEL_FIELD(per_log_type_descriptor_length);
291 
292 static struct attribute *dmi_sysfs_sel_attrs[] = {
293 	&dmi_sysfs_attr_sel_area_length.attr,
294 	&dmi_sysfs_attr_sel_header_start_offset.attr,
295 	&dmi_sysfs_attr_sel_data_start_offset.attr,
296 	&dmi_sysfs_attr_sel_access_method.attr,
297 	&dmi_sysfs_attr_sel_status.attr,
298 	&dmi_sysfs_attr_sel_change_token.attr,
299 	&dmi_sysfs_attr_sel_access_method_address.attr,
300 	&dmi_sysfs_attr_sel_header_format.attr,
301 	&dmi_sysfs_attr_sel_type_descriptors_supported_count.attr,
302 	&dmi_sysfs_attr_sel_per_log_type_descriptor_length.attr,
303 	NULL,
304 };
305 ATTRIBUTE_GROUPS(dmi_sysfs_sel);
306 
307 static const struct kobj_type dmi_system_event_log_ktype = {
308 	.release = dmi_entry_free,
309 	.sysfs_ops = &dmi_sysfs_specialize_attr_ops,
310 	.default_groups = dmi_sysfs_sel_groups,
311 };
312 
313 #ifdef CONFIG_HAS_IOPORT
314 typedef u8 (*sel_io_reader)(const struct dmi_system_event_log *sel,
315 			    loff_t offset);
316 
317 static DEFINE_MUTEX(io_port_lock);
318 
319 static u8 read_sel_8bit_indexed_io(const struct dmi_system_event_log *sel,
320 				   loff_t offset)
321 {
322 	u8 ret;
323 
324 	mutex_lock(&io_port_lock);
325 	outb((u8)offset, sel->io.index_addr);
326 	ret = inb(sel->io.data_addr);
327 	mutex_unlock(&io_port_lock);
328 	return ret;
329 }
330 
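/*
 * Example of the two-register indexed scheme handled below
 * (illustrative): for offset 0x0123, 0x23 is written to index_addr and
 * 0x01 to index_addr + 1, then the data byte is read back from
 * data_addr.
 */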
331 static u8 read_sel_2x8bit_indexed_io(const struct dmi_system_event_log *sel,
332 				     loff_t offset)
333 {
334 	u8 ret;
335 
336 	mutex_lock(&io_port_lock);
337 	outb((u8)offset, sel->io.index_addr);
338 	outb((u8)(offset >> 8), sel->io.index_addr + 1);
339 	ret = inb(sel->io.data_addr);
340 	mutex_unlock(&io_port_lock);
341 	return ret;
342 }
343 
344 static u8 read_sel_16bit_indexed_io(const struct dmi_system_event_log *sel,
345 				    loff_t offset)
346 {
347 	u8 ret;
348 
349 	mutex_lock(&io_port_lock);
350 	outw((u16)offset, sel->io.index_addr);
351 	ret = inb(sel->io.data_addr);
352 	mutex_unlock(&io_port_lock);
353 	return ret;
354 }
355 
356 static sel_io_reader sel_io_readers[] = {
357 	[DMI_SEL_ACCESS_METHOD_IO8]	= read_sel_8bit_indexed_io,
358 	[DMI_SEL_ACCESS_METHOD_IO2x8]	= read_sel_2x8bit_indexed_io,
359 	[DMI_SEL_ACCESS_METHOD_IO16]	= read_sel_16bit_indexed_io,
360 };
361 
362 static ssize_t dmi_sel_raw_read_io(struct dmi_sysfs_entry *entry,
363 				   const struct dmi_system_event_log *sel,
364 				   char *buf, loff_t pos, size_t count)
365 {
366 	ssize_t wrote = 0;
367 
368 	sel_io_reader io_reader = sel_io_readers[sel->access_method];
369 
370 	while (count && pos < sel->area_length) {
371 		count--;
372 		*(buf++) = io_reader(sel, pos++);
373 		wrote++;
374 	}
375 
376 	return wrote;
377 }
378 #endif
379 
380 static ssize_t dmi_sel_raw_read_phys32(struct dmi_sysfs_entry *entry,
381 				       const struct dmi_system_event_log *sel,
382 				       char *buf, loff_t pos, size_t count)
383 {
384 	u8 __iomem *mapped;
385 	ssize_t wrote = 0;
386 
387 	mapped = dmi_remap(sel->access_method_address, sel->area_length);
388 	if (!mapped)
389 		return -EIO;
390 
391 	while (count && pos < sel->area_length) {
392 		count--;
393 		*(buf++) = readb(mapped + pos++);
394 		wrote++;
395 	}
396 
397 	dmi_unmap(mapped);
398 	return wrote;
399 }
400 
401 static ssize_t dmi_sel_raw_read_helper(struct dmi_sysfs_entry *entry,
402 				       const struct dmi_header *dh,
403 				       void *_state)
404 {
405 	struct dmi_read_state *state = _state;
406 	struct dmi_system_event_log sel;
407 
408 	if (sizeof(sel) > dmi_entry_length(dh))
409 		return -EIO;
410 
411 	memcpy(&sel, dh, sizeof(sel));
412 
413 	switch (sel.access_method) {
414 #ifdef CONFIG_HAS_IOPORT
415 	case DMI_SEL_ACCESS_METHOD_IO8:
416 	case DMI_SEL_ACCESS_METHOD_IO2x8:
417 	case DMI_SEL_ACCESS_METHOD_IO16:
418 		return dmi_sel_raw_read_io(entry, &sel, state->buf,
419 					   state->pos, state->count);
420 #endif
421 	case DMI_SEL_ACCESS_METHOD_PHYS32:
422 		return dmi_sel_raw_read_phys32(entry, &sel, state->buf,
423 					       state->pos, state->count);
424 	case DMI_SEL_ACCESS_METHOD_GPNV:
425 		pr_info_ratelimited("dmi-sysfs: GPNV support missing.\n");
426 		return -EIO;
427 	default:
428 		pr_info_ratelimited("dmi-sysfs: Unknown access method %02x\n",
429 			sel.access_method);
430 		return -EIO;
431 	}
432 }
433 
434 static ssize_t dmi_sel_raw_read(struct file *filp, struct kobject *kobj,
435 				struct bin_attribute *bin_attr,
436 				char *buf, loff_t pos, size_t count)
437 {
438 	struct dmi_sysfs_entry *entry = to_entry(kobj->parent);
439 	struct dmi_read_state state = {
440 		.buf = buf,
441 		.pos = pos,
442 		.count = count,
443 	};
444 
445 	return find_dmi_entry(entry, dmi_sel_raw_read_helper, &state);
446 }
447 
448 static struct bin_attribute dmi_sel_raw_attr = {
449 	.attr = {.name = "raw_event_log", .mode = 0400},
450 	.read = dmi_sel_raw_read,
451 };
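/*
 * In rough terms: the generic "raw" attribute defined later dumps the
 * type 15 record itself, while raw_event_log reads the event log area
 * that the record points at, via port I/O or a 32-bit physical mapping
 * depending on the access method.
 */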
452 
453 static int dmi_system_event_log(struct dmi_sysfs_entry *entry)
454 {
455 	int ret;
456 
457 	entry->child = kzalloc(sizeof(*entry->child), GFP_KERNEL);
458 	if (!entry->child)
459 		return -ENOMEM;
460 	ret = kobject_init_and_add(entry->child,
461 				   &dmi_system_event_log_ktype,
462 				   &entry->kobj,
463 				   "system_event_log");
464 	if (ret)
465 		goto out_free;
466 
467 	ret = sysfs_create_bin_file(entry->child, &dmi_sel_raw_attr);
468 	if (ret)
469 		goto out_del;
470 
471 	return 0;
472 
473 out_del:
474 	kobject_del(entry->child);
475 out_free:
476 	kfree(entry->child);
477 	return ret;
478 }
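/*
 * The "system_event_log" child created above is what entry->child in
 * struct dmi_sysfs_entry refers to; e.g. for the first type 15 entry
 * this becomes /sys/firmware/dmi/entries/15-0/system_event_log/.
 */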
479 
480 /*************************************************
481  * Generic DMI entry support.
482  *************************************************/
483 
484 static ssize_t dmi_sysfs_entry_length(struct dmi_sysfs_entry *entry, char *buf)
485 {
486 	return sprintf(buf, "%d\n", entry->dh.length);
487 }
488 
489 static ssize_t dmi_sysfs_entry_handle(struct dmi_sysfs_entry *entry, char *buf)
490 {
491 	return sprintf(buf, "%d\n", entry->dh.handle);
492 }
493 
494 static ssize_t dmi_sysfs_entry_type(struct dmi_sysfs_entry *entry, char *buf)
495 {
496 	return sprintf(buf, "%d\n", entry->dh.type);
497 }
498 
499 static ssize_t dmi_sysfs_entry_instance(struct dmi_sysfs_entry *entry,
500 					char *buf)
501 {
502 	return sprintf(buf, "%d\n", entry->instance);
503 }
504 
505 static ssize_t dmi_sysfs_entry_position(struct dmi_sysfs_entry *entry,
506 					char *buf)
507 {
508 	return sprintf(buf, "%d\n", entry->position);
509 }
510 
511 static DMI_SYSFS_ATTR(entry, length);
512 static DMI_SYSFS_ATTR(entry, handle);
513 static DMI_SYSFS_ATTR(entry, type);
514 static DMI_SYSFS_ATTR(entry, instance);
515 static DMI_SYSFS_ATTR(entry, position);
516 
517 static struct attribute *dmi_sysfs_entry_attrs[] = {
518 	&dmi_sysfs_attr_entry_length.attr,
519 	&dmi_sysfs_attr_entry_handle.attr,
520 	&dmi_sysfs_attr_entry_type.attr,
521 	&dmi_sysfs_attr_entry_instance.attr,
522 	&dmi_sysfs_attr_entry_position.attr,
523 	NULL,
524 };
525 ATTRIBUTE_GROUPS(dmi_sysfs_entry);
526 
527 static ssize_t dmi_entry_raw_read_helper(struct dmi_sysfs_entry *entry,
528 					 const struct dmi_header *dh,
529 					 void *_state)
530 {
531 	struct dmi_read_state *state = _state;
532 	size_t entry_length;
533 
534 	entry_length = dmi_entry_length(dh);
535 
536 	return memory_read_from_buffer(state->buf, state->count,
537 				       &state->pos, dh, entry_length);
538 }
539 
540 static ssize_t dmi_entry_raw_read(struct file *filp,
541 				  struct kobject *kobj,
542 				  struct bin_attribute *bin_attr,
543 				  char *buf, loff_t pos, size_t count)
544 {
545 	struct dmi_sysfs_entry *entry = to_entry(kobj);
546 	struct dmi_read_state state = {
547 		.buf = buf,
548 		.pos = pos,
549 		.count = count,
550 	};
551 
552 	return find_dmi_entry(entry, dmi_entry_raw_read_helper, &state);
553 }
554 
555 static const struct bin_attribute dmi_entry_raw_attr = {
556 	.attr = {.name = "raw", .mode = 0400},
557 	.read = dmi_entry_raw_read,
558 };
559 
560 static void dmi_sysfs_entry_release(struct kobject *kobj)
561 {
562 	struct dmi_sysfs_entry *entry = to_entry(kobj);
563 
564 	spin_lock(&entry_list_lock);
565 	list_del(&entry->list);
566 	spin_unlock(&entry_list_lock);
567 	kfree(entry);
568 }
569 
570 static const struct kobj_type dmi_sysfs_entry_ktype = {
571 	.release = dmi_sysfs_entry_release,
572 	.sysfs_ops = &dmi_sysfs_attr_ops,
573 	.default_groups = dmi_sysfs_entry_groups,
574 };
575 
576 static struct kset *dmi_kset;
577 
578 /* Per-type count of the instances seen.  Only used during setup */
579 static int __initdata instance_counts[MAX_ENTRY_TYPE + 1];
580 
581 /* Global positional count of all entries seen.  Only for setup */
582 static int __initdata position_count;
583 
584 static void __init dmi_sysfs_register_handle(const struct dmi_header *dh,
585 					     void *_ret)
586 {
587 	struct dmi_sysfs_entry *entry;
588 	int *ret = _ret;
589 
590 	/* If a previous entry saw an error, short circuit */
591 	if (*ret)
592 		return;
593 
594 	/* Allocate and register a new entry into the entries set */
595 	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
596 	if (!entry) {
597 		*ret = -ENOMEM;
598 		return;
599 	}
600 
601 	/* Set the key */
602 	memcpy(&entry->dh, dh, sizeof(*dh));
603 	entry->instance = instance_counts[dh->type]++;
604 	entry->position = position_count++;
605 
606 	entry->kobj.kset = dmi_kset;
607 	*ret = kobject_init_and_add(&entry->kobj, &dmi_sysfs_entry_ktype, NULL,
608 				    "%d-%d", dh->type, entry->instance);
609 
610 	/* Thread on the global list for cleanup */
611 	spin_lock(&entry_list_lock);
612 	list_add_tail(&entry->list, &entry_list);
613 	spin_unlock(&entry_list_lock);
614 
615 	if (*ret) {
616 		kobject_put(&entry->kobj);
617 		return;
618 	}
619 
620 	/* Handle specializations by type */
621 	switch (dh->type) {
622 	case DMI_ENTRY_SYSTEM_EVENT_LOG:
623 		*ret = dmi_system_event_log(entry);
624 		break;
625 	default:
626 		/* No specialization */
627 		break;
628 	}
629 	if (*ret)
630 		goto out_err;
631 
632 	/* Create the raw binary file to access the entry */
633 	*ret = sysfs_create_bin_file(&entry->kobj, &dmi_entry_raw_attr);
634 	if (*ret)
635 		goto out_err;
636 
637 	return;
638 out_err:
639 	kobject_put(entry->child);
640 	kobject_put(&entry->kobj);
641 	return;
642 }
643 
644 static void cleanup_entry_list(void)
645 {
646 	struct dmi_sysfs_entry *entry, *next;
647 
648 	/* No locks, we are on our way out */
649 	list_for_each_entry_safe(entry, next, &entry_list, list) {
650 		kobject_put(entry->child);
651 		kobject_put(&entry->kobj);
652 	}
653 }
654 
655 static int __init dmi_sysfs_init(void)
656 {
657 	int error;
658 	int val;
659 
660 	if (!dmi_kobj) {
661 		pr_debug("dmi-sysfs: dmi entry is absent.\n");
662 		error = -ENODATA;
663 		goto err;
664 	}
665 
666 	dmi_kset = kset_create_and_add("entries", NULL, dmi_kobj);
667 	if (!dmi_kset) {
668 		error = -ENOMEM;
669 		goto err;
670 	}
671 
672 	val = 0;
673 	error = dmi_walk(dmi_sysfs_register_handle, &val);
674 	if (error)
675 		goto err;
676 	if (val) {
677 		error = val;
678 		goto err;
679 	}
680 
681 	pr_debug("dmi-sysfs: loaded.\n");
682 
683 	return 0;
684 err:
685 	cleanup_entry_list();
686 	kset_unregister(dmi_kset);
687 	return error;
688 }
689 
690 /* clean up everything. */
691 static void __exit dmi_sysfs_exit(void)
692 {
693 	pr_debug("dmi-sysfs: unloading.\n");
694 	cleanup_entry_list();
695 	kset_unregister(dmi_kset);
696 }
697 
698 module_init(dmi_sysfs_init);
699 module_exit(dmi_sysfs_exit);
700 
701 MODULE_AUTHOR("Mike Waychison <mikew@google.com>");
702 MODULE_DESCRIPTION("DMI sysfs support");
703 MODULE_LICENSE("GPL");
704