xref: /linux/drivers/acpi/sysfs.c (revision bf80eef2212a1e8451df13b52533f4bc31bb4f8e)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * sysfs.c - ACPI sysfs interface to userspace.
4  */
5 
6 #define pr_fmt(fmt) "ACPI: " fmt
7 
8 #include <linux/acpi.h>
9 #include <linux/bitmap.h>
10 #include <linux/init.h>
11 #include <linux/kernel.h>
12 #include <linux/moduleparam.h>
13 
14 #include "internal.h"
15 
16 #ifdef CONFIG_ACPI_DEBUG
17 /*
18  * ACPI debug sysfs I/F, including:
19  * /sys/module/acpi/parameters/debug_layer
20  * /sys/module/acpi/parameters/debug_level
21  * /sys/module/acpi/parameters/trace_method_name
22  * /sys/module/acpi/parameters/trace_state
23  * /sys/module/acpi/parameters/trace_debug_layer
24  * /sys/module/acpi/parameters/trace_debug_level
25  */
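/*
 * Reading debug_layer or debug_level prints each known layer/level with its
 * hex value and whether it is currently set; writing a hex mask updates
 * acpi_dbg_layer/acpi_dbg_level.  A sketch (the mask values to use come from
 * the table printed on read):
 *   # cat /sys/module/acpi/parameters/debug_layer
 *   # echo 0x00000004 > /sys/module/acpi/parameters/debug_level
 */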
26 
27 struct acpi_dlayer {
28 	const char *name;
29 	unsigned long value;
30 };
31 struct acpi_dlevel {
32 	const char *name;
33 	unsigned long value;
34 };
35 #define ACPI_DEBUG_INIT(v)	{ .name = #v, .value = v }
36 
37 static const struct acpi_dlayer acpi_debug_layers[] = {
38 	ACPI_DEBUG_INIT(ACPI_UTILITIES),
39 	ACPI_DEBUG_INIT(ACPI_HARDWARE),
40 	ACPI_DEBUG_INIT(ACPI_EVENTS),
41 	ACPI_DEBUG_INIT(ACPI_TABLES),
42 	ACPI_DEBUG_INIT(ACPI_NAMESPACE),
43 	ACPI_DEBUG_INIT(ACPI_PARSER),
44 	ACPI_DEBUG_INIT(ACPI_DISPATCHER),
45 	ACPI_DEBUG_INIT(ACPI_EXECUTER),
46 	ACPI_DEBUG_INIT(ACPI_RESOURCES),
47 	ACPI_DEBUG_INIT(ACPI_CA_DEBUGGER),
48 	ACPI_DEBUG_INIT(ACPI_OS_SERVICES),
49 	ACPI_DEBUG_INIT(ACPI_CA_DISASSEMBLER),
50 	ACPI_DEBUG_INIT(ACPI_COMPILER),
51 	ACPI_DEBUG_INIT(ACPI_TOOLS),
52 };
53 
54 static const struct acpi_dlevel acpi_debug_levels[] = {
55 	ACPI_DEBUG_INIT(ACPI_LV_INIT),
56 	ACPI_DEBUG_INIT(ACPI_LV_DEBUG_OBJECT),
57 	ACPI_DEBUG_INIT(ACPI_LV_INFO),
58 	ACPI_DEBUG_INIT(ACPI_LV_REPAIR),
59 	ACPI_DEBUG_INIT(ACPI_LV_TRACE_POINT),
60 
61 	ACPI_DEBUG_INIT(ACPI_LV_INIT_NAMES),
62 	ACPI_DEBUG_INIT(ACPI_LV_PARSE),
63 	ACPI_DEBUG_INIT(ACPI_LV_LOAD),
64 	ACPI_DEBUG_INIT(ACPI_LV_DISPATCH),
65 	ACPI_DEBUG_INIT(ACPI_LV_EXEC),
66 	ACPI_DEBUG_INIT(ACPI_LV_NAMES),
67 	ACPI_DEBUG_INIT(ACPI_LV_OPREGION),
68 	ACPI_DEBUG_INIT(ACPI_LV_BFIELD),
69 	ACPI_DEBUG_INIT(ACPI_LV_TABLES),
70 	ACPI_DEBUG_INIT(ACPI_LV_VALUES),
71 	ACPI_DEBUG_INIT(ACPI_LV_OBJECTS),
72 	ACPI_DEBUG_INIT(ACPI_LV_RESOURCES),
73 	ACPI_DEBUG_INIT(ACPI_LV_USER_REQUESTS),
74 	ACPI_DEBUG_INIT(ACPI_LV_PACKAGE),
75 
76 	ACPI_DEBUG_INIT(ACPI_LV_ALLOCATIONS),
77 	ACPI_DEBUG_INIT(ACPI_LV_FUNCTIONS),
78 	ACPI_DEBUG_INIT(ACPI_LV_OPTIMIZATIONS),
79 
80 	ACPI_DEBUG_INIT(ACPI_LV_MUTEX),
81 	ACPI_DEBUG_INIT(ACPI_LV_THREADS),
82 	ACPI_DEBUG_INIT(ACPI_LV_IO),
83 	ACPI_DEBUG_INIT(ACPI_LV_INTERRUPTS),
84 
85 	ACPI_DEBUG_INIT(ACPI_LV_AML_DISASSEMBLE),
86 	ACPI_DEBUG_INIT(ACPI_LV_VERBOSE_INFO),
87 	ACPI_DEBUG_INIT(ACPI_LV_FULL_TABLES),
88 	ACPI_DEBUG_INIT(ACPI_LV_EVENTS),
89 };
90 
91 static int param_get_debug_layer(char *buffer, const struct kernel_param *kp)
92 {
93 	int result = 0;
94 	int i;
95 
96 	result = sprintf(buffer, "%-25s\tHex        SET\n", "Description");
97 
98 	for (i = 0; i < ARRAY_SIZE(acpi_debug_layers); i++) {
99 		result += sprintf(buffer + result, "%-25s\t0x%08lX [%c]\n",
100 				  acpi_debug_layers[i].name,
101 				  acpi_debug_layers[i].value,
102 				  (acpi_dbg_layer & acpi_debug_layers[i].value)
103 				  ? '*' : ' ');
104 	}
105 	result +=
106 	    sprintf(buffer + result, "%-25s\t0x%08X [%c]\n", "ACPI_ALL_DRIVERS",
107 		    ACPI_ALL_DRIVERS,
108 		    (acpi_dbg_layer & ACPI_ALL_DRIVERS) ==
109 		    ACPI_ALL_DRIVERS ? '*' : (acpi_dbg_layer & ACPI_ALL_DRIVERS)
110 		    == 0 ? ' ' : '-');
111 	result +=
112 	    sprintf(buffer + result,
113 		    "--\ndebug_layer = 0x%08X ( * = enabled)\n",
114 		    acpi_dbg_layer);
115 
116 	return result;
117 }
118 
119 static int param_get_debug_level(char *buffer, const struct kernel_param *kp)
120 {
121 	int result = 0;
122 	int i;
123 
124 	result = sprintf(buffer, "%-25s\tHex        SET\n", "Description");
125 
126 	for (i = 0; i < ARRAY_SIZE(acpi_debug_levels); i++) {
127 		result += sprintf(buffer + result, "%-25s\t0x%08lX [%c]\n",
128 				  acpi_debug_levels[i].name,
129 				  acpi_debug_levels[i].value,
130 				  (acpi_dbg_level & acpi_debug_levels[i].value)
131 				  ? '*' : ' ');
132 	}
133 	result +=
134 	    sprintf(buffer + result, "--\ndebug_level = 0x%08X (* = enabled)\n",
135 		    acpi_dbg_level);
136 
137 	return result;
138 }
139 
140 static const struct kernel_param_ops param_ops_debug_layer = {
141 	.set = param_set_uint,
142 	.get = param_get_debug_layer,
143 };
144 
145 static const struct kernel_param_ops param_ops_debug_level = {
146 	.set = param_set_uint,
147 	.get = param_get_debug_level,
148 };
149 
150 module_param_cb(debug_layer, &param_ops_debug_layer, &acpi_dbg_layer, 0644);
151 module_param_cb(debug_level, &param_ops_debug_level, &acpi_dbg_level, 0644);
152 
153 static char trace_method_name[1024];
154 
155 static int param_set_trace_method_name(const char *val,
156 				       const struct kernel_param *kp)
157 {
158 	u32 saved_flags = 0;
159 	bool is_abs_path = true;
160 
161 	if (*val != '\\')
162 		is_abs_path = false;
163 
164 	if ((is_abs_path && strlen(val) > 1023) ||
165 	    (!is_abs_path && strlen(val) > 1022)) {
166 		pr_err("%s: string parameter too long\n", kp->name);
167 		return -ENOSPC;
168 	}
169 
170 	/*
171 	 * It's not safe to update acpi_gbl_trace_method_name without
172 	 * having the tracer stopped, so we save the original tracer
173 	 * state and disable it.
174 	 */
175 	saved_flags = acpi_gbl_trace_flags;
176 	(void)acpi_debug_trace(NULL,
177 			       acpi_gbl_trace_dbg_level,
178 			       acpi_gbl_trace_dbg_layer,
179 			       0);
180 
181 	/* This is a hack.  We can't kmalloc in early boot. */
182 	if (is_abs_path)
183 		strcpy(trace_method_name, val);
184 	else {
185 		trace_method_name[0] = '\\';
186 		strcpy(trace_method_name+1, val);
187 	}
188 
189 	/* Restore the original tracer state */
190 	(void)acpi_debug_trace(trace_method_name,
191 			       acpi_gbl_trace_dbg_level,
192 			       acpi_gbl_trace_dbg_layer,
193 			       saved_flags);
194 
195 	return 0;
196 }
197 
198 static int param_get_trace_method_name(char *buffer, const struct kernel_param *kp)
199 {
200 	return scnprintf(buffer, PAGE_SIZE, "%s\n", acpi_gbl_trace_method_name);
201 }
202 
203 static const struct kernel_param_ops param_ops_trace_method = {
204 	.set = param_set_trace_method_name,
205 	.get = param_get_trace_method_name,
206 };
207 
208 static const struct kernel_param_ops param_ops_trace_attrib = {
209 	.set = param_set_uint,
210 	.get = param_get_uint,
211 };
212 
213 module_param_cb(trace_method_name, &param_ops_trace_method, &trace_method_name, 0644);
214 module_param_cb(trace_debug_layer, &param_ops_trace_attrib, &acpi_gbl_trace_dbg_layer, 0644);
215 module_param_cb(trace_debug_level, &param_ops_trace_attrib, &acpi_gbl_trace_dbg_level, 0644);
216 
217 static int param_set_trace_state(const char *val,
218 				 const struct kernel_param *kp)
219 {
220 	acpi_status status;
221 	const char *method = trace_method_name;
222 	u32 flags = 0;
223 
224 /* "xxx-once" must be checked before "xxx" because the comparison below matches prefixes. */
225 #define acpi_compare_param(val, key)	\
226 	strncmp((val), (key), sizeof(key) - 1)
227 
228 	if (!acpi_compare_param(val, "enable")) {
229 		method = NULL;
230 		flags = ACPI_TRACE_ENABLED;
231 	} else if (!acpi_compare_param(val, "disable"))
232 		method = NULL;
233 	else if (!acpi_compare_param(val, "method-once"))
234 		flags = ACPI_TRACE_ENABLED | ACPI_TRACE_ONESHOT;
235 	else if (!acpi_compare_param(val, "method"))
236 		flags = ACPI_TRACE_ENABLED;
237 	else if (!acpi_compare_param(val, "opcode-once"))
238 		flags = ACPI_TRACE_ENABLED | ACPI_TRACE_ONESHOT | ACPI_TRACE_OPCODE;
239 	else if (!acpi_compare_param(val, "opcode"))
240 		flags = ACPI_TRACE_ENABLED | ACPI_TRACE_OPCODE;
241 	else
242 		return -EINVAL;
243 
244 	status = acpi_debug_trace(method,
245 				  acpi_gbl_trace_dbg_level,
246 				  acpi_gbl_trace_dbg_layer,
247 				  flags);
248 	if (ACPI_FAILURE(status))
249 		return -EBUSY;
250 
251 	return 0;
252 }
253 
254 static int param_get_trace_state(char *buffer, const struct kernel_param *kp)
255 {
256 	if (!(acpi_gbl_trace_flags & ACPI_TRACE_ENABLED))
257 		return sprintf(buffer, "disable\n");
258 	if (!acpi_gbl_trace_method_name)
259 		return sprintf(buffer, "enable\n");
260 	if (acpi_gbl_trace_flags & ACPI_TRACE_ONESHOT)
261 		return sprintf(buffer, "method-once\n");
262 	else
263 		return sprintf(buffer, "method\n");
264 }
265 
266 module_param_call(trace_state, param_set_trace_state, param_get_trace_state,
267 		  NULL, 0644);
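/*
 * Example (a sketch; "\_SB.PCI0._INI" is only an illustrative method path):
 *   # echo "\_SB.PCI0._INI" > /sys/module/acpi/parameters/trace_method_name
 *   # echo method-once > /sys/module/acpi/parameters/trace_state
 * traces one execution of that method.  "enable" turns on tracing without
 * restricting it to a single method, "disable" stops tracing, and the
 * "opcode"/"opcode-once" variants add per-opcode tracing, matching the
 * strings parsed above.
 */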
268 #endif /* CONFIG_ACPI_DEBUG */
269 
270 
271 /* /sys/module/acpi/parameters/aml_debug_output */
272 
273 module_param_named(aml_debug_output, acpi_gbl_enable_aml_debug_object,
274 		   byte, 0644);
275 MODULE_PARM_DESC(aml_debug_output,
276 		 "To enable/disable the ACPI Debug Object output.");
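/*
 * For example, "echo 1 > /sys/module/acpi/parameters/aml_debug_output"
 * causes stores to the AML Debug object to be printed to the kernel log;
 * writing 0 disables that output again.
 */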
277 
278 /* /sys/module/acpi/parameters/acpica_version */
279 static int param_get_acpica_version(char *buffer,
280 				    const struct kernel_param *kp)
281 {
282 	int result;
283 
284 	result = sprintf(buffer, "%x\n", ACPI_CA_VERSION);
285 
286 	return result;
287 }
288 
289 module_param_call(acpica_version, NULL, param_get_acpica_version, NULL, 0444);
290 
291 /*
292  * ACPI table sysfs I/F:
293  * /sys/firmware/acpi/tables/
294  * /sys/firmware/acpi/tables/data/
295  * /sys/firmware/acpi/tables/dynamic/
296  */
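/*
 * Each table becomes a read-only binary file named after its signature, with
 * an instance suffix when several tables share a signature (for example
 * "DSDT", "SSDT1", "SSDT2").  The files can be read with plain "cat" or with
 * tools such as acpidump/acpixtract.
 */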
297 
298 static LIST_HEAD(acpi_table_attr_list);
299 static struct kobject *tables_kobj;
300 static struct kobject *tables_data_kobj;
301 static struct kobject *dynamic_tables_kobj;
302 static struct kobject *hotplug_kobj;
303 
304 #define ACPI_MAX_TABLE_INSTANCES	999
305 #define ACPI_INST_SIZE			4 /* including trailing 0 */
306 
307 struct acpi_table_attr {
308 	struct bin_attribute attr;
309 	char name[ACPI_NAMESEG_SIZE];
310 	int instance;
311 	char filename[ACPI_NAMESEG_SIZE+ACPI_INST_SIZE];
312 	struct list_head node;
313 };
314 
315 struct acpi_data_attr {
316 	struct bin_attribute attr;
317 	u64	addr;
318 };
319 
320 static ssize_t acpi_table_show(struct file *filp, struct kobject *kobj,
321 			       struct bin_attribute *bin_attr, char *buf,
322 			       loff_t offset, size_t count)
323 {
324 	struct acpi_table_attr *table_attr =
325 	    container_of(bin_attr, struct acpi_table_attr, attr);
326 	struct acpi_table_header *table_header = NULL;
327 	acpi_status status;
328 	ssize_t rc;
329 
330 	status = acpi_get_table(table_attr->name, table_attr->instance,
331 				&table_header);
332 	if (ACPI_FAILURE(status))
333 		return -ENODEV;
334 
335 	rc = memory_read_from_buffer(buf, count, &offset, table_header,
336 			table_header->length);
337 	acpi_put_table(table_header);
338 	return rc;
339 }
340 
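/*
 * Create the sysfs binary file for one table.  The file name is the 4-char
 * table signature, with the instance number appended when more than one
 * table with that signature exists (capped at ACPI_MAX_TABLE_INSTANCES).
 */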
341 static int acpi_table_attr_init(struct kobject *tables_obj,
342 				struct acpi_table_attr *table_attr,
343 				struct acpi_table_header *table_header)
344 {
345 	struct acpi_table_header *header = NULL;
346 	struct acpi_table_attr *attr = NULL;
347 	char instance_str[ACPI_INST_SIZE];
348 
349 	sysfs_attr_init(&table_attr->attr.attr);
350 	ACPI_COPY_NAMESEG(table_attr->name, table_header->signature);
351 
352 	list_for_each_entry(attr, &acpi_table_attr_list, node) {
353 		if (ACPI_COMPARE_NAMESEG(table_attr->name, attr->name))
354 			if (table_attr->instance < attr->instance)
355 				table_attr->instance = attr->instance;
356 	}
357 	table_attr->instance++;
358 	if (table_attr->instance > ACPI_MAX_TABLE_INSTANCES) {
359 		pr_warn("%4.4s: too many table instances\n", table_attr->name);
360 		return -ERANGE;
361 	}
362 
363 	ACPI_COPY_NAMESEG(table_attr->filename, table_header->signature);
364 	table_attr->filename[ACPI_NAMESEG_SIZE] = '\0';
365 	if (table_attr->instance > 1 || (table_attr->instance == 1 &&
366 					 !acpi_get_table
367 					 (table_header->signature, 2, &header))) {
368 		snprintf(instance_str, sizeof(instance_str), "%u",
369 			 table_attr->instance);
370 		strcat(table_attr->filename, instance_str);
371 	}
372 
373 	table_attr->attr.size = table_header->length;
374 	table_attr->attr.read = acpi_table_show;
375 	table_attr->attr.attr.name = table_attr->filename;
376 	table_attr->attr.attr.mode = 0400;
377 
378 	return sysfs_create_bin_file(tables_obj, &table_attr->attr);
379 }
380 
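/*
 * Table-event callback: a newly installed (dynamically loaded) table gets an
 * entry under /sys/firmware/acpi/tables/dynamic/; load/unload/uninstall
 * events need no action because the table stays in the global table list.
 */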
381 acpi_status acpi_sysfs_table_handler(u32 event, void *table, void *context)
382 {
383 	struct acpi_table_attr *table_attr;
384 
385 	switch (event) {
386 	case ACPI_TABLE_EVENT_INSTALL:
387 		table_attr = kzalloc(sizeof(*table_attr), GFP_KERNEL);
388 		if (!table_attr)
389 			return AE_NO_MEMORY;
390 
391 		if (acpi_table_attr_init(dynamic_tables_kobj,
392 					 table_attr, table)) {
393 			kfree(table_attr);
394 			return AE_ERROR;
395 		}
396 		list_add_tail(&table_attr->node, &acpi_table_attr_list);
397 		break;
398 	case ACPI_TABLE_EVENT_LOAD:
399 	case ACPI_TABLE_EVENT_UNLOAD:
400 	case ACPI_TABLE_EVENT_UNINSTALL:
401 		/*
402 		 * Nothing needs to be done here, because the table
403 		 * is not deleted from the global table list when it
404 		 * is unloaded.
405 		 */
406 		break;
407 	default:
408 		return AE_BAD_PARAMETER;
409 	}
410 	return AE_OK;
411 }
412 
413 static ssize_t acpi_data_show(struct file *filp, struct kobject *kobj,
414 			      struct bin_attribute *bin_attr, char *buf,
415 			      loff_t offset, size_t count)
416 {
417 	struct acpi_data_attr *data_attr;
418 	void __iomem *base;
419 	ssize_t size;
420 
421 	data_attr = container_of(bin_attr, struct acpi_data_attr, attr);
422 	size = data_attr->attr.size;
423 
424 	if (offset < 0)
425 		return -EINVAL;
426 
427 	if (offset >= size)
428 		return 0;
429 
430 	if (count > size - offset)
431 		count = size - offset;
432 
433 	base = acpi_os_map_iomem(data_attr->addr, size);
434 	if (!base)
435 		return -ENOMEM;
436 
437 	memcpy_fromio(buf, base + offset, count);
438 
439 	acpi_os_unmap_iomem(base, size);
440 
441 	return count;
442 }
443 
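/*
 * BERT exposes its Boot Error Region as /sys/firmware/acpi/tables/data/BERT;
 * reads are served by acpi_data_show(), which maps the region on demand.
 */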
444 static int acpi_bert_data_init(void *th, struct acpi_data_attr *data_attr)
445 {
446 	struct acpi_table_bert *bert = th;
447 
448 	if (bert->header.length < sizeof(struct acpi_table_bert) ||
449 	    bert->region_length < sizeof(struct acpi_hest_generic_status)) {
450 		kfree(data_attr);
451 		return -EINVAL;
452 	}
453 	data_attr->addr = bert->address;
454 	data_attr->attr.size = bert->region_length;
455 	data_attr->attr.attr.name = "BERT";
456 
457 	return sysfs_create_bin_file(tables_data_kobj, &data_attr->attr);
458 }
459 
460 static struct acpi_data_obj {
461 	char *name;
462 	int (*fn)(void *, struct acpi_data_attr *);
463 } acpi_data_objs[] = {
464 	{ ACPI_SIG_BERT, acpi_bert_data_init },
465 };
466 
467 #define NUM_ACPI_DATA_OBJS ARRAY_SIZE(acpi_data_objs)
468 
469 static int acpi_table_data_init(struct acpi_table_header *th)
470 {
471 	struct acpi_data_attr *data_attr;
472 	int i;
473 
474 	for (i = 0; i < NUM_ACPI_DATA_OBJS; i++) {
475 		if (ACPI_COMPARE_NAMESEG(th->signature, acpi_data_objs[i].name)) {
476 			data_attr = kzalloc(sizeof(*data_attr), GFP_KERNEL);
477 			if (!data_attr)
478 				return -ENOMEM;
479 			sysfs_attr_init(&data_attr->attr.attr);
480 			data_attr->attr.read = acpi_data_show;
481 			data_attr->attr.attr.mode = 0400;
482 			return acpi_data_objs[i].fn(th, data_attr);
483 		}
484 	}
485 	return 0;
486 }
487 
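/*
 * Create /sys/firmware/acpi/tables (plus its "data" and "dynamic"
 * subdirectories) and add one attribute per table currently known to ACPICA.
 */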
488 static int acpi_tables_sysfs_init(void)
489 {
490 	struct acpi_table_attr *table_attr;
491 	struct acpi_table_header *table_header = NULL;
492 	int table_index;
493 	acpi_status status;
494 	int ret;
495 
496 	tables_kobj = kobject_create_and_add("tables", acpi_kobj);
497 	if (!tables_kobj)
498 		goto err;
499 
500 	tables_data_kobj = kobject_create_and_add("data", tables_kobj);
501 	if (!tables_data_kobj)
502 		goto err_tables_data;
503 
504 	dynamic_tables_kobj = kobject_create_and_add("dynamic", tables_kobj);
505 	if (!dynamic_tables_kobj)
506 		goto err_dynamic_tables;
507 
508 	for (table_index = 0;; table_index++) {
509 		status = acpi_get_table_by_index(table_index, &table_header);
510 
511 		if (status == AE_BAD_PARAMETER)
512 			break;
513 
514 		if (ACPI_FAILURE(status))
515 			continue;
516 
517 		table_attr = kzalloc(sizeof(*table_attr), GFP_KERNEL);
518 		if (!table_attr)
519 			return -ENOMEM;
520 
521 		ret = acpi_table_attr_init(tables_kobj,
522 					   table_attr, table_header);
523 		if (ret) {
524 			kfree(table_attr);
525 			return ret;
526 		}
527 		list_add_tail(&table_attr->node, &acpi_table_attr_list);
528 		acpi_table_data_init(table_header);
529 	}
530 
531 	kobject_uevent(tables_kobj, KOBJ_ADD);
532 	kobject_uevent(tables_data_kobj, KOBJ_ADD);
533 	kobject_uevent(dynamic_tables_kobj, KOBJ_ADD);
534 
535 	return 0;
536 err_dynamic_tables:
537 	kobject_put(tables_data_kobj);
538 err_tables_data:
539 	kobject_put(tables_kobj);
540 err:
541 	return -ENOMEM;
542 }
543 
544 /*
545  * Detailed ACPI IRQ counters:
546  * /sys/firmware/acpi/interrupts/
547  */
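/*
 * Each GPE and fixed event gets its own counter file, plus the aggregate
 * counters "gpe_all", "sci", "sci_not" and "error".  For example (a sketch,
 * GPE numbers are machine specific):
 *   # cat /sys/firmware/acpi/interrupts/gpe10
 * prints the count followed by the EN/STS bits and the enabled/masked state,
 * as formatted by counter_show() below.
 */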
548 
549 u32 acpi_irq_handled;
550 u32 acpi_irq_not_handled;
551 
552 #define COUNT_GPE 0
553 #define COUNT_SCI 1		/* acpi_irq_handled */
554 #define COUNT_SCI_NOT 2		/* acpi_irq_not_handled */
555 #define COUNT_ERROR 3		/* other */
556 #define NUM_COUNTERS_EXTRA 4
557 
558 struct event_counter {
559 	u32 count;
560 	u32 flags;
561 };
562 
563 static struct event_counter *all_counters;
564 static u32 num_gpes;
565 static u32 num_counters;
566 static struct attribute **all_attrs;
567 static u32 acpi_gpe_count;
568 
569 static struct attribute_group interrupt_stats_attr_group = {
570 	.name = "interrupts",
571 };
572 
573 static struct kobj_attribute *counter_attrs;
574 
575 static void delete_gpe_attr_array(void)
576 {
577 	struct event_counter *tmp = all_counters;
578 
579 	all_counters = NULL;
580 	kfree(tmp);
581 
582 	if (counter_attrs) {
583 		int i;
584 
585 		for (i = 0; i < num_gpes; i++)
586 			kfree(counter_attrs[i].attr.name);
587 
588 		kfree(counter_attrs);
589 	}
590 	kfree(all_attrs);
591 }
592 
593 static void gpe_count(u32 gpe_number)
594 {
595 	acpi_gpe_count++;
596 
597 	if (!all_counters)
598 		return;
599 
600 	if (gpe_number < num_gpes)
601 		all_counters[gpe_number].count++;
602 	else
603 		all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS +
604 			     COUNT_ERROR].count++;
605 }
606 
607 static void fixed_event_count(u32 event_number)
608 {
609 	if (!all_counters)
610 		return;
611 
612 	if (event_number < ACPI_NUM_FIXED_EVENTS)
613 		all_counters[num_gpes + event_number].count++;
614 	else
615 		all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS +
616 			     COUNT_ERROR].count++;
617 }
618 
619 static void acpi_global_event_handler(u32 event_type, acpi_handle device,
620 	u32 event_number, void *context)
621 {
622 	if (event_type == ACPI_EVENT_TYPE_GPE) {
623 		gpe_count(event_number);
624 		pr_debug("GPE event 0x%02x\n", event_number);
625 	} else if (event_type == ACPI_EVENT_TYPE_FIXED) {
626 		fixed_event_count(event_number);
627 		pr_debug("Fixed event 0x%02x\n", event_number);
628 	} else {
629 		pr_debug("Other event 0x%02x\n", event_number);
630 	}
631 }
632 
633 static int get_status(u32 index, acpi_event_status *ret,
634 		      acpi_handle *handle)
635 {
636 	acpi_status status;
637 
638 	if (index >= num_gpes + ACPI_NUM_FIXED_EVENTS)
639 		return -EINVAL;
640 
641 	if (index < num_gpes) {
642 		status = acpi_get_gpe_device(index, handle);
643 		if (ACPI_FAILURE(status)) {
644 			pr_warn("Invalid GPE 0x%x", index);
645 			return -ENXIO;
646 		}
647 		status = acpi_get_gpe_status(*handle, index, ret);
648 	} else {
649 		status = acpi_get_event_status(index - num_gpes, ret);
650 	}
651 	if (ACPI_FAILURE(status))
652 		return -EIO;
653 
654 	return 0;
655 }
656 
657 static ssize_t counter_show(struct kobject *kobj,
658 			    struct kobj_attribute *attr, char *buf)
659 {
660 	int index = attr - counter_attrs;
661 	int size;
662 	acpi_handle handle;
663 	acpi_event_status status;
664 	int result = 0;
665 
666 	all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI].count =
667 	    acpi_irq_handled;
668 	all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI_NOT].count =
669 	    acpi_irq_not_handled;
670 	all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_GPE].count =
671 	    acpi_gpe_count;
672 	size = sprintf(buf, "%8u", all_counters[index].count);
673 
674 	/* the aggregate counters ("gpe_all", "sci", ...) have no event status */
675 	if (index >= num_gpes + ACPI_NUM_FIXED_EVENTS)
676 		goto end;
677 
678 	result = get_status(index, &status, &handle);
679 	if (result)
680 		goto end;
681 
682 	if (status & ACPI_EVENT_FLAG_ENABLE_SET)
683 		size += sprintf(buf + size, "  EN");
684 	else
685 		size += sprintf(buf + size, "    ");
686 	if (status & ACPI_EVENT_FLAG_STATUS_SET)
687 		size += sprintf(buf + size, " STS");
688 	else
689 		size += sprintf(buf + size, "    ");
690 
691 	if (!(status & ACPI_EVENT_FLAG_HAS_HANDLER))
692 		size += sprintf(buf + size, " invalid     ");
693 	else if (status & ACPI_EVENT_FLAG_ENABLED)
694 		size += sprintf(buf + size, " enabled     ");
695 	else if (status & ACPI_EVENT_FLAG_WAKE_ENABLED)
696 		size += sprintf(buf + size, " wake_enabled");
697 	else
698 		size += sprintf(buf + size, " disabled    ");
699 	if (status & ACPI_EVENT_FLAG_MASKED)
700 		size += sprintf(buf + size, " masked  ");
701 	else
702 		size += sprintf(buf + size, " unmasked");
703 
704 end:
705 	size += sprintf(buf + size, "\n");
706 	return result ? result : size;
707 }
708 
709 /*
710  * counter_set() sets the specified counter.
711  * Writing any value to the aggregate "sci" file clears all counters.
712  * It also lets user space enable, disable or clear a GPE/fixed event (and mask GPEs).
713  */
714 static ssize_t counter_set(struct kobject *kobj,
715 			   struct kobj_attribute *attr, const char *buf,
716 			   size_t size)
717 {
718 	int index = attr - counter_attrs;
719 	acpi_event_status status;
720 	acpi_handle handle;
721 	int result = 0;
722 	unsigned long tmp;
723 
724 	if (index == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI) {
725 		int i;
726 		for (i = 0; i < num_counters; ++i)
727 			all_counters[i].count = 0;
728 		acpi_gpe_count = 0;
729 		acpi_irq_handled = 0;
730 		acpi_irq_not_handled = 0;
731 		goto end;
732 	}
733 
734 	/* show the event status for both GPEs and Fixed Events */
735 	result = get_status(index, &status, &handle);
736 	if (result)
737 		goto end;
738 
739 	if (!(status & ACPI_EVENT_FLAG_HAS_HANDLER)) {
740 		pr_warn("Cannot change invalid GPE/Fixed Event status\n");
741 		return -EINVAL;
742 	}
743 
744 	if (index < num_gpes) {
745 		if (!strcmp(buf, "disable\n") &&
746 		    (status & ACPI_EVENT_FLAG_ENABLED))
747 			result = acpi_disable_gpe(handle, index);
748 		else if (!strcmp(buf, "enable\n") &&
749 			 !(status & ACPI_EVENT_FLAG_ENABLED))
750 			result = acpi_enable_gpe(handle, index);
751 		else if (!strcmp(buf, "clear\n") &&
752 			 (status & ACPI_EVENT_FLAG_STATUS_SET))
753 			result = acpi_clear_gpe(handle, index);
754 		else if (!strcmp(buf, "mask\n"))
755 			result = acpi_mask_gpe(handle, index, TRUE);
756 		else if (!strcmp(buf, "unmask\n"))
757 			result = acpi_mask_gpe(handle, index, FALSE);
758 		else if (!kstrtoul(buf, 0, &tmp))
759 			all_counters[index].count = tmp;
760 		else
761 			result = -EINVAL;
762 	} else if (index < num_gpes + ACPI_NUM_FIXED_EVENTS) {
763 		int event = index - num_gpes;
764 		if (!strcmp(buf, "disable\n") &&
765 		    (status & ACPI_EVENT_FLAG_ENABLE_SET))
766 			result = acpi_disable_event(event, ACPI_NOT_ISR);
767 		else if (!strcmp(buf, "enable\n") &&
768 			 !(status & ACPI_EVENT_FLAG_ENABLE_SET))
769 			result = acpi_enable_event(event, ACPI_NOT_ISR);
770 		else if (!strcmp(buf, "clear\n") &&
771 			 (status & ACPI_EVENT_FLAG_STATUS_SET))
772 			result = acpi_clear_event(event);
773 		else if (!kstrtoul(buf, 0, &tmp))
774 			all_counters[index].count = tmp;
775 		else
776 			result = -EINVAL;
777 	} else
778 		all_counters[index].count = strtoul(buf, NULL, 0);
779 
780 	if (ACPI_FAILURE(result))
781 		result = -EINVAL;
782 end:
783 	return result ? result : size;
784 }
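/*
 * Runtime usage (a sketch; gpe10 is just an example name):
 *   # echo disable > /sys/firmware/acpi/interrupts/gpe10
 *   # echo clear   > /sys/firmware/acpi/interrupts/gpe10
 *   # echo mask    > /sys/firmware/acpi/interrupts/gpe10
 * Writing a number sets the counter itself, and writing anything to "sci"
 * resets all counters, as implemented above.
 */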
785 
786 /*
787  * A Quirk Mechanism for GPE Flooding Prevention:
788  *
789  * Quirks may be needed to prevent GPE flooding on a specific GPE. The
790  * flooding typically cannot be detected and prevented automatically by the
791  * ACPI_GPE_DISPATCH_NONE check, because a _Lxx/_Exx handler is present in
792  * the AML tables. This normally indicates a feature gap in Linux, so
793  * instead of providing endless quirk tables, we provide a boot parameter
794  * for those who need this quirk. For example, to prevent GPE flooding
795  * on GPE 00, specify the following boot parameter:
796  *   acpi_mask_gpe=0x00
797  * Note that the parameter can be a list (see bitmap_parselist() for the
798  * details).
799  * The masking status can be changed at runtime through the following
800  * interface:
801  *   echo unmask > /sys/firmware/acpi/interrupts/gpe00
802  */
803 #define ACPI_MASKABLE_GPE_MAX	0x100
804 static DECLARE_BITMAP(acpi_masked_gpes_map, ACPI_MASKABLE_GPE_MAX) __initdata;
805 
806 static int __init acpi_gpe_set_masked_gpes(char *val)
807 {
808 	int ret;
809 	u8 gpe;
810 
811 	ret = kstrtou8(val, 0, &gpe);
812 	if (ret) {
813 		ret = bitmap_parselist(val, acpi_masked_gpes_map, ACPI_MASKABLE_GPE_MAX);
814 		if (ret)
815 			return ret;
816 	} else
817 		set_bit(gpe, acpi_masked_gpes_map);
818 
819 	return 1;
820 }
821 __setup("acpi_mask_gpe=", acpi_gpe_set_masked_gpes);
822 
823 void __init acpi_gpe_apply_masked_gpes(void)
824 {
825 	acpi_handle handle;
826 	acpi_status status;
827 	u16 gpe;
828 
829 	for_each_set_bit(gpe, acpi_masked_gpes_map, ACPI_MASKABLE_GPE_MAX) {
830 		status = acpi_get_gpe_device(gpe, &handle);
831 		if (ACPI_SUCCESS(status)) {
832 			pr_info("Masking GPE 0x%x.\n", gpe);
833 			(void)acpi_mask_gpe(handle, gpe, TRUE);
834 		}
835 	}
836 }
837 
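/*
 * Build the /sys/firmware/acpi/interrupts/ group: one attribute per GPE
 * ("gpeXX"), one per fixed event ("ff_*") and the extra aggregate counters.
 * On any allocation failure the partially built arrays are freed by
 * delete_gpe_attr_array().
 */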
838 void acpi_irq_stats_init(void)
839 {
840 	acpi_status status;
841 	int i;
842 
843 	if (all_counters)
844 		return;
845 
846 	num_gpes = acpi_current_gpe_count;
847 	num_counters = num_gpes + ACPI_NUM_FIXED_EVENTS + NUM_COUNTERS_EXTRA;
848 
849 	all_attrs = kcalloc(num_counters + 1, sizeof(*all_attrs), GFP_KERNEL);
850 	if (all_attrs == NULL)
851 		return;
852 
853 	all_counters = kcalloc(num_counters, sizeof(*all_counters), GFP_KERNEL);
854 	if (all_counters == NULL)
855 		goto fail;
856 
857 	status = acpi_install_global_event_handler(acpi_global_event_handler, NULL);
858 	if (ACPI_FAILURE(status))
859 		goto fail;
860 
861 	counter_attrs = kcalloc(num_counters, sizeof(*counter_attrs), GFP_KERNEL);
862 	if (counter_attrs == NULL)
863 		goto fail;
864 
865 	for (i = 0; i < num_counters; ++i) {
866 		char buffer[12];
867 		char *name;
868 
869 		if (i < num_gpes)
870 			sprintf(buffer, "gpe%02X", i);
871 		else if (i == num_gpes + ACPI_EVENT_PMTIMER)
872 			sprintf(buffer, "ff_pmtimer");
873 		else if (i == num_gpes + ACPI_EVENT_GLOBAL)
874 			sprintf(buffer, "ff_gbl_lock");
875 		else if (i == num_gpes + ACPI_EVENT_POWER_BUTTON)
876 			sprintf(buffer, "ff_pwr_btn");
877 		else if (i == num_gpes + ACPI_EVENT_SLEEP_BUTTON)
878 			sprintf(buffer, "ff_slp_btn");
879 		else if (i == num_gpes + ACPI_EVENT_RTC)
880 			sprintf(buffer, "ff_rt_clk");
881 		else if (i == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_GPE)
882 			sprintf(buffer, "gpe_all");
883 		else if (i == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI)
884 			sprintf(buffer, "sci");
885 		else if (i == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI_NOT)
886 			sprintf(buffer, "sci_not");
887 		else if (i == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_ERROR)
888 			sprintf(buffer, "error");
889 		else
890 			sprintf(buffer, "bug%02X", i);
891 
892 		name = kstrdup(buffer, GFP_KERNEL);
893 		if (name == NULL)
894 			goto fail;
895 
896 		sysfs_attr_init(&counter_attrs[i].attr);
897 		counter_attrs[i].attr.name = name;
898 		counter_attrs[i].attr.mode = 0644;
899 		counter_attrs[i].show = counter_show;
900 		counter_attrs[i].store = counter_set;
901 
902 		all_attrs[i] = &counter_attrs[i].attr;
903 	}
904 
905 	interrupt_stats_attr_group.attrs = all_attrs;
906 	if (!sysfs_create_group(acpi_kobj, &interrupt_stats_attr_group))
907 		return;
908 
909 fail:
910 	delete_gpe_attr_array();
911 }
912 
913 static void __exit interrupt_stats_exit(void)
914 {
915 	sysfs_remove_group(acpi_kobj, &interrupt_stats_attr_group);
916 
917 	delete_gpe_attr_array();
918 }
919 
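/*
 * /sys/firmware/acpi/pm_profile reports the FADT Preferred_PM_Profile field
 * (e.g. 1 = Desktop, 2 = Mobile, as defined by the ACPI specification).
 */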
920 static ssize_t pm_profile_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
921 {
922 	return sprintf(buf, "%d\n", acpi_gbl_FADT.preferred_profile);
923 }
924 
925 static const struct kobj_attribute pm_profile_attr = __ATTR_RO(pm_profile);
926 
927 static ssize_t enabled_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
928 {
929 	struct acpi_hotplug_profile *hotplug = to_acpi_hotplug_profile(kobj);
930 
931 	return sprintf(buf, "%d\n", hotplug->enabled);
932 }
933 
934 static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr,
935 			     const char *buf, size_t size)
936 {
937 	struct acpi_hotplug_profile *hotplug = to_acpi_hotplug_profile(kobj);
938 	unsigned int val;
939 
940 	if (kstrtouint(buf, 10, &val) || val > 1)
941 		return -EINVAL;
942 
943 	acpi_scan_hotplug_enabled(hotplug, val);
944 	return size;
945 }
946 
947 static struct kobj_attribute hotplug_enabled_attr = __ATTR_RW(enabled);
948 
949 static struct attribute *hotplug_profile_attrs[] = {
950 	&hotplug_enabled_attr.attr,
951 	NULL
952 };
953 ATTRIBUTE_GROUPS(hotplug_profile);
954 
955 static struct kobj_type acpi_hotplug_profile_ktype = {
956 	.sysfs_ops = &kobj_sysfs_ops,
957 	.default_groups = hotplug_profile_groups,
958 };
959 
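/*
 * Expose /sys/firmware/acpi/hotplug/<name>/enabled so user space can turn
 * ACPI-driven hotplug on or off for the given device class.
 */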
960 void acpi_sysfs_add_hotplug_profile(struct acpi_hotplug_profile *hotplug,
961 				    const char *name)
962 {
963 	int error;
964 
965 	if (!hotplug_kobj)
966 		goto err_out;
967 
968 	error = kobject_init_and_add(&hotplug->kobj,
969 		&acpi_hotplug_profile_ktype, hotplug_kobj, "%s", name);
970 	if (error) {
971 		kobject_put(&hotplug->kobj);
972 		goto err_out;
973 	}
974 
975 	kobject_uevent(&hotplug->kobj, KOBJ_ADD);
976 	return;
977 
978  err_out:
979 	pr_err("Unable to add hotplug profile '%s'\n", name);
980 }
981 
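/*
 * /sys/firmware/acpi/hotplug/force_remove is kept for ABI compatibility only:
 * it always reads as 0 and rejects attempts to enable it.
 */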
982 static ssize_t force_remove_show(struct kobject *kobj,
983 				 struct kobj_attribute *attr, char *buf)
984 {
985 	return sprintf(buf, "%d\n", 0);
986 }
987 
988 static ssize_t force_remove_store(struct kobject *kobj,
989 				  struct kobj_attribute *attr,
990 				  const char *buf, size_t size)
991 {
992 	bool val;
993 	int ret;
994 
995 	ret = strtobool(buf, &val);
996 	if (ret < 0)
997 		return ret;
998 
999 	if (val) {
1000 		pr_err("Enabling force_remove is not supported anymore. Please report to linux-acpi@vger.kernel.org if you depend on this functionality\n");
1001 		return -EINVAL;
1002 	}
1003 	return size;
1004 }
1005 
1006 static const struct kobj_attribute force_remove_attr = __ATTR_RW(force_remove);
1007 
1008 int __init acpi_sysfs_init(void)
1009 {
1010 	int result;
1011 
1012 	result = acpi_tables_sysfs_init();
1013 	if (result)
1014 		return result;
1015 
1016 	hotplug_kobj = kobject_create_and_add("hotplug", acpi_kobj);
1017 	if (!hotplug_kobj)
1018 		return -ENOMEM;
1019 
1020 	result = sysfs_create_file(hotplug_kobj, &force_remove_attr.attr);
1021 	if (result)
1022 		return result;
1023 
1024 	result = sysfs_create_file(acpi_kobj, &pm_profile_attr.attr);
1025 	return result;
1026 }
1027