// SPDX-License-Identifier: GPL-2.0
/*
 * sysfs.c - ACPI sysfs interface to userspace.
 */

#define pr_fmt(fmt) "ACPI: " fmt

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/acpi.h>

#include "internal.h"

#define _COMPONENT		ACPI_SYSTEM_COMPONENT
ACPI_MODULE_NAME("sysfs");

#ifdef CONFIG_ACPI_DEBUG
/*
 * ACPI debug sysfs I/F, including:
 * /sys/module/acpi/parameters/debug_layer
 * /sys/module/acpi/parameters/debug_level
 * /sys/module/acpi/parameters/trace_method_name
 * /sys/module/acpi/parameters/trace_state
 * /sys/module/acpi/parameters/trace_debug_layer
 * /sys/module/acpi/parameters/trace_debug_level
 */

struct acpi_dlayer {
	const char *name;
	unsigned long value;
};
struct acpi_dlevel {
	const char *name;
	unsigned long value;
};
#define ACPI_DEBUG_INIT(v)	{ .name = #v, .value = v }

static const struct acpi_dlayer acpi_debug_layers[] = {
	ACPI_DEBUG_INIT(ACPI_UTILITIES),
	ACPI_DEBUG_INIT(ACPI_HARDWARE),
	ACPI_DEBUG_INIT(ACPI_EVENTS),
	ACPI_DEBUG_INIT(ACPI_TABLES),
	ACPI_DEBUG_INIT(ACPI_NAMESPACE),
	ACPI_DEBUG_INIT(ACPI_PARSER),
	ACPI_DEBUG_INIT(ACPI_DISPATCHER),
	ACPI_DEBUG_INIT(ACPI_EXECUTER),
	ACPI_DEBUG_INIT(ACPI_RESOURCES),
	ACPI_DEBUG_INIT(ACPI_CA_DEBUGGER),
	ACPI_DEBUG_INIT(ACPI_OS_SERVICES),
	ACPI_DEBUG_INIT(ACPI_CA_DISASSEMBLER),
	ACPI_DEBUG_INIT(ACPI_COMPILER),
	ACPI_DEBUG_INIT(ACPI_TOOLS),

	ACPI_DEBUG_INIT(ACPI_BUS_COMPONENT),
	ACPI_DEBUG_INIT(ACPI_AC_COMPONENT),
	ACPI_DEBUG_INIT(ACPI_BATTERY_COMPONENT),
	ACPI_DEBUG_INIT(ACPI_BUTTON_COMPONENT),
	ACPI_DEBUG_INIT(ACPI_SBS_COMPONENT),
	ACPI_DEBUG_INIT(ACPI_FAN_COMPONENT),
	ACPI_DEBUG_INIT(ACPI_PCI_COMPONENT),
	ACPI_DEBUG_INIT(ACPI_POWER_COMPONENT),
	ACPI_DEBUG_INIT(ACPI_CONTAINER_COMPONENT),
	ACPI_DEBUG_INIT(ACPI_SYSTEM_COMPONENT),
	ACPI_DEBUG_INIT(ACPI_THERMAL_COMPONENT),
	ACPI_DEBUG_INIT(ACPI_MEMORY_DEVICE_COMPONENT),
	ACPI_DEBUG_INIT(ACPI_VIDEO_COMPONENT),
	ACPI_DEBUG_INIT(ACPI_PROCESSOR_COMPONENT),
};

static const struct acpi_dlevel acpi_debug_levels[] = {
	ACPI_DEBUG_INIT(ACPI_LV_INIT),
	ACPI_DEBUG_INIT(ACPI_LV_DEBUG_OBJECT),
	ACPI_DEBUG_INIT(ACPI_LV_INFO),
	ACPI_DEBUG_INIT(ACPI_LV_REPAIR),
	ACPI_DEBUG_INIT(ACPI_LV_TRACE_POINT),

	ACPI_DEBUG_INIT(ACPI_LV_INIT_NAMES),
	ACPI_DEBUG_INIT(ACPI_LV_PARSE),
	ACPI_DEBUG_INIT(ACPI_LV_LOAD),
	ACPI_DEBUG_INIT(ACPI_LV_DISPATCH),
	ACPI_DEBUG_INIT(ACPI_LV_EXEC),
	ACPI_DEBUG_INIT(ACPI_LV_NAMES),
	ACPI_DEBUG_INIT(ACPI_LV_OPREGION),
	ACPI_DEBUG_INIT(ACPI_LV_BFIELD),
	ACPI_DEBUG_INIT(ACPI_LV_TABLES),
	ACPI_DEBUG_INIT(ACPI_LV_VALUES),
	ACPI_DEBUG_INIT(ACPI_LV_OBJECTS),
	ACPI_DEBUG_INIT(ACPI_LV_RESOURCES),
	ACPI_DEBUG_INIT(ACPI_LV_USER_REQUESTS),
	ACPI_DEBUG_INIT(ACPI_LV_PACKAGE),

	ACPI_DEBUG_INIT(ACPI_LV_ALLOCATIONS),
	ACPI_DEBUG_INIT(ACPI_LV_FUNCTIONS),
	ACPI_DEBUG_INIT(ACPI_LV_OPTIMIZATIONS),

	ACPI_DEBUG_INIT(ACPI_LV_MUTEX),
	ACPI_DEBUG_INIT(ACPI_LV_THREADS),
	ACPI_DEBUG_INIT(ACPI_LV_IO),
	ACPI_DEBUG_INIT(ACPI_LV_INTERRUPTS),

	ACPI_DEBUG_INIT(ACPI_LV_AML_DISASSEMBLE),
	ACPI_DEBUG_INIT(ACPI_LV_VERBOSE_INFO),
	ACPI_DEBUG_INIT(ACPI_LV_FULL_TABLES),
	ACPI_DEBUG_INIT(ACPI_LV_EVENTS),
};
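
/*
 * debug_layer selects which ACPICA/driver components may emit debug output,
 * while debug_level selects how verbose that output is: a given
 * ACPI_DEBUG_PRINT() statement is only printed when its component bit is set
 * in acpi_dbg_layer and its level bit is set in acpi_dbg_level.
 */
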
static int param_get_debug_layer(char *buffer, const struct kernel_param *kp)
{
	int result = 0;
	int i;

	result = sprintf(buffer, "%-25s\tHex        SET\n", "Description");

	for (i = 0; i < ARRAY_SIZE(acpi_debug_layers); i++) {
		result += sprintf(buffer + result, "%-25s\t0x%08lX [%c]\n",
				  acpi_debug_layers[i].name,
				  acpi_debug_layers[i].value,
				  (acpi_dbg_layer & acpi_debug_layers[i].value)
				  ? '*' : ' ');
	}
	result +=
	    sprintf(buffer + result, "%-25s\t0x%08X [%c]\n", "ACPI_ALL_DRIVERS",
		    ACPI_ALL_DRIVERS,
		    (acpi_dbg_layer & ACPI_ALL_DRIVERS) ==
		    ACPI_ALL_DRIVERS ? '*' : (acpi_dbg_layer & ACPI_ALL_DRIVERS)
		    == 0 ? ' ' : '-');
	result +=
	    sprintf(buffer + result,
		    "--\ndebug_layer = 0x%08X ( * = enabled)\n",
		    acpi_dbg_layer);

	return result;
}

static int param_get_debug_level(char *buffer, const struct kernel_param *kp)
{
	int result = 0;
	int i;

	result = sprintf(buffer, "%-25s\tHex        SET\n", "Description");

	for (i = 0; i < ARRAY_SIZE(acpi_debug_levels); i++) {
		result += sprintf(buffer + result, "%-25s\t0x%08lX [%c]\n",
				  acpi_debug_levels[i].name,
				  acpi_debug_levels[i].value,
				  (acpi_dbg_level & acpi_debug_levels[i].value)
				  ? '*' : ' ');
	}
	result +=
	    sprintf(buffer + result, "--\ndebug_level = 0x%08X (* = enabled)\n",
		    acpi_dbg_level);

	return result;
}

static const struct kernel_param_ops param_ops_debug_layer = {
	.set = param_set_uint,
	.get = param_get_debug_layer,
};

static const struct kernel_param_ops param_ops_debug_level = {
	.set = param_set_uint,
	.get = param_get_debug_level,
};

module_param_cb(debug_layer, &param_ops_debug_layer, &acpi_dbg_layer, 0644);
module_param_cb(debug_level, &param_ops_debug_level, &acpi_dbg_level, 0644);
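
/*
 * Illustrative usage from user space (CONFIG_ACPI_DEBUG only; the ACPI core
 * is normally built in, so these parameters live under the "acpi" module
 * directory):
 *
 *   # list the available masks and the current settings
 *   cat /sys/module/acpi/parameters/debug_layer
 *   cat /sys/module/acpi/parameters/debug_level
 *
 *   # OR together the hex values of the layers/levels of interest (taken
 *   # from the listings above) and write them back, for example:
 *   echo 0x4 > /sys/module/acpi/parameters/debug_level
 *
 * The same values can be given at boot time via acpi.debug_layer= and
 * acpi.debug_level= on the kernel command line.
 */
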
static char trace_method_name[1024];

int param_set_trace_method_name(const char *val, const struct kernel_param *kp)
{
	u32 saved_flags = 0;
	bool is_abs_path = true;

	if (*val != '\\')
		is_abs_path = false;

	if ((is_abs_path && strlen(val) > 1023) ||
	    (!is_abs_path && strlen(val) > 1022)) {
		pr_err("%s: string parameter too long\n", kp->name);
		return -ENOSPC;
	}

	/*
	 * It's not safe to update acpi_gbl_trace_method_name without
	 * having the tracer stopped, so we save the original tracer
	 * state and disable it.
	 */
	saved_flags = acpi_gbl_trace_flags;
	(void)acpi_debug_trace(NULL,
			       acpi_gbl_trace_dbg_level,
			       acpi_gbl_trace_dbg_layer,
			       0);

	/* This is a hack. We can't kmalloc in early boot. */
	if (is_abs_path)
		strcpy(trace_method_name, val);
	else {
		trace_method_name[0] = '\\';
		strcpy(trace_method_name + 1, val);
	}

	/* Restore the original tracer state */
	(void)acpi_debug_trace(trace_method_name,
			       acpi_gbl_trace_dbg_level,
			       acpi_gbl_trace_dbg_layer,
			       saved_flags);

	return 0;
}

static int param_get_trace_method_name(char *buffer, const struct kernel_param *kp)
{
	return scnprintf(buffer, PAGE_SIZE, "%s", acpi_gbl_trace_method_name);
}

static const struct kernel_param_ops param_ops_trace_method = {
	.set = param_set_trace_method_name,
	.get = param_get_trace_method_name,
};

static const struct kernel_param_ops param_ops_trace_attrib = {
	.set = param_set_uint,
	.get = param_get_uint,
};

module_param_cb(trace_method_name, &param_ops_trace_method, &trace_method_name, 0644);
module_param_cb(trace_debug_layer, &param_ops_trace_attrib, &acpi_gbl_trace_dbg_layer, 0644);
module_param_cb(trace_debug_level, &param_ops_trace_attrib, &acpi_gbl_trace_dbg_level, 0644);

static int param_set_trace_state(const char *val, struct kernel_param *kp)
{
	acpi_status status;
	const char *method = trace_method_name;
	u32 flags = 0;

/* The "xxx-once" comparisons must be made before the plain "xxx" ones. */
#define acpi_compare_param(val, key)	\
	strncmp((val), (key), sizeof(key) - 1)

	if (!acpi_compare_param(val, "enable")) {
		method = NULL;
		flags = ACPI_TRACE_ENABLED;
	} else if (!acpi_compare_param(val, "disable"))
		method = NULL;
	else if (!acpi_compare_param(val, "method-once"))
		flags = ACPI_TRACE_ENABLED | ACPI_TRACE_ONESHOT;
	else if (!acpi_compare_param(val, "method"))
		flags = ACPI_TRACE_ENABLED;
	else if (!acpi_compare_param(val, "opcode-once"))
		flags = ACPI_TRACE_ENABLED | ACPI_TRACE_ONESHOT | ACPI_TRACE_OPCODE;
	else if (!acpi_compare_param(val, "opcode"))
		flags = ACPI_TRACE_ENABLED | ACPI_TRACE_OPCODE;
	else
		return -EINVAL;

	status = acpi_debug_trace(method,
				  acpi_gbl_trace_dbg_level,
				  acpi_gbl_trace_dbg_layer,
				  flags);
	if (ACPI_FAILURE(status))
		return -EBUSY;

	return 0;
}

static int param_get_trace_state(char *buffer, struct kernel_param *kp)
{
	if (!(acpi_gbl_trace_flags & ACPI_TRACE_ENABLED))
		return sprintf(buffer, "disable");
	else {
		if (acpi_gbl_trace_method_name) {
			if (acpi_gbl_trace_flags & ACPI_TRACE_ONESHOT)
				return sprintf(buffer, "method-once");
			else
				return sprintf(buffer, "method");
		} else
			return sprintf(buffer, "enable");
	}
	return 0;
}

module_param_call(trace_state, param_set_trace_state, param_get_trace_state,
		  NULL, 0644);
#endif /* CONFIG_ACPI_DEBUG */

/* /sys/module/acpi/parameters/aml_debug_output */

module_param_named(aml_debug_output, acpi_gbl_enable_aml_debug_object,
		   byte, 0644);
MODULE_PARM_DESC(aml_debug_output,
		 "Enable/disable the ACPI Debug Object output.");

/* /sys/module/acpi/parameters/acpica_version */
static int param_get_acpica_version(char *buffer, struct kernel_param *kp)
{
	int result;

	result = sprintf(buffer, "%x", ACPI_CA_VERSION);

	return result;
}

module_param_call(acpica_version, NULL, param_get_acpica_version, NULL, 0444);

/*
 * ACPI table sysfs I/F:
 * /sys/firmware/acpi/tables/
 * /sys/firmware/acpi/tables/data/
 * /sys/firmware/acpi/tables/dynamic/
 */

static LIST_HEAD(acpi_table_attr_list);
static struct kobject *tables_kobj;
static struct kobject *tables_data_kobj;
static struct kobject *dynamic_tables_kobj;
static struct kobject *hotplug_kobj;

#define ACPI_MAX_TABLE_INSTANCES	999
#define ACPI_INST_SIZE			4 /* including trailing 0 */

struct acpi_table_attr {
	struct bin_attribute attr;
	char name[ACPI_NAME_SIZE];
	int instance;
	char filename[ACPI_NAME_SIZE + ACPI_INST_SIZE];
	struct list_head node;
};

struct acpi_data_attr {
	struct bin_attribute attr;
	u64 addr;
};

static ssize_t acpi_table_show(struct file *filp, struct kobject *kobj,
			       struct bin_attribute *bin_attr, char *buf,
			       loff_t offset, size_t count)
{
	struct acpi_table_attr *table_attr =
	    container_of(bin_attr, struct acpi_table_attr, attr);
	struct acpi_table_header *table_header = NULL;
	acpi_status status;
	ssize_t rc;

	status = acpi_get_table(table_attr->name, table_attr->instance,
				&table_header);
	if (ACPI_FAILURE(status))
		return -ENODEV;

	rc = memory_read_from_buffer(buf, count, &offset, table_header,
				     table_header->length);
	acpi_put_table(table_header);
	return rc;
}

static int acpi_table_attr_init(struct kobject *tables_obj,
				struct acpi_table_attr *table_attr,
				struct acpi_table_header *table_header)
{
	struct acpi_table_header *header = NULL;
	struct acpi_table_attr *attr = NULL;
	char instance_str[ACPI_INST_SIZE];

	sysfs_attr_init(&table_attr->attr.attr);
	ACPI_MOVE_NAME(table_attr->name, table_header->signature);

	list_for_each_entry(attr, &acpi_table_attr_list, node) {
		if (ACPI_COMPARE_NAME(table_attr->name, attr->name))
			if (table_attr->instance < attr->instance)
				table_attr->instance = attr->instance;
	}
	table_attr->instance++;
	if (table_attr->instance > ACPI_MAX_TABLE_INSTANCES) {
		pr_warn("%4.4s: too many table instances\n",
			table_attr->name);
		return -ERANGE;
	}

	ACPI_MOVE_NAME(table_attr->filename, table_header->signature);
	table_attr->filename[ACPI_NAME_SIZE] = '\0';
	if (table_attr->instance > 1 || (table_attr->instance == 1 &&
					 !acpi_get_table
					 (table_header->signature, 2, &header))) {
		snprintf(instance_str, sizeof(instance_str), "%u",
			 table_attr->instance);
		strcat(table_attr->filename, instance_str);
	}

	table_attr->attr.size = table_header->length;
	table_attr->attr.read = acpi_table_show;
	table_attr->attr.attr.name = table_attr->filename;
	table_attr->attr.attr.mode = 0400;

	return sysfs_create_bin_file(tables_obj, &table_attr->attr);
}

acpi_status acpi_sysfs_table_handler(u32 event, void *table, void *context)
{
	struct acpi_table_attr *table_attr;

	switch (event) {
	case ACPI_TABLE_EVENT_INSTALL:
		table_attr =
		    kzalloc(sizeof(struct acpi_table_attr), GFP_KERNEL);
		if (!table_attr)
			return AE_NO_MEMORY;

		if (acpi_table_attr_init(dynamic_tables_kobj,
					 table_attr, table)) {
			kfree(table_attr);
			return AE_ERROR;
		}
		list_add_tail(&table_attr->node, &acpi_table_attr_list);
		break;
	case ACPI_TABLE_EVENT_LOAD:
	case ACPI_TABLE_EVENT_UNLOAD:
	case ACPI_TABLE_EVENT_UNINSTALL:
		/*
		 * We do not need to do anything right now
		 * because the table is not deleted from the
		 * global table list when unloading it.
		 */
		break;
	default:
		return AE_BAD_PARAMETER;
	}
	return AE_OK;
}

static ssize_t acpi_data_show(struct file *filp, struct kobject *kobj,
			      struct bin_attribute *bin_attr, char *buf,
			      loff_t offset, size_t count)
{
	struct acpi_data_attr *data_attr;
	void __iomem *base;
	ssize_t rc;

	data_attr = container_of(bin_attr, struct acpi_data_attr, attr);

	base = acpi_os_map_memory(data_attr->addr, data_attr->attr.size);
	if (!base)
		return -ENOMEM;
	rc = memory_read_from_buffer(buf, count, &offset, base,
				     data_attr->attr.size);
	acpi_os_unmap_memory(base, data_attr->attr.size);

	return rc;
}

static int acpi_bert_data_init(void *th, struct acpi_data_attr *data_attr)
{
	struct acpi_table_bert *bert = th;

	if (bert->header.length < sizeof(struct acpi_table_bert) ||
	    bert->region_length < sizeof(struct acpi_hest_generic_status)) {
		kfree(data_attr);
		return -EINVAL;
	}
	data_attr->addr = bert->address;
	data_attr->attr.size = bert->region_length;
	data_attr->attr.attr.name = "BERT";

	return sysfs_create_bin_file(tables_data_kobj, &data_attr->attr);
}

static struct acpi_data_obj {
	char *name;
	int (*fn)(void *, struct acpi_data_attr *);
} acpi_data_objs[] = {
	{ ACPI_SIG_BERT, acpi_bert_data_init },
};

#define NUM_ACPI_DATA_OBJS ARRAY_SIZE(acpi_data_objs)

static int acpi_table_data_init(struct acpi_table_header *th)
{
	struct acpi_data_attr *data_attr;
	int i;

	for (i = 0; i < NUM_ACPI_DATA_OBJS; i++) {
		if (ACPI_COMPARE_NAME(th->signature, acpi_data_objs[i].name)) {
			data_attr = kzalloc(sizeof(*data_attr), GFP_KERNEL);
			if (!data_attr)
				return -ENOMEM;
			sysfs_attr_init(&data_attr->attr.attr);
			data_attr->attr.read = acpi_data_show;
			data_attr->attr.attr.mode = 0400;
			return acpi_data_objs[i].fn(th, data_attr);
		}
	}
	return 0;
}

static int acpi_tables_sysfs_init(void)
{
	struct acpi_table_attr *table_attr;
	struct acpi_table_header *table_header = NULL;
	int table_index;
	acpi_status status;
	int ret;

	tables_kobj = kobject_create_and_add("tables", acpi_kobj);
	if (!tables_kobj)
		goto err;

	tables_data_kobj = kobject_create_and_add("data", tables_kobj);
	if (!tables_data_kobj)
		goto err_tables_data;

	dynamic_tables_kobj = kobject_create_and_add("dynamic", tables_kobj);
	if (!dynamic_tables_kobj)
		goto err_dynamic_tables;

	for (table_index = 0;; table_index++) {
		status = acpi_get_table_by_index(table_index, &table_header);

		if (status == AE_BAD_PARAMETER)
			break;

		if (ACPI_FAILURE(status))
			continue;

		table_attr = kzalloc(sizeof(*table_attr), GFP_KERNEL);
		if (!table_attr)
			return -ENOMEM;

		ret = acpi_table_attr_init(tables_kobj,
					   table_attr, table_header);
		if (ret) {
			kfree(table_attr);
			return ret;
		}
		list_add_tail(&table_attr->node, &acpi_table_attr_list);
		acpi_table_data_init(table_header);
	}

	kobject_uevent(tables_kobj, KOBJ_ADD);
	kobject_uevent(tables_data_kobj, KOBJ_ADD);
	kobject_uevent(dynamic_tables_kobj, KOBJ_ADD);

	return 0;
err_dynamic_tables:
	kobject_put(tables_data_kobj);
err_tables_data:
	kobject_put(tables_kobj);
err:
	return -ENOMEM;
}
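
/*
 * Illustrative use of the table interface from user space (commands are
 * examples only):
 *
 *   # dump the DSDT handed to the kernel by the firmware (mode 0400, so
 *   # this needs root)
 *   cat /sys/firmware/acpi/tables/DSDT > dsdt.dat
 *
 *   # duplicate signatures get an instance suffix, e.g. SSDT1, SSDT2, ...
 *   ls /sys/firmware/acpi/tables/
 *
 * Tables installed at runtime appear under tables/dynamic/, and raw data
 * regions referenced by tables (currently only BERT) under tables/data/.
 */
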
/*
 * Detailed ACPI IRQ counters:
 * /sys/firmware/acpi/interrupts/
 */

u32 acpi_irq_handled;
u32 acpi_irq_not_handled;

#define COUNT_GPE 0
#define COUNT_SCI 1		/* acpi_irq_handled */
#define COUNT_SCI_NOT 2		/* acpi_irq_not_handled */
#define COUNT_ERROR 3		/* other */
#define NUM_COUNTERS_EXTRA 4

struct event_counter {
	u32 count;
	u32 flags;
};

static struct event_counter *all_counters;
static u32 num_gpes;
static u32 num_counters;
static struct attribute **all_attrs;
static u32 acpi_gpe_count;

static struct attribute_group interrupt_stats_attr_group = {
	.name = "interrupts",
};

static struct kobj_attribute *counter_attrs;
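
/*
 * Layout of the all_counters[] array: entries 0 .. num_gpes - 1 count the
 * individual GPEs, the next ACPI_NUM_FIXED_EVENTS entries count the fixed
 * events, and the final NUM_COUNTERS_EXTRA entries are the aggregate
 * "gpe_all", "sci", "sci_not" and "error" counters addressed through the
 * COUNT_* offsets above.
 */
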
static void delete_gpe_attr_array(void)
{
	struct event_counter *tmp = all_counters;

	all_counters = NULL;
	kfree(tmp);

	if (counter_attrs) {
		int i;

		for (i = 0; i < num_gpes; i++)
			kfree(counter_attrs[i].attr.name);

		kfree(counter_attrs);
	}
	kfree(all_attrs);

	return;
}

static void gpe_count(u32 gpe_number)
{
	acpi_gpe_count++;

	if (!all_counters)
		return;

	if (gpe_number < num_gpes)
		all_counters[gpe_number].count++;
	else
		all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS +
			     COUNT_ERROR].count++;

	return;
}

static void fixed_event_count(u32 event_number)
{
	if (!all_counters)
		return;

	if (event_number < ACPI_NUM_FIXED_EVENTS)
		all_counters[num_gpes + event_number].count++;
	else
		all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS +
			     COUNT_ERROR].count++;

	return;
}

static void acpi_global_event_handler(u32 event_type, acpi_handle device,
				      u32 event_number, void *context)
{
	if (event_type == ACPI_EVENT_TYPE_GPE) {
		gpe_count(event_number);
		pr_debug("GPE event 0x%02x\n", event_number);
	} else if (event_type == ACPI_EVENT_TYPE_FIXED) {
		fixed_event_count(event_number);
		pr_debug("Fixed event 0x%02x\n", event_number);
	} else {
		pr_debug("Other event 0x%02x\n", event_number);
	}
}

static int get_status(u32 index, acpi_event_status *status,
		      acpi_handle *handle)
{
	int result;

	if (index >= num_gpes + ACPI_NUM_FIXED_EVENTS)
		return -EINVAL;

	if (index < num_gpes) {
		result = acpi_get_gpe_device(index, handle);
		if (result) {
			ACPI_EXCEPTION((AE_INFO, AE_NOT_FOUND,
					"Invalid GPE 0x%x", index));
			return result;
		}
		result = acpi_get_gpe_status(*handle, index, status);
	} else if (index < (num_gpes + ACPI_NUM_FIXED_EVENTS))
		result = acpi_get_event_status(index - num_gpes, status);

	return result;
}

static ssize_t counter_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	int index = attr - counter_attrs;
	int size;
	acpi_handle handle;
	acpi_event_status status;
	int result = 0;

	all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI].count =
	    acpi_irq_handled;
	all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI_NOT].count =
	    acpi_irq_not_handled;
	all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_GPE].count =
	    acpi_gpe_count;
	size = sprintf(buf, "%8u", all_counters[index].count);

	/* "gpe_all" or "sci" */
	if (index >= num_gpes + ACPI_NUM_FIXED_EVENTS)
		goto end;

	result = get_status(index, &status, &handle);
	if (result)
		goto end;

	if (status & ACPI_EVENT_FLAG_ENABLE_SET)
		size += sprintf(buf + size, "  EN");
	else
		size += sprintf(buf + size, "    ");
	if (status & ACPI_EVENT_FLAG_STATUS_SET)
		size += sprintf(buf + size, " STS");
	else
		size += sprintf(buf + size, "    ");

	if (!(status & ACPI_EVENT_FLAG_HAS_HANDLER))
		size += sprintf(buf + size, " invalid     ");
	else if (status & ACPI_EVENT_FLAG_ENABLED)
		size += sprintf(buf + size, " enabled     ");
	else if (status & ACPI_EVENT_FLAG_WAKE_ENABLED)
		size += sprintf(buf + size, " wake_enabled");
	else
		size += sprintf(buf + size, " disabled    ");
	if (status & ACPI_EVENT_FLAG_MASKED)
		size += sprintf(buf + size, " masked      ");
	else
		size += sprintf(buf + size, " unmasked");

end:
	size += sprintf(buf + size, "\n");
	return result ? result : size;
}

/*
 * counter_set() sets the specified counter.
 * Setting the total "sci" file to any value clears all counters.
 * It can also enable/disable/clear a GPE or fixed event from user space.
 */
static ssize_t counter_set(struct kobject *kobj,
			   struct kobj_attribute *attr, const char *buf,
			   size_t size)
{
	int index = attr - counter_attrs;
	acpi_event_status status;
	acpi_handle handle;
	int result = 0;
	unsigned long tmp;

	if (index == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI) {
		int i;
		for (i = 0; i < num_counters; ++i)
			all_counters[i].count = 0;
		acpi_gpe_count = 0;
		acpi_irq_handled = 0;
		acpi_irq_not_handled = 0;
		goto end;
	}

	/* show the event status for both GPEs and Fixed Events */
	result = get_status(index, &status, &handle);
	if (result)
		goto end;

	if (!(status & ACPI_EVENT_FLAG_HAS_HANDLER)) {
		printk(KERN_WARNING PREFIX
		       "Can not change Invalid GPE/Fixed Event status\n");
		return -EINVAL;
	}

	if (index < num_gpes) {
		if (!strcmp(buf, "disable\n") &&
		    (status & ACPI_EVENT_FLAG_ENABLED))
			result = acpi_disable_gpe(handle, index);
		else if (!strcmp(buf, "enable\n") &&
			 !(status & ACPI_EVENT_FLAG_ENABLED))
			result = acpi_enable_gpe(handle, index);
		else if (!strcmp(buf, "clear\n") &&
			 (status & ACPI_EVENT_FLAG_STATUS_SET))
			result = acpi_clear_gpe(handle, index);
		else if (!strcmp(buf, "mask\n"))
			result = acpi_mask_gpe(handle, index, TRUE);
		else if (!strcmp(buf, "unmask\n"))
			result = acpi_mask_gpe(handle, index, FALSE);
		else if (!kstrtoul(buf, 0, &tmp))
			all_counters[index].count = tmp;
		else
			result = -EINVAL;
	} else if (index < num_gpes + ACPI_NUM_FIXED_EVENTS) {
		int event = index - num_gpes;
		if (!strcmp(buf, "disable\n") &&
		    (status & ACPI_EVENT_FLAG_ENABLE_SET))
			result = acpi_disable_event(event, ACPI_NOT_ISR);
		else if (!strcmp(buf, "enable\n") &&
			 !(status & ACPI_EVENT_FLAG_ENABLE_SET))
			result = acpi_enable_event(event, ACPI_NOT_ISR);
		else if (!strcmp(buf, "clear\n") &&
			 (status & ACPI_EVENT_FLAG_STATUS_SET))
			result = acpi_clear_event(event);
		else if (!kstrtoul(buf, 0, &tmp))
			all_counters[index].count = tmp;
		else
			result = -EINVAL;
	} else
		all_counters[index].count = strtoul(buf, NULL, 0);

	if (ACPI_FAILURE(result))
		result = -EINVAL;
end:
	return result ? result : size;
}
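
/*
 * Illustrative usage from user space (the GPE number is an example only):
 *
 *   cat /sys/firmware/acpi/interrupts/sci        # SCIs serviced so far
 *   cat /sys/firmware/acpi/interrupts/gpe1B      # count and status of GPE 0x1B
 *   echo 0 > /sys/firmware/acpi/interrupts/sci   # clear all counters
 *   echo disable > /sys/firmware/acpi/interrupts/gpe1B
 *   echo clear > /sys/firmware/acpi/interrupts/gpe1B
 */
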
/*
 * A Quirk Mechanism for GPE Flooding Prevention:
 *
 * Quirks may be needed to prevent GPE flooding on a specific GPE. The
 * flooding typically cannot be detected and prevented automatically by the
 * ACPI_GPE_DISPATCH_NONE check, because a _Lxx/_Exx handler is present in
 * the AML tables. This normally indicates a feature gap in Linux, so instead
 * of providing endless quirk tables, we provide a boot parameter for those
 * who want this quirk. For example, to prevent GPE flooding on GPE 00, use
 * the following boot parameter:
 *   acpi_mask_gpe=0x00
 * The masking status can be modified at runtime through the following
 * interface:
 *   echo unmask > /sys/firmware/acpi/interrupts/gpe00
 */

/*
 * Currently, the GPE flooding prevention only supports masking the GPEs
 * numbered from 0x00 to 0x7f.
 */
#define ACPI_MASKABLE_GPE_MAX	0x80

static u64 __initdata acpi_masked_gpes;

static int __init acpi_gpe_set_masked_gpes(char *val)
{
	u8 gpe;

	if (kstrtou8(val, 0, &gpe) || gpe > ACPI_MASKABLE_GPE_MAX)
		return -EINVAL;
	acpi_masked_gpes |= ((u64)1 << gpe);

	return 1;
}
__setup("acpi_mask_gpe=", acpi_gpe_set_masked_gpes);
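
/*
 * Note: the __setup() handler above runs once for every "acpi_mask_gpe="
 * occurrence on the command line and ORs each value into acpi_masked_gpes,
 * so several GPEs can be masked by repeating the parameter, e.g.
 * "acpi_mask_gpe=0x08 acpi_mask_gpe=0x0A" (illustrative values).
 */
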
void __init acpi_gpe_apply_masked_gpes(void)
{
	acpi_handle handle;
	acpi_status status;
	u8 gpe;

	for (gpe = 0;
	     gpe < min_t(u8, ACPI_MASKABLE_GPE_MAX, acpi_current_gpe_count);
	     gpe++) {
		if (acpi_masked_gpes & ((u64)1 << gpe)) {
			status = acpi_get_gpe_device(gpe, &handle);
			if (ACPI_SUCCESS(status)) {
				pr_info("Masking GPE 0x%x.\n", gpe);
				(void)acpi_mask_gpe(handle, gpe, TRUE);
			}
		}
	}
}

void acpi_irq_stats_init(void)
{
	acpi_status status;
	int i;

	if (all_counters)
		return;

	num_gpes = acpi_current_gpe_count;
	num_counters = num_gpes + ACPI_NUM_FIXED_EVENTS + NUM_COUNTERS_EXTRA;

	all_attrs = kzalloc(sizeof(struct attribute *) * (num_counters + 1),
			    GFP_KERNEL);
	if (all_attrs == NULL)
		return;

	all_counters = kzalloc(sizeof(struct event_counter) * (num_counters),
			       GFP_KERNEL);
	if (all_counters == NULL)
		goto fail;

	status = acpi_install_global_event_handler(acpi_global_event_handler, NULL);
	if (ACPI_FAILURE(status))
		goto fail;

	counter_attrs = kzalloc(sizeof(struct kobj_attribute) * (num_counters),
				GFP_KERNEL);
	if (counter_attrs == NULL)
		goto fail;

	for (i = 0; i < num_counters; ++i) {
		char buffer[12];
		char *name;

		if (i < num_gpes)
			sprintf(buffer, "gpe%02X", i);
		else if (i == num_gpes + ACPI_EVENT_PMTIMER)
			sprintf(buffer, "ff_pmtimer");
		else if (i == num_gpes + ACPI_EVENT_GLOBAL)
			sprintf(buffer, "ff_gbl_lock");
		else if (i == num_gpes + ACPI_EVENT_POWER_BUTTON)
			sprintf(buffer, "ff_pwr_btn");
		else if (i == num_gpes + ACPI_EVENT_SLEEP_BUTTON)
			sprintf(buffer, "ff_slp_btn");
		else if (i == num_gpes + ACPI_EVENT_RTC)
			sprintf(buffer, "ff_rt_clk");
		else if (i == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_GPE)
			sprintf(buffer, "gpe_all");
		else if (i == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI)
			sprintf(buffer, "sci");
		else if (i == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI_NOT)
			sprintf(buffer, "sci_not");
		else if (i == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_ERROR)
			sprintf(buffer, "error");
		else
			sprintf(buffer, "bug%02X", i);

		name = kstrdup(buffer, GFP_KERNEL);
		if (name == NULL)
			goto fail;

		sysfs_attr_init(&counter_attrs[i].attr);
		counter_attrs[i].attr.name = name;
		counter_attrs[i].attr.mode = 0644;
		counter_attrs[i].show = counter_show;
		counter_attrs[i].store = counter_set;

		all_attrs[i] = &counter_attrs[i].attr;
	}

	interrupt_stats_attr_group.attrs = all_attrs;
	if (!sysfs_create_group(acpi_kobj, &interrupt_stats_attr_group))
		return;

fail:
	delete_gpe_attr_array();
	return;
}

static void __exit interrupt_stats_exit(void)
{
	sysfs_remove_group(acpi_kobj, &interrupt_stats_attr_group);

	delete_gpe_attr_array();

	return;
}

static ssize_t
acpi_show_profile(struct device *dev, struct device_attribute *attr,
		  char *buf)
{
	return sprintf(buf, "%d\n", acpi_gbl_FADT.preferred_profile);
}

static const struct device_attribute pm_profile_attr =
	__ATTR(pm_profile, S_IRUGO, acpi_show_profile, NULL);

static ssize_t hotplug_enabled_show(struct kobject *kobj,
				    struct kobj_attribute *attr, char *buf)
{
	struct acpi_hotplug_profile *hotplug = to_acpi_hotplug_profile(kobj);

	return sprintf(buf, "%d\n", hotplug->enabled);
}

static ssize_t hotplug_enabled_store(struct kobject *kobj,
				     struct kobj_attribute *attr,
				     const char *buf, size_t size)
{
	struct acpi_hotplug_profile *hotplug = to_acpi_hotplug_profile(kobj);
	unsigned int val;

	if (kstrtouint(buf, 10, &val) || val > 1)
		return -EINVAL;

	acpi_scan_hotplug_enabled(hotplug, val);
	return size;
}

static struct kobj_attribute hotplug_enabled_attr =
	__ATTR(enabled, S_IRUGO | S_IWUSR, hotplug_enabled_show,
	       hotplug_enabled_store);

static struct attribute *hotplug_profile_attrs[] = {
	&hotplug_enabled_attr.attr,
	NULL
};

static struct kobj_type acpi_hotplug_profile_ktype = {
	.sysfs_ops = &kobj_sysfs_ops,
	.default_attrs = hotplug_profile_attrs,
};

void acpi_sysfs_add_hotplug_profile(struct acpi_hotplug_profile *hotplug,
				    const char *name)
{
	int error;

	if (!hotplug_kobj)
		goto err_out;

	error = kobject_init_and_add(&hotplug->kobj,
		&acpi_hotplug_profile_ktype, hotplug_kobj, "%s", name);
	if (error)
		goto err_out;

	kobject_uevent(&hotplug->kobj, KOBJ_ADD);
	return;

err_out:
	pr_err(PREFIX "Unable to add hotplug profile '%s'\n", name);
}

static ssize_t force_remove_show(struct kobject *kobj,
				 struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", 0);
}

static ssize_t force_remove_store(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  const char *buf, size_t size)
{
	bool val;
	int ret;

	ret = strtobool(buf, &val);
	if (ret < 0)
		return ret;

	if (val) {
		pr_err("Enabling force_remove is not supported anymore. Please report to linux-acpi@vger.kernel.org if you depend on this functionality\n");
		return -EINVAL;
	}
	return size;
}

static const struct kobj_attribute force_remove_attr =
	__ATTR(force_remove, S_IRUGO | S_IWUSR, force_remove_show,
	       force_remove_store);

int __init acpi_sysfs_init(void)
{
	int result;

	result = acpi_tables_sysfs_init();
	if (result)
		return result;

	hotplug_kobj = kobject_create_and_add("hotplug", acpi_kobj);
	if (!hotplug_kobj)
		return -ENOMEM;

	result = sysfs_create_file(hotplug_kobj, &force_remove_attr.attr);
	if (result)
		return result;

	result = sysfs_create_file(acpi_kobj, &pm_profile_attr.attr);
	return result;
}