// SPDX-License-Identifier: GPL-2.0
/*
 * sysfs.c - ACPI sysfs interface to userspace.
 */

#define pr_fmt(fmt) "ACPI: " fmt

#include <linux/acpi.h>
#include <linux/bitmap.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kstrtox.h>
#include <linux/moduleparam.h>

#include "internal.h"

#ifdef CONFIG_ACPI_DEBUG
/*
 * ACPI debug sysfs I/F, including:
 * /sys/module/acpi/parameters/debug_layer
 * /sys/module/acpi/parameters/debug_level
 * /sys/module/acpi/parameters/trace_method_name
 * /sys/module/acpi/parameters/trace_state
 * /sys/module/acpi/parameters/trace_debug_layer
 * /sys/module/acpi/parameters/trace_debug_level
 */

struct acpi_dlayer {
	const char *name;
	unsigned long value;
};
struct acpi_dlevel {
	const char *name;
	unsigned long value;
};
#define ACPI_DEBUG_INIT(v)	{ .name = #v, .value = v }

static const struct acpi_dlayer acpi_debug_layers[] = {
	ACPI_DEBUG_INIT(ACPI_UTILITIES),
	ACPI_DEBUG_INIT(ACPI_HARDWARE),
	ACPI_DEBUG_INIT(ACPI_EVENTS),
	ACPI_DEBUG_INIT(ACPI_TABLES),
	ACPI_DEBUG_INIT(ACPI_NAMESPACE),
	ACPI_DEBUG_INIT(ACPI_PARSER),
	ACPI_DEBUG_INIT(ACPI_DISPATCHER),
	ACPI_DEBUG_INIT(ACPI_EXECUTER),
	ACPI_DEBUG_INIT(ACPI_RESOURCES),
	ACPI_DEBUG_INIT(ACPI_CA_DEBUGGER),
	ACPI_DEBUG_INIT(ACPI_OS_SERVICES),
	ACPI_DEBUG_INIT(ACPI_CA_DISASSEMBLER),
	ACPI_DEBUG_INIT(ACPI_COMPILER),
	ACPI_DEBUG_INIT(ACPI_TOOLS),
};

static const struct acpi_dlevel acpi_debug_levels[] = {
	ACPI_DEBUG_INIT(ACPI_LV_INIT),
	ACPI_DEBUG_INIT(ACPI_LV_DEBUG_OBJECT),
	ACPI_DEBUG_INIT(ACPI_LV_INFO),
	ACPI_DEBUG_INIT(ACPI_LV_REPAIR),
	ACPI_DEBUG_INIT(ACPI_LV_TRACE_POINT),

	ACPI_DEBUG_INIT(ACPI_LV_INIT_NAMES),
	ACPI_DEBUG_INIT(ACPI_LV_PARSE),
	ACPI_DEBUG_INIT(ACPI_LV_LOAD),
	ACPI_DEBUG_INIT(ACPI_LV_DISPATCH),
	ACPI_DEBUG_INIT(ACPI_LV_EXEC),
	ACPI_DEBUG_INIT(ACPI_LV_NAMES),
	ACPI_DEBUG_INIT(ACPI_LV_OPREGION),
	ACPI_DEBUG_INIT(ACPI_LV_BFIELD),
	ACPI_DEBUG_INIT(ACPI_LV_TABLES),
	ACPI_DEBUG_INIT(ACPI_LV_VALUES),
	ACPI_DEBUG_INIT(ACPI_LV_OBJECTS),
	ACPI_DEBUG_INIT(ACPI_LV_RESOURCES),
	ACPI_DEBUG_INIT(ACPI_LV_USER_REQUESTS),
	ACPI_DEBUG_INIT(ACPI_LV_PACKAGE),

	ACPI_DEBUG_INIT(ACPI_LV_ALLOCATIONS),
	ACPI_DEBUG_INIT(ACPI_LV_FUNCTIONS),
	ACPI_DEBUG_INIT(ACPI_LV_OPTIMIZATIONS),

	ACPI_DEBUG_INIT(ACPI_LV_MUTEX),
	ACPI_DEBUG_INIT(ACPI_LV_THREADS),
	ACPI_DEBUG_INIT(ACPI_LV_IO),
	ACPI_DEBUG_INIT(ACPI_LV_INTERRUPTS),

	ACPI_DEBUG_INIT(ACPI_LV_AML_DISASSEMBLE),
	ACPI_DEBUG_INIT(ACPI_LV_VERBOSE_INFO),
	ACPI_DEBUG_INIT(ACPI_LV_FULL_TABLES),
	ACPI_DEBUG_INIT(ACPI_LV_EVENTS),
};

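/*
 * Format the debug_layer parameter for reading: one line per ACPICA debug
 * layer with its name, hex mask and a '*' when that bit is set in
 * acpi_dbg_layer, followed by a summary line.  Writes go through
 * param_set_uint().
 */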
static int param_get_debug_layer(char *buffer, const struct kernel_param *kp)
{
	int result = 0;
	int i;

	result = sprintf(buffer, "%-25s\tHex SET\n", "Description");

	for (i = 0; i < ARRAY_SIZE(acpi_debug_layers); i++) {
		result += sprintf(buffer + result, "%-25s\t0x%08lX [%c]\n",
				  acpi_debug_layers[i].name,
				  acpi_debug_layers[i].value,
				  (acpi_dbg_layer & acpi_debug_layers[i].value)
				  ? '*' : ' ');
	}
	result +=
	    sprintf(buffer + result, "%-25s\t0x%08X [%c]\n", "ACPI_ALL_DRIVERS",
		    ACPI_ALL_DRIVERS,
		    (acpi_dbg_layer & ACPI_ALL_DRIVERS) ==
		    ACPI_ALL_DRIVERS ? '*' : (acpi_dbg_layer & ACPI_ALL_DRIVERS)
		    == 0 ? ' ' : '-');
	result +=
	    sprintf(buffer + result,
		    "--\ndebug_layer = 0x%08X ( * = enabled)\n",
		    acpi_dbg_layer);

	return result;
}

static int param_get_debug_level(char *buffer, const struct kernel_param *kp)
{
	int result = 0;
	int i;

	result = sprintf(buffer, "%-25s\tHex SET\n", "Description");

	for (i = 0; i < ARRAY_SIZE(acpi_debug_levels); i++) {
		result += sprintf(buffer + result, "%-25s\t0x%08lX [%c]\n",
				  acpi_debug_levels[i].name,
				  acpi_debug_levels[i].value,
				  (acpi_dbg_level & acpi_debug_levels[i].value)
				  ? '*' : ' ');
	}
	result +=
	    sprintf(buffer + result, "--\ndebug_level = 0x%08X (* = enabled)\n",
		    acpi_dbg_level);

	return result;
}

static const struct kernel_param_ops param_ops_debug_layer = {
	.set = param_set_uint,
	.get = param_get_debug_layer,
};

static const struct kernel_param_ops param_ops_debug_level = {
	.set = param_set_uint,
	.get = param_get_debug_level,
};

module_param_cb(debug_layer, &param_ops_debug_layer, &acpi_dbg_layer, 0644);
module_param_cb(debug_level, &param_ops_debug_level, &acpi_dbg_level, 0644);

static char trace_method_name[1024];

static int param_set_trace_method_name(const char *val,
				       const struct kernel_param *kp)
{
	u32 saved_flags = 0;
	bool is_abs_path = true;

	if (*val != '\\')
		is_abs_path = false;

	if ((is_abs_path && strlen(val) > 1023) ||
	    (!is_abs_path && strlen(val) > 1022)) {
		pr_err("%s: string parameter too long\n", kp->name);
		return -ENOSPC;
	}

	/*
	 * It's not safe to update acpi_gbl_trace_method_name without
	 * having the tracer stopped, so we save the original tracer
	 * state and disable it.
	 */
	saved_flags = acpi_gbl_trace_flags;
	(void)acpi_debug_trace(NULL,
			       acpi_gbl_trace_dbg_level,
			       acpi_gbl_trace_dbg_layer,
			       0);

	/* This is a hack.  We can't kmalloc in early boot. */
	if (is_abs_path)
		strcpy(trace_method_name, val);
	else {
		trace_method_name[0] = '\\';
		strcpy(trace_method_name + 1, val);
	}

	/* Restore the original tracer state */
	(void)acpi_debug_trace(trace_method_name,
			       acpi_gbl_trace_dbg_level,
			       acpi_gbl_trace_dbg_layer,
			       saved_flags);

	return 0;
}

static int param_get_trace_method_name(char *buffer, const struct kernel_param *kp)
{
	return sysfs_emit(buffer, "%s\n", acpi_gbl_trace_method_name);
}

static const struct kernel_param_ops param_ops_trace_method = {
	.set = param_set_trace_method_name,
	.get = param_get_trace_method_name,
};

static const struct kernel_param_ops param_ops_trace_attrib = {
	.set = param_set_uint,
	.get = param_get_uint,
};

module_param_cb(trace_method_name, &param_ops_trace_method, &trace_method_name, 0644);
module_param_cb(trace_debug_layer, &param_ops_trace_attrib, &acpi_gbl_trace_dbg_layer, 0644);
module_param_cb(trace_debug_level, &param_ops_trace_attrib, &acpi_gbl_trace_dbg_level, 0644);

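/*
 * trace_state accepts "enable", "disable", "method", "method-once",
 * "opcode" and "opcode-once".  The "method*" and "opcode*" keywords trace
 * the method named by trace_method_name, while "enable" and "disable"
 * start and stop tracing without selecting a method.  A typical sequence
 * (the method path below is only an example):
 *	echo '\_SB.PCI0.LPCB.EC0._Q66' > /sys/module/acpi/parameters/trace_method_name
 *	echo method-once > /sys/module/acpi/parameters/trace_state
 */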
static int param_set_trace_state(const char *val,
				 const struct kernel_param *kp)
{
	acpi_status status;
	const char *method = trace_method_name;
	u32 flags = 0;

	/* Compare "xxx-once" before "xxx" so that the longer keyword wins */
#define acpi_compare_param(val, key)	\
	strncmp((val), (key), sizeof(key) - 1)

	if (!acpi_compare_param(val, "enable")) {
		method = NULL;
		flags = ACPI_TRACE_ENABLED;
	} else if (!acpi_compare_param(val, "disable"))
		method = NULL;
	else if (!acpi_compare_param(val, "method-once"))
		flags = ACPI_TRACE_ENABLED | ACPI_TRACE_ONESHOT;
	else if (!acpi_compare_param(val, "method"))
		flags = ACPI_TRACE_ENABLED;
	else if (!acpi_compare_param(val, "opcode-once"))
		flags = ACPI_TRACE_ENABLED | ACPI_TRACE_ONESHOT | ACPI_TRACE_OPCODE;
	else if (!acpi_compare_param(val, "opcode"))
		flags = ACPI_TRACE_ENABLED | ACPI_TRACE_OPCODE;
	else
		return -EINVAL;

	status = acpi_debug_trace(method,
				  acpi_gbl_trace_dbg_level,
				  acpi_gbl_trace_dbg_layer,
				  flags);
	if (ACPI_FAILURE(status))
		return -EBUSY;

	return 0;
}

static int param_get_trace_state(char *buffer, const struct kernel_param *kp)
{
	if (!(acpi_gbl_trace_flags & ACPI_TRACE_ENABLED))
		return sprintf(buffer, "disable\n");
	if (!acpi_gbl_trace_method_name)
		return sprintf(buffer, "enable\n");
	if (acpi_gbl_trace_flags & ACPI_TRACE_ONESHOT)
		return sprintf(buffer, "method-once\n");
	else
		return sprintf(buffer, "method\n");
}

module_param_call(trace_state, param_set_trace_state, param_get_trace_state,
		  NULL, 0644);
#endif /* CONFIG_ACPI_DEBUG */


/* /sys/module/acpi/parameters/aml_debug_output */

module_param_named(aml_debug_output, acpi_gbl_enable_aml_debug_object,
		   byte, 0644);
MODULE_PARM_DESC(aml_debug_output,
		 "To enable/disable the ACPI Debug Object output.");

/* /sys/module/acpi/parameters/acpica_version */
static int param_get_acpica_version(char *buffer,
				    const struct kernel_param *kp)
{
	int result;

	result = sprintf(buffer, "%x\n", ACPI_CA_VERSION);

	return result;
}

module_param_call(acpica_version, NULL, param_get_acpica_version, NULL, 0444);

/*
 * ACPI table sysfs I/F:
 * /sys/firmware/acpi/tables/
 * /sys/firmware/acpi/tables/data/
 * /sys/firmware/acpi/tables/dynamic/
 */

static LIST_HEAD(acpi_table_attr_list);
static struct kobject *tables_kobj;
static struct kobject *tables_data_kobj;
static struct kobject *dynamic_tables_kobj;
static struct kobject *hotplug_kobj;

#define ACPI_MAX_TABLE_INSTANCES	999
#define ACPI_INST_SIZE			4 /* including trailing 0 */

struct acpi_table_attr {
	struct bin_attribute attr;
	char name[ACPI_NAMESEG_SIZE];
	int instance;
	char filename[ACPI_NAMESEG_SIZE + ACPI_INST_SIZE];
	struct list_head node;
};

struct acpi_data_attr {
	struct bin_attribute attr;
	u64 addr;
};

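/*
 * Read callback for the binary files under /sys/firmware/acpi/tables/: look
 * the table up by signature and instance and copy the requested range of its
 * raw contents into the caller's buffer.
 */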
static ssize_t acpi_table_show(struct file *filp, struct kobject *kobj,
			       struct bin_attribute *bin_attr, char *buf,
			       loff_t offset, size_t count)
{
	struct acpi_table_attr *table_attr =
	    container_of(bin_attr, struct acpi_table_attr, attr);
	struct acpi_table_header *table_header = NULL;
	acpi_status status;
	ssize_t rc;

	status = acpi_get_table(table_attr->name, table_attr->instance,
				&table_header);
	if (ACPI_FAILURE(status))
		return -ENODEV;

	rc = memory_read_from_buffer(buf, count, &offset, table_header,
				     table_header->length);
	acpi_put_table(table_header);
	return rc;
}

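/*
 * Create the sysfs file for one table.  When more than one instance of a
 * signature is present (e.g. several SSDTs), the file names carry a numeric
 * suffix ("SSDT1", "SSDT2", ...); a single instance keeps the bare signature.
 */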
static int acpi_table_attr_init(struct kobject *tables_obj,
				struct acpi_table_attr *table_attr,
				struct acpi_table_header *table_header)
{
	struct acpi_table_header *header = NULL;
	struct acpi_table_attr *attr = NULL;
	char instance_str[ACPI_INST_SIZE];

	sysfs_attr_init(&table_attr->attr.attr);
	ACPI_COPY_NAMESEG(table_attr->name, table_header->signature);

	list_for_each_entry(attr, &acpi_table_attr_list, node) {
		if (ACPI_COMPARE_NAMESEG(table_attr->name, attr->name))
			if (table_attr->instance < attr->instance)
				table_attr->instance = attr->instance;
	}
	table_attr->instance++;
	if (table_attr->instance > ACPI_MAX_TABLE_INSTANCES) {
		pr_warn("%4.4s: too many table instances\n", table_attr->name);
		return -ERANGE;
	}

	ACPI_COPY_NAMESEG(table_attr->filename, table_header->signature);
	table_attr->filename[ACPI_NAMESEG_SIZE] = '\0';
	if (table_attr->instance > 1 || (table_attr->instance == 1 &&
					 !acpi_get_table
					 (table_header->signature, 2, &header))) {
		snprintf(instance_str, sizeof(instance_str), "%u",
			 table_attr->instance);
		strcat(table_attr->filename, instance_str);
	}

	table_attr->attr.size = table_header->length;
	table_attr->attr.read = acpi_table_show;
	table_attr->attr.attr.name = table_attr->filename;
	table_attr->attr.attr.mode = 0400;

	return sysfs_create_bin_file(tables_obj, &table_attr->attr);
}

acpi_status acpi_sysfs_table_handler(u32 event, void *table, void *context)
{
	struct acpi_table_attr *table_attr;

	switch (event) {
	case ACPI_TABLE_EVENT_INSTALL:
		table_attr = kzalloc(sizeof(*table_attr), GFP_KERNEL);
		if (!table_attr)
			return AE_NO_MEMORY;

		if (acpi_table_attr_init(dynamic_tables_kobj,
					 table_attr, table)) {
			kfree(table_attr);
			return AE_ERROR;
		}
		list_add_tail(&table_attr->node, &acpi_table_attr_list);
		break;
	case ACPI_TABLE_EVENT_LOAD:
	case ACPI_TABLE_EVENT_UNLOAD:
	case ACPI_TABLE_EVENT_UNINSTALL:
		/*
		 * we do not need to do anything right now
		 * because the table is not deleted from the
		 * global table list when unloading it.
		 */
		break;
	default:
		return AE_BAD_PARAMETER;
	}
	return AE_OK;
}

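/*
 * Read callback for the entries under /sys/firmware/acpi/tables/data/ (only
 * BERT at present): the backing firmware region is mapped on every read and
 * the requested window is copied out of it.
 */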
static ssize_t acpi_data_show(struct file *filp, struct kobject *kobj,
			      struct bin_attribute *bin_attr, char *buf,
			      loff_t offset, size_t count)
{
	struct acpi_data_attr *data_attr;
	void __iomem *base;
	ssize_t size;

	data_attr = container_of(bin_attr, struct acpi_data_attr, attr);
	size = data_attr->attr.size;

	if (offset < 0)
		return -EINVAL;

	if (offset >= size)
		return 0;

	if (count > size - offset)
		count = size - offset;

	base = acpi_os_map_iomem(data_attr->addr, size);
	if (!base)
		return -ENOMEM;

	memcpy_fromio(buf, base + offset, count);

	acpi_os_unmap_iomem(base, size);

	return count;
}

static int acpi_bert_data_init(void *th, struct acpi_data_attr *data_attr)
{
	struct acpi_table_bert *bert = th;

	if (bert->header.length < sizeof(struct acpi_table_bert) ||
	    bert->region_length < sizeof(struct acpi_hest_generic_status)) {
		kfree(data_attr);
		return -EINVAL;
	}
	data_attr->addr = bert->address;
	data_attr->attr.size = bert->region_length;
	data_attr->attr.attr.name = "BERT";

	return sysfs_create_bin_file(tables_data_kobj, &data_attr->attr);
}

static struct acpi_data_obj {
	char *name;
	int (*fn)(void *, struct acpi_data_attr *);
} acpi_data_objs[] = {
	{ ACPI_SIG_BERT, acpi_bert_data_init },
};

#define NUM_ACPI_DATA_OBJS ARRAY_SIZE(acpi_data_objs)

static int acpi_table_data_init(struct acpi_table_header *th)
{
	struct acpi_data_attr *data_attr;
	int i;

	for (i = 0; i < NUM_ACPI_DATA_OBJS; i++) {
		if (ACPI_COMPARE_NAMESEG(th->signature, acpi_data_objs[i].name)) {
			data_attr = kzalloc(sizeof(*data_attr), GFP_KERNEL);
			if (!data_attr)
				return -ENOMEM;
			sysfs_attr_init(&data_attr->attr.attr);
			data_attr->attr.read = acpi_data_show;
			data_attr->attr.attr.mode = 0400;
			return acpi_data_objs[i].fn(th, data_attr);
		}
	}
	return 0;
}

static int acpi_tables_sysfs_init(void)
{
	struct acpi_table_attr *table_attr;
	struct acpi_table_header *table_header = NULL;
	int table_index;
	acpi_status status;
	int ret;

	tables_kobj = kobject_create_and_add("tables", acpi_kobj);
	if (!tables_kobj)
		goto err;

	tables_data_kobj = kobject_create_and_add("data", tables_kobj);
	if (!tables_data_kobj)
		goto err_tables_data;

	dynamic_tables_kobj = kobject_create_and_add("dynamic", tables_kobj);
	if (!dynamic_tables_kobj)
		goto err_dynamic_tables;

	for (table_index = 0;; table_index++) {
		status = acpi_get_table_by_index(table_index, &table_header);

		if (status == AE_BAD_PARAMETER)
			break;

		if (ACPI_FAILURE(status))
			continue;

		table_attr = kzalloc(sizeof(*table_attr), GFP_KERNEL);
		if (!table_attr)
			return -ENOMEM;

		ret = acpi_table_attr_init(tables_kobj,
					   table_attr, table_header);
		if (ret) {
			kfree(table_attr);
			return ret;
		}
		list_add_tail(&table_attr->node, &acpi_table_attr_list);
		acpi_table_data_init(table_header);
	}

	kobject_uevent(tables_kobj, KOBJ_ADD);
	kobject_uevent(tables_data_kobj, KOBJ_ADD);
	kobject_uevent(dynamic_tables_kobj, KOBJ_ADD);

	return 0;
err_dynamic_tables:
	kobject_put(tables_data_kobj);
err_tables_data:
	kobject_put(tables_kobj);
err:
	return -ENOMEM;
}

/*
 * Detailed ACPI IRQ counters:
 * /sys/firmware/acpi/interrupts/
 */

u32 acpi_irq_handled;
u32 acpi_irq_not_handled;

#define COUNT_GPE 0
#define COUNT_SCI 1		/* acpi_irq_handled */
#define COUNT_SCI_NOT 2		/* acpi_irq_not_handled */
#define COUNT_ERROR 3		/* other */
#define NUM_COUNTERS_EXTRA 4

struct event_counter {
	u32 count;
	u32 flags;
};

static struct event_counter *all_counters;
static u32 num_gpes;
static u32 num_counters;
static struct attribute **all_attrs;
static u32 acpi_gpe_count;

static struct attribute_group interrupt_stats_attr_group = {
	.name = "interrupts",
};

static struct kobj_attribute *counter_attrs;

static void delete_gpe_attr_array(void)
{
	struct event_counter *tmp = all_counters;

	all_counters = NULL;
	kfree(tmp);

	if (counter_attrs) {
		int i;

		for (i = 0; i < num_gpes; i++)
			kfree(counter_attrs[i].attr.name);

		kfree(counter_attrs);
	}
	kfree(all_attrs);
}

static void gpe_count(u32 gpe_number)
{
	acpi_gpe_count++;

	if (!all_counters)
		return;

	if (gpe_number < num_gpes)
		all_counters[gpe_number].count++;
	else
		all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS +
			     COUNT_ERROR].count++;
}

static void fixed_event_count(u32 event_number)
{
	if (!all_counters)
		return;

	if (event_number < ACPI_NUM_FIXED_EVENTS)
		all_counters[num_gpes + event_number].count++;
	else
		all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS +
			     COUNT_ERROR].count++;
}

static void acpi_global_event_handler(u32 event_type, acpi_handle device,
				      u32 event_number, void *context)
{
	if (event_type == ACPI_EVENT_TYPE_GPE) {
		gpe_count(event_number);
		pr_debug("GPE event 0x%02x\n", event_number);
	} else if (event_type == ACPI_EVENT_TYPE_FIXED) {
		fixed_event_count(event_number);
		pr_debug("Fixed event 0x%02x\n", event_number);
	} else {
		pr_debug("Other event 0x%02x\n", event_number);
	}
}

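/*
 * Map a counter index to the status of the corresponding event: indexes
 * below num_gpes are GPEs (the owning GPE device is returned through
 * @handle), the following ACPI_NUM_FIXED_EVENTS indexes are fixed events.
 */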
static int get_status(u32 index, acpi_event_status *ret,
		      acpi_handle *handle)
{
	acpi_status status;

	if (index >= num_gpes + ACPI_NUM_FIXED_EVENTS)
		return -EINVAL;

	if (index < num_gpes) {
		status = acpi_get_gpe_device(index, handle);
		if (ACPI_FAILURE(status)) {
			pr_warn("Invalid GPE 0x%x\n", index);
			return -ENXIO;
		}
		status = acpi_get_gpe_status(*handle, index, ret);
	} else {
		status = acpi_get_event_status(index - num_gpes, ret);
	}
	if (ACPI_FAILURE(status))
		return -EIO;

	return 0;
}

static ssize_t counter_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	int index = attr - counter_attrs;
	int size;
	acpi_handle handle;
	acpi_event_status status;
	int result = 0;

	all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI].count =
	    acpi_irq_handled;
	all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI_NOT].count =
	    acpi_irq_not_handled;
	all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_GPE].count =
	    acpi_gpe_count;
	size = sprintf(buf, "%8u", all_counters[index].count);

	/* "gpe_all" or "sci" */
	if (index >= num_gpes + ACPI_NUM_FIXED_EVENTS)
		goto end;

	result = get_status(index, &status, &handle);
	if (result)
		goto end;

	if (status & ACPI_EVENT_FLAG_ENABLE_SET)
		size += sprintf(buf + size, " EN");
	else
		size += sprintf(buf + size, " ");
	if (status & ACPI_EVENT_FLAG_STATUS_SET)
		size += sprintf(buf + size, " STS");
	else
		size += sprintf(buf + size, " ");

	if (!(status & ACPI_EVENT_FLAG_HAS_HANDLER))
		size += sprintf(buf + size, " invalid ");
	else if (status & ACPI_EVENT_FLAG_ENABLED)
		size += sprintf(buf + size, " enabled ");
	else if (status & ACPI_EVENT_FLAG_WAKE_ENABLED)
		size += sprintf(buf + size, " wake_enabled");
	else
		size += sprintf(buf + size, " disabled ");
	if (status & ACPI_EVENT_FLAG_MASKED)
		size += sprintf(buf + size, " masked ");
	else
		size += sprintf(buf + size, " unmasked");

end:
	size += sprintf(buf + size, "\n");
	return result ? result : size;
}

/*
 * counter_set() sets the specified counter.  Writing any value to the total
 * "sci" file clears all counters.  It can also be used to enable, disable or
 * clear a GPE or fixed event from user space.
 */
static ssize_t counter_set(struct kobject *kobj,
			   struct kobj_attribute *attr, const char *buf,
			   size_t size)
{
	int index = attr - counter_attrs;
	acpi_event_status status;
	acpi_handle handle;
	int result = 0;
	unsigned long tmp;

	if (index == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI) {
		int i;

		for (i = 0; i < num_counters; ++i)
			all_counters[i].count = 0;
		acpi_gpe_count = 0;
		acpi_irq_handled = 0;
		acpi_irq_not_handled = 0;
		goto end;
	}

	/* show the event status for both GPEs and Fixed Events */
	result = get_status(index, &status, &handle);
	if (result)
		goto end;

	if (!(status & ACPI_EVENT_FLAG_HAS_HANDLER)) {
		pr_warn("Can not change Invalid GPE/Fixed Event status\n");
		return -EINVAL;
	}

	if (index < num_gpes) {
		if (!strcmp(buf, "disable\n") &&
		    (status & ACPI_EVENT_FLAG_ENABLED))
			result = acpi_disable_gpe(handle, index);
		else if (!strcmp(buf, "enable\n") &&
			 !(status & ACPI_EVENT_FLAG_ENABLED))
			result = acpi_enable_gpe(handle, index);
		else if (!strcmp(buf, "clear\n") &&
			 (status & ACPI_EVENT_FLAG_STATUS_SET))
			result = acpi_clear_gpe(handle, index);
		else if (!strcmp(buf, "mask\n"))
			result = acpi_mask_gpe(handle, index, TRUE);
		else if (!strcmp(buf, "unmask\n"))
			result = acpi_mask_gpe(handle, index, FALSE);
		else if (!kstrtoul(buf, 0, &tmp))
			all_counters[index].count = tmp;
		else
			result = -EINVAL;
	} else if (index < num_gpes + ACPI_NUM_FIXED_EVENTS) {
		int event = index - num_gpes;

		if (!strcmp(buf, "disable\n") &&
		    (status & ACPI_EVENT_FLAG_ENABLE_SET))
			result = acpi_disable_event(event, ACPI_NOT_ISR);
		else if (!strcmp(buf, "enable\n") &&
			 !(status & ACPI_EVENT_FLAG_ENABLE_SET))
			result = acpi_enable_event(event, ACPI_NOT_ISR);
		else if (!strcmp(buf, "clear\n") &&
			 (status & ACPI_EVENT_FLAG_STATUS_SET))
			result = acpi_clear_event(event);
		else if (!kstrtoul(buf, 0, &tmp))
			all_counters[index].count = tmp;
		else
			result = -EINVAL;
	} else
		all_counters[index].count = strtoul(buf, NULL, 0);

	if (ACPI_FAILURE(result))
		result = -EINVAL;
end:
	return result ? result : size;
}

/*
 * A Quirk Mechanism for GPE Flooding Prevention:
 *
 * Quirks may be needed to prevent GPE flooding on a specific GPE.  The
 * flooding typically cannot be detected and automatically prevented by the
 * ACPI_GPE_DISPATCH_NONE check because a _Lxx/_Exx handler is prepared in
 * the AML tables.  This normally indicates a feature gap in Linux, so
 * instead of providing endless quirk tables, we provide a boot parameter
 * for those who want this quirk.  For example, to prevent GPE flooding on
 * GPE 00, specify the following boot parameter:
 *	acpi_mask_gpe=0x00
 * Note that the parameter can be a list (see bitmap_parselist() for the
 * details).
 * The masking status can be modified at runtime through the following
 * interface:
 *	echo unmask > /sys/firmware/acpi/interrupts/gpe00
 */
#define ACPI_MASKABLE_GPE_MAX	0x100
static DECLARE_BITMAP(acpi_masked_gpes_map, ACPI_MASKABLE_GPE_MAX) __initdata;

static int __init acpi_gpe_set_masked_gpes(char *val)
{
	int ret;
	u8 gpe;

	ret = kstrtou8(val, 0, &gpe);
	if (ret) {
		ret = bitmap_parselist(val, acpi_masked_gpes_map, ACPI_MASKABLE_GPE_MAX);
		if (ret)
			return ret;
	} else
		set_bit(gpe, acpi_masked_gpes_map);

	return 1;
}
__setup("acpi_mask_gpe=", acpi_gpe_set_masked_gpes);

void __init acpi_gpe_apply_masked_gpes(void)
{
	acpi_handle handle;
	acpi_status status;
	u16 gpe;

	for_each_set_bit(gpe, acpi_masked_gpes_map, ACPI_MASKABLE_GPE_MAX) {
		status = acpi_get_gpe_device(gpe, &handle);
		if (ACPI_SUCCESS(status)) {
			pr_info("Masking GPE 0x%x.\n", gpe);
			(void)acpi_mask_gpe(handle, gpe, TRUE);
		}
	}
}

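/*
 * Allocate one counter per GPE and per fixed event, plus the extra gpe_all,
 * sci, sci_not and error entries, install the global event handler that
 * updates them, and expose the counters under /sys/firmware/acpi/interrupts/.
 */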
void acpi_irq_stats_init(void)
{
	acpi_status status;
	int i;

	if (all_counters)
		return;

	num_gpes = acpi_current_gpe_count;
	num_counters = num_gpes + ACPI_NUM_FIXED_EVENTS + NUM_COUNTERS_EXTRA;

	all_attrs = kcalloc(num_counters + 1, sizeof(*all_attrs), GFP_KERNEL);
	if (all_attrs == NULL)
		return;

	all_counters = kcalloc(num_counters, sizeof(*all_counters), GFP_KERNEL);
	if (all_counters == NULL)
		goto fail;

	status = acpi_install_global_event_handler(acpi_global_event_handler, NULL);
	if (ACPI_FAILURE(status))
		goto fail;

	counter_attrs = kcalloc(num_counters, sizeof(*counter_attrs), GFP_KERNEL);
	if (counter_attrs == NULL)
		goto fail;

	for (i = 0; i < num_counters; ++i) {
		char buffer[12];
		char *name;

		if (i < num_gpes)
			sprintf(buffer, "gpe%02X", i);
		else if (i == num_gpes + ACPI_EVENT_PMTIMER)
			sprintf(buffer, "ff_pmtimer");
		else if (i == num_gpes + ACPI_EVENT_GLOBAL)
			sprintf(buffer, "ff_gbl_lock");
		else if (i == num_gpes + ACPI_EVENT_POWER_BUTTON)
			sprintf(buffer, "ff_pwr_btn");
		else if (i == num_gpes + ACPI_EVENT_SLEEP_BUTTON)
			sprintf(buffer, "ff_slp_btn");
		else if (i == num_gpes + ACPI_EVENT_RTC)
			sprintf(buffer, "ff_rt_clk");
		else if (i == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_GPE)
			sprintf(buffer, "gpe_all");
		else if (i == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI)
			sprintf(buffer, "sci");
		else if (i == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI_NOT)
			sprintf(buffer, "sci_not");
		else if (i == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_ERROR)
			sprintf(buffer, "error");
		else
			sprintf(buffer, "bug%02X", i);

		name = kstrdup(buffer, GFP_KERNEL);
		if (name == NULL)
			goto fail;

		sysfs_attr_init(&counter_attrs[i].attr);
		counter_attrs[i].attr.name = name;
		counter_attrs[i].attr.mode = 0644;
		counter_attrs[i].show = counter_show;
		counter_attrs[i].store = counter_set;

		all_attrs[i] = &counter_attrs[i].attr;
	}

	interrupt_stats_attr_group.attrs = all_attrs;
	if (!sysfs_create_group(acpi_kobj, &interrupt_stats_attr_group))
		return;

fail:
	delete_gpe_attr_array();
}

static void __exit interrupt_stats_exit(void)
{
	sysfs_remove_group(acpi_kobj, &interrupt_stats_attr_group);

	delete_gpe_attr_array();
}

static ssize_t pm_profile_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", acpi_gbl_FADT.preferred_profile);
}

static const struct kobj_attribute pm_profile_attr = __ATTR_RO(pm_profile);

static ssize_t enabled_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct acpi_hotplug_profile *hotplug = to_acpi_hotplug_profile(kobj);

	return sprintf(buf, "%d\n", hotplug->enabled);
}

static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr,
			     const char *buf, size_t size)
{
	struct acpi_hotplug_profile *hotplug = to_acpi_hotplug_profile(kobj);
	unsigned int val;

	if (kstrtouint(buf, 10, &val) || val > 1)
		return -EINVAL;

	acpi_scan_hotplug_enabled(hotplug, val);
	return size;
}

static struct kobj_attribute hotplug_enabled_attr = __ATTR_RW(enabled);

static struct attribute *hotplug_profile_attrs[] = {
	&hotplug_enabled_attr.attr,
	NULL
};
ATTRIBUTE_GROUPS(hotplug_profile);

static struct kobj_type acpi_hotplug_profile_ktype = {
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = hotplug_profile_groups,
};

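/*
 * Create /sys/firmware/acpi/hotplug/<name>/ with its "enabled" attribute for
 * the given hotplug profile.
 */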
void acpi_sysfs_add_hotplug_profile(struct acpi_hotplug_profile *hotplug,
				    const char *name)
{
	int error;

	if (!hotplug_kobj)
		goto err_out;

	error = kobject_init_and_add(&hotplug->kobj,
				     &acpi_hotplug_profile_ktype,
				     hotplug_kobj, "%s", name);
	if (error) {
		kobject_put(&hotplug->kobj);
		goto err_out;
	}

	kobject_uevent(&hotplug->kobj, KOBJ_ADD);
	return;

err_out:
	pr_err("Unable to add hotplug profile '%s'\n", name);
}

static ssize_t force_remove_show(struct kobject *kobj,
				 struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", 0);
}

static ssize_t force_remove_store(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  const char *buf, size_t size)
{
	bool val;
	int ret;

	ret = kstrtobool(buf, &val);
	if (ret < 0)
		return ret;

	if (val) {
		pr_err("Enabling force_remove is not supported anymore. Please report to linux-acpi@vger.kernel.org if you depend on this functionality\n");
		return -EINVAL;
	}
	return size;
}

static const struct kobj_attribute force_remove_attr = __ATTR_RW(force_remove);

int __init acpi_sysfs_init(void)
{
	int result;

	result = acpi_tables_sysfs_init();
	if (result)
		return result;

	hotplug_kobj = kobject_create_and_add("hotplug", acpi_kobj);
	if (!hotplug_kobj)
		return -ENOMEM;

	result = sysfs_create_file(hotplug_kobj, &force_remove_attr.attr);
	if (result)
		return result;

	result = sysfs_create_file(acpi_kobj, &pm_profile_attr.attr);
	return result;
}