/******************************************************************************
 *
 * Module Name: evgpe - General Purpose Event handling and dispatch
 *
 *****************************************************************************/

/*
 * Copyright (C) 2000 - 2014, Intel Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 */

#include <acpi/acpi.h>
#include "accommon.h"
#include "acevents.h"
#include "acnamesp.h"

#define _COMPONENT          ACPI_EVENTS
ACPI_MODULE_NAME("evgpe")
#if (!ACPI_REDUCED_HARDWARE)	/* Entire module */
/* Local prototypes */
static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context);

static void ACPI_SYSTEM_XFACE acpi_ev_asynch_enable_gpe(void *context);

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_update_gpe_enable_mask
 *
 * PARAMETERS:  gpe_event_info      - GPE to update
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Updates GPE register enable mask based upon whether there are
 *              runtime references to this GPE
 *
 ******************************************************************************/

acpi_status
acpi_ev_update_gpe_enable_mask(struct acpi_gpe_event_info *gpe_event_info)
{
	struct acpi_gpe_register_info *gpe_register_info;
	u32 register_bit;

	ACPI_FUNCTION_TRACE(ev_update_gpe_enable_mask);

	gpe_register_info = gpe_event_info->register_info;
	if (!gpe_register_info) {
		return_ACPI_STATUS(AE_NOT_EXIST);
	}

	register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info);

	/* Clear the run bit up front */

	ACPI_CLEAR_BIT(gpe_register_info->enable_for_run, register_bit);

	/* Set the mask bit only if there are references to this GPE */

	if (gpe_event_info->runtime_count) {
		ACPI_SET_BIT(gpe_register_info->enable_for_run,
			     (u8)register_bit);
	}

	return_ACPI_STATUS(AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_enable_gpe
 *
 * PARAMETERS:  gpe_event_info      - GPE to enable
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Clear a GPE of stale events and enable it.
 *
 ******************************************************************************/

acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
{
	acpi_status status;

	ACPI_FUNCTION_TRACE(ev_enable_gpe);

	/*
	 * We will only allow a GPE to be enabled if it has either an associated
	 * method (_Lxx/_Exx) or a handler, or is using the implicit notify
	 * feature. Otherwise, the GPE will be immediately disabled by
	 * acpi_ev_gpe_dispatch the first time it fires.
	 */
	if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
	    ACPI_GPE_DISPATCH_NONE) {
		return_ACPI_STATUS(AE_NO_HANDLER);
	}

	/* Clear the GPE (of stale events) */

	status = acpi_hw_clear_gpe(gpe_event_info);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/* Enable the requested GPE */

	status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_ENABLE);
	return_ACPI_STATUS(status);
}
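/*
 * Worked example (editorial note, not part of the original ACPICA source):
 * acpi_hw_get_gpe_register_bit() returns a one-bit mask locating a GPE
 * within its 8-bit register. For a register with base_gpe_number 0x20,
 * GPE 0x23 maps to bit 3, i.e. register_bit == 1 << 3 == 0x08. If
 * runtime_count is nonzero, acpi_ev_update_gpe_enable_mask() sets that bit
 * in enable_for_run (e.g. 0x00 -> 0x08); otherwise the bit stays cleared,
 * so a later conditional enable will not re-arm this GPE.
 */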
150 * 151 ******************************************************************************/ 152 153 acpi_status 154 acpi_ev_add_gpe_reference(struct acpi_gpe_event_info *gpe_event_info) 155 { 156 acpi_status status = AE_OK; 157 158 ACPI_FUNCTION_TRACE(ev_add_gpe_reference); 159 160 if (gpe_event_info->runtime_count == ACPI_UINT8_MAX) { 161 return_ACPI_STATUS(AE_LIMIT); 162 } 163 164 gpe_event_info->runtime_count++; 165 if (gpe_event_info->runtime_count == 1) { 166 167 /* Enable on first reference */ 168 169 status = acpi_ev_update_gpe_enable_mask(gpe_event_info); 170 if (ACPI_SUCCESS(status)) { 171 status = acpi_ev_enable_gpe(gpe_event_info); 172 } 173 174 if (ACPI_FAILURE(status)) { 175 gpe_event_info->runtime_count--; 176 } 177 } 178 179 return_ACPI_STATUS(status); 180 } 181 182 /******************************************************************************* 183 * 184 * FUNCTION: acpi_ev_remove_gpe_reference 185 * 186 * PARAMETERS: gpe_event_info - Remove a reference to this GPE 187 * 188 * RETURN: Status 189 * 190 * DESCRIPTION: Remove a reference to a GPE. When the last reference is 191 * removed, the GPE is hardware-disabled. 192 * 193 ******************************************************************************/ 194 195 acpi_status 196 acpi_ev_remove_gpe_reference(struct acpi_gpe_event_info *gpe_event_info) 197 { 198 acpi_status status = AE_OK; 199 200 ACPI_FUNCTION_TRACE(ev_remove_gpe_reference); 201 202 if (!gpe_event_info->runtime_count) { 203 return_ACPI_STATUS(AE_LIMIT); 204 } 205 206 gpe_event_info->runtime_count--; 207 if (!gpe_event_info->runtime_count) { 208 209 /* Disable on last reference */ 210 211 status = acpi_ev_update_gpe_enable_mask(gpe_event_info); 212 if (ACPI_SUCCESS(status)) { 213 status = 214 acpi_hw_low_set_gpe(gpe_event_info, 215 ACPI_GPE_DISABLE); 216 } 217 218 if (ACPI_FAILURE(status)) { 219 gpe_event_info->runtime_count++; 220 } 221 } 222 223 return_ACPI_STATUS(status); 224 } 225 226 /******************************************************************************* 227 * 228 * FUNCTION: acpi_ev_low_get_gpe_info 229 * 230 * PARAMETERS: gpe_number - Raw GPE number 231 * gpe_block - A GPE info block 232 * 233 * RETURN: A GPE event_info struct. NULL if not a valid GPE (The gpe_number 234 * is not within the specified GPE block) 235 * 236 * DESCRIPTION: Returns the event_info struct associated with this GPE. This is 237 * the low-level implementation of ev_get_gpe_event_info. 238 * 239 ******************************************************************************/ 240 241 struct acpi_gpe_event_info *acpi_ev_low_get_gpe_info(u32 gpe_number, 242 struct acpi_gpe_block_info 243 *gpe_block) 244 { 245 u32 gpe_index; 246 247 /* 248 * Validate that the gpe_number is within the specified gpe_block. 249 * (Two steps) 250 */ 251 if (!gpe_block || (gpe_number < gpe_block->block_base_number)) { 252 return (NULL); 253 } 254 255 gpe_index = gpe_number - gpe_block->block_base_number; 256 if (gpe_index >= gpe_block->gpe_count) { 257 return (NULL); 258 } 259 260 return (&gpe_block->event_info[gpe_index]); 261 } 262 263 264 /******************************************************************************* 265 * 266 * FUNCTION: acpi_ev_get_gpe_event_info 267 * 268 * PARAMETERS: gpe_device - Device node. NULL for GPE0/GPE1 269 * gpe_number - Raw GPE number 270 * 271 * RETURN: A GPE event_info struct. NULL if not a valid GPE 272 * 273 * DESCRIPTION: Returns the event_info struct associated with this GPE. 
/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_get_gpe_event_info
 *
 * PARAMETERS:  gpe_device          - Device node. NULL for GPE0/GPE1
 *              gpe_number          - Raw GPE number
 *
 * RETURN:      A GPE event_info struct. NULL if not a valid GPE
 *
 * DESCRIPTION: Returns the event_info struct associated with this GPE.
 *              Validates the gpe_block and the gpe_number
 *
 *              Should be called only when the GPE lists are semaphore locked
 *              and not subject to change.
 *
 ******************************************************************************/

struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device,
						       u32 gpe_number)
{
	union acpi_operand_object *obj_desc;
	struct acpi_gpe_event_info *gpe_info;
	u32 i;

	ACPI_FUNCTION_ENTRY();

	/* A NULL gpe_device means use the FADT-defined GPE block(s) */

	if (!gpe_device) {

		/* Examine GPE Block 0 and 1 (These blocks are permanent) */

		for (i = 0; i < ACPI_MAX_GPE_BLOCKS; i++) {
			gpe_info = acpi_ev_low_get_gpe_info(gpe_number,
							    acpi_gbl_gpe_fadt_blocks[i]);
			if (gpe_info) {
				return (gpe_info);
			}
		}

		/* The gpe_number was not in the range of either FADT GPE block */

		return (NULL);
	}

	/* A Non-NULL gpe_device means this is a GPE Block Device */

	obj_desc = acpi_ns_get_attached_object((struct acpi_namespace_node *)
					       gpe_device);
	if (!obj_desc || !obj_desc->device.gpe_block) {
		return (NULL);
	}

	return (acpi_ev_low_get_gpe_info(gpe_number,
					 obj_desc->device.gpe_block));
}
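/*
 * Usage sketch (editorial note with hypothetical values, not part of the
 * original source): acpi_ev_get_gpe_event_info(NULL, 0x42) searches the
 * permanent FADT blocks (GPE0, then GPE1) for raw GPE 0x42, while passing
 * a GPE block device handle restricts the lookup to the block attached to
 * that device. Both paths funnel into acpi_ev_low_get_gpe_info() above.
 */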
/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_gpe_detect
 *
 * PARAMETERS:  gpe_xrupt_list      - Interrupt block for this interrupt.
 *                                    Can have multiple GPE blocks attached.
 *
 * RETURN:      INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED
 *
 * DESCRIPTION: Detect if any GP events have occurred. This function is
 *              executed at interrupt level.
 *
 ******************************************************************************/

u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info *gpe_xrupt_list)
{
	acpi_status status;
	struct acpi_gpe_block_info *gpe_block;
	struct acpi_gpe_register_info *gpe_register_info;
	u32 int_status = ACPI_INTERRUPT_NOT_HANDLED;
	u8 enabled_status_byte;
	u32 status_reg;
	u32 enable_reg;
	acpi_cpu_flags flags;
	u32 i;
	u32 j;

	ACPI_FUNCTION_NAME(ev_gpe_detect);

	/* Check for the case where there are no GPEs */

	if (!gpe_xrupt_list) {
		return (int_status);
	}

	/*
	 * We need to obtain the GPE lock for both the data structs and registers
	 * Note: Not necessary to obtain the hardware lock, since the GPE
	 * registers are owned by the gpe_lock.
	 */
	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);

	/* Examine all GPE blocks attached to this interrupt level */

	gpe_block = gpe_xrupt_list->gpe_block_list_head;
	while (gpe_block) {
		/*
		 * Read all of the 8-bit GPE status and enable registers in this GPE
		 * block, saving all of them. Find all currently active GP events.
		 */
		for (i = 0; i < gpe_block->register_count; i++) {

			/* Get the next status/enable pair */

			gpe_register_info = &gpe_block->register_info[i];

			/*
			 * Optimization: If there are no GPEs enabled within this
			 * register, we can safely ignore the entire register.
			 */
			if (!(gpe_register_info->enable_for_run |
			      gpe_register_info->enable_for_wake)) {
				ACPI_DEBUG_PRINT((ACPI_DB_INTERRUPTS,
						  "Ignore disabled registers for GPE %02X-%02X: "
						  "RunEnable=%02X, WakeEnable=%02X\n",
						  gpe_register_info->base_gpe_number,
						  gpe_register_info->base_gpe_number +
						  (ACPI_GPE_REGISTER_WIDTH - 1),
						  gpe_register_info->enable_for_run,
						  gpe_register_info->enable_for_wake));
				continue;
			}

			/* Read the Status Register */

			status = acpi_hw_read(&status_reg,
					      &gpe_register_info->status_address);
			if (ACPI_FAILURE(status)) {
				goto unlock_and_exit;
			}

			/* Read the Enable Register */

			status = acpi_hw_read(&enable_reg,
					      &gpe_register_info->enable_address);
			if (ACPI_FAILURE(status)) {
				goto unlock_and_exit;
			}

			ACPI_DEBUG_PRINT((ACPI_DB_INTERRUPTS,
					  "Read registers for GPE %02X-%02X: Status=%02X, Enable=%02X, "
					  "RunEnable=%02X, WakeEnable=%02X\n",
					  gpe_register_info->base_gpe_number,
					  gpe_register_info->base_gpe_number +
					  (ACPI_GPE_REGISTER_WIDTH - 1),
					  status_reg, enable_reg,
					  gpe_register_info->enable_for_run,
					  gpe_register_info->enable_for_wake));

			/* Check if there is anything active at all in this register */

			enabled_status_byte = (u8)(status_reg & enable_reg);
			if (!enabled_status_byte) {

				/* No active GPEs in this register, move on */

				continue;
			}

			/* Now look at the individual GPEs in this byte register */

			for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {

				/* Examine one GPE bit */

				if (enabled_status_byte & (1 << j)) {
					/*
					 * Found an active GPE. Dispatch the event to a handler
					 * or method.
					 */
					int_status |=
					    acpi_ev_gpe_dispatch(gpe_block->node,
								 &gpe_block->event_info[((acpi_size)i * ACPI_GPE_REGISTER_WIDTH) + j],
								 j + gpe_register_info->base_gpe_number);
				}
			}
		}

		gpe_block = gpe_block->next;
	}

unlock_and_exit:

	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
	return (int_status);
}
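/*
 * Worked example (editorial note, not part of the original source): suppose
 * status_reg == 0x81 and enable_reg == 0x01 for the register covering GPEs
 * 0x00-0x07. Then enabled_status_byte == 0x01, so only bit 0 survives the
 * mask and acpi_ev_gpe_dispatch() is invoked exactly once, for GPE 0x00;
 * the pending-but-disabled event in status bit 7 (GPE 0x07) is ignored.
 */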
/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_asynch_execute_gpe_method
 *
 * PARAMETERS:  context (gpe_event_info) - Info for this GPE
 *
 * RETURN:      None
 *
 * DESCRIPTION: Perform the actual execution of a GPE control method. This
 *              function is called from an invocation of acpi_os_execute and
 *              therefore does NOT execute at interrupt level - so that
 *              the control method itself is not executed in the context of
 *              an interrupt handler.
 *
 ******************************************************************************/

static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
{
	struct acpi_gpe_event_info *gpe_event_info = context;
	acpi_status status;
	struct acpi_gpe_event_info *local_gpe_event_info;
	struct acpi_evaluate_info *info;
	struct acpi_gpe_notify_info *notify;

	ACPI_FUNCTION_TRACE(ev_asynch_execute_gpe_method);

	/* Allocate a local GPE block */

	local_gpe_event_info =
	    ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_event_info));
	if (!local_gpe_event_info) {
		ACPI_EXCEPTION((AE_INFO, AE_NO_MEMORY, "while handling a GPE"));
		return_VOID;
	}

	status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
	if (ACPI_FAILURE(status)) {
		ACPI_FREE(local_gpe_event_info);
		return_VOID;
	}

	/* Must revalidate the gpe_number/gpe_block */

	if (!acpi_ev_valid_gpe_event(gpe_event_info)) {
		status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
		ACPI_FREE(local_gpe_event_info);
		return_VOID;
	}

	/*
	 * Take a snapshot of the GPE info for this level - we copy the info to
	 * prevent a race condition with remove_handler/remove_block.
	 */
	ACPI_MEMCPY(local_gpe_event_info, gpe_event_info,
		    sizeof(struct acpi_gpe_event_info));

	status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
	if (ACPI_FAILURE(status)) {
		ACPI_FREE(local_gpe_event_info);
		return_VOID;
	}

	/* Do the correct dispatch - normal method or implicit notify */

	switch (local_gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) {
	case ACPI_GPE_DISPATCH_NOTIFY:
		/*
		 * Implicit notify.
		 * Dispatch a DEVICE_WAKE notify to the appropriate handler.
		 * NOTE: the request is queued for execution after this method
		 * completes. The notify handlers are NOT invoked synchronously
		 * from this thread -- because handlers may in turn run other
		 * control methods.
		 *
		 * June 2012: Expand implicit notify mechanism to support
		 * notifies on multiple device objects.
		 */
		notify = local_gpe_event_info->dispatch.notify_list;
		while (ACPI_SUCCESS(status) && notify) {
			status =
			    acpi_ev_queue_notify_request(notify->device_node,
							 ACPI_NOTIFY_DEVICE_WAKE);

			notify = notify->next;
		}

		break;

	case ACPI_GPE_DISPATCH_METHOD:

		/* Allocate the evaluation information block */

		info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
		if (!info) {
			status = AE_NO_MEMORY;
		} else {
			/*
			 * Invoke the GPE Method (_Lxx, _Exx) i.e., evaluate the
			 * _Lxx/_Exx control method that corresponds to this GPE
			 */
			info->prefix_node =
			    local_gpe_event_info->dispatch.method_node;
			info->flags = ACPI_IGNORE_RETURN_VALUE;

			status = acpi_ns_evaluate(info);
			ACPI_FREE(info);
		}

		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"while evaluating GPE method [%4.4s]",
					acpi_ut_get_node_name
					(local_gpe_event_info->dispatch.
					 method_node)));
		}
		break;

	default:

		/*
		 * Should never happen. Free the local snapshot on this path;
		 * nothing was queued that would otherwise release it.
		 */
		ACPI_FREE(local_gpe_event_info);
		return_VOID;
	}

	/* Defer enabling of GPE until all notify handlers are done */

	status = acpi_os_execute(OSL_NOTIFY_HANDLER,
				 acpi_ev_asynch_enable_gpe,
				 local_gpe_event_info);
	if (ACPI_FAILURE(status)) {
		ACPI_FREE(local_gpe_event_info);
	}
	return_VOID;
}
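/*
 * Note on ownership (editorial summary of the code above, not original
 * commentary): local_gpe_event_info is a heap snapshot of the GPE info.
 * Every early-exit path frees it in acpi_ev_asynch_execute_gpe_method();
 * on success it is handed to acpi_os_execute() and freed by
 * acpi_ev_asynch_enable_gpe() below, so exactly one owner releases it on
 * every path.
 */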
/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_asynch_enable_gpe
 *
 * PARAMETERS:  context (gpe_event_info) - Info for this GPE
 *              Callback from acpi_os_execute
 *
 * RETURN:      None
 *
 * DESCRIPTION: Asynchronous clear/enable for GPE. This allows the GPE to
 *              complete (i.e., finish execution of Notify)
 *
 ******************************************************************************/

static void ACPI_SYSTEM_XFACE acpi_ev_asynch_enable_gpe(void *context)
{
	struct acpi_gpe_event_info *gpe_event_info = context;

	(void)acpi_ev_finish_gpe(gpe_event_info);

	ACPI_FREE(gpe_event_info);
	return;
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_finish_gpe
 *
 * PARAMETERS:  gpe_event_info      - Info for this GPE
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Clear/Enable a GPE. Common code that is used after execution
 *              of a GPE method or a synchronous or asynchronous GPE handler.
 *
 ******************************************************************************/

acpi_status acpi_ev_finish_gpe(struct acpi_gpe_event_info *gpe_event_info)
{
	acpi_status status;

	if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
	    ACPI_GPE_LEVEL_TRIGGERED) {
		/*
		 * GPE is level-triggered, we clear the GPE status bit after
		 * handling the event.
		 */
		status = acpi_hw_clear_gpe(gpe_event_info);
		if (ACPI_FAILURE(status)) {
			return (status);
		}
	}

	/*
	 * Enable this GPE, conditionally. This means that the GPE will
	 * only be physically enabled if the enable_for_run bit is set
	 * in the event_info.
	 */
	(void)acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_CONDITIONAL_ENABLE);
	return (AE_OK);
}
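/*
 * Summary of the clear/enable protocol (editorial note, not original
 * commentary): edge-triggered GPEs are cleared up front in
 * acpi_ev_gpe_dispatch() below, before any handler runs; level-triggered
 * GPEs are cleared here in acpi_ev_finish_gpe() only after servicing,
 * since clearing earlier would simply re-latch the status while the level
 * is still asserted. The final re-enable is conditional on enable_for_run.
 */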
/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_gpe_dispatch
 *
 * PARAMETERS:  gpe_device          - Device node. NULL for GPE0/GPE1
 *              gpe_event_info      - Info for this GPE
 *              gpe_number          - Number relative to the parent GPE block
 *
 * RETURN:      INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED
 *
 * DESCRIPTION: Dispatch a General Purpose Event to either a function (e.g. EC)
 *              or method (e.g. _Lxx/_Exx) handler.
 *
 *              This function executes at interrupt level.
 *
 ******************************************************************************/

u32
acpi_ev_gpe_dispatch(struct acpi_namespace_node *gpe_device,
		     struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
{
	acpi_status status;
	u32 return_value;

	ACPI_FUNCTION_TRACE(ev_gpe_dispatch);

	/* Invoke global event handler if present */

	acpi_gpe_count++;
	if (acpi_gbl_global_event_handler) {
		acpi_gbl_global_event_handler(ACPI_EVENT_TYPE_GPE, gpe_device,
					      gpe_number,
					      acpi_gbl_global_event_handler_context);
	}

	/*
	 * Always disable the GPE so that it does not keep firing before
	 * any asynchronous activity completes (either from the execution
	 * of a GPE method or an asynchronous GPE handler.)
	 *
	 * If there is no handler or method to run, just disable the
	 * GPE and leave it disabled permanently to prevent further such
	 * pointless events from firing.
	 */
	status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE);
	if (ACPI_FAILURE(status)) {
		ACPI_EXCEPTION((AE_INFO, status,
				"Unable to disable GPE %02X", gpe_number));
		return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
	}

	/*
	 * If edge-triggered, clear the GPE status bit now. Note that
	 * level-triggered events are cleared after the GPE is serviced.
	 */
	if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
	    ACPI_GPE_EDGE_TRIGGERED) {
		status = acpi_hw_clear_gpe(gpe_event_info);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"Unable to clear GPE %02X",
					gpe_number));
			(void)acpi_hw_low_set_gpe(gpe_event_info,
						  ACPI_GPE_CONDITIONAL_ENABLE);
			return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
		}
	}

	/*
	 * Dispatch the GPE to either an installed handler or the control
	 * method associated with this GPE (_Lxx or _Exx). If a handler
	 * exists, we invoke it and do not attempt to run the method.
	 * If there is neither a handler nor a method, leave the GPE
	 * disabled.
	 */
	switch (gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) {
	case ACPI_GPE_DISPATCH_HANDLER:

		/* Invoke the installed handler (at interrupt level) */

		return_value =
		    gpe_event_info->dispatch.handler->address(gpe_device,
							      gpe_number,
							      gpe_event_info->
							      dispatch.handler->
							      context);

		/* If requested, clear (if level-triggered) and reenable the GPE */

		if (return_value & ACPI_REENABLE_GPE) {
			(void)acpi_ev_finish_gpe(gpe_event_info);
		}
		break;

	case ACPI_GPE_DISPATCH_METHOD:
	case ACPI_GPE_DISPATCH_NOTIFY:
		/*
		 * Execute the method associated with the GPE
		 * NOTE: Level-triggered GPEs are cleared after the method completes.
		 */
		status = acpi_os_execute(OSL_GPE_HANDLER,
					 acpi_ev_asynch_execute_gpe_method,
					 gpe_event_info);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"Unable to queue handler for GPE %02X - event disabled",
					gpe_number));
		}
		break;

	default:
		/*
		 * No handler or method to run!
		 * 03/2010: This case should no longer be possible. We will not allow
		 * a GPE to be enabled if it has no handler or method.
		 */
		ACPI_ERROR((AE_INFO,
			    "No handler or method for GPE %02X, disabling event",
			    gpe_number));

		break;
	}

	return_UINT32(ACPI_INTERRUPT_HANDLED);
}

#endif				/* !ACPI_REDUCED_HARDWARE */
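/*
 * Illustrative summary (editorial note, not part of the original source) of
 * the GPE servicing pipeline implemented in this module:
 *
 *   SCI interrupt
 *     -> acpi_ev_gpe_detect()           (reads status/enable register pairs)
 *       -> acpi_ev_gpe_dispatch()       (disables the GPE, clears it if edge)
 *         -> installed handler, or
 *         -> acpi_os_execute(acpi_ev_asynch_execute_gpe_method)
 *           -> acpi_os_execute(acpi_ev_asynch_enable_gpe)
 *             -> acpi_ev_finish_gpe()   (clears if level, conditionally
 *                                        re-enables via enable_for_run)
 */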