/******************************************************************************
 *
 * Module Name: evgpe - General Purpose Event handling and dispatch
 *
 *****************************************************************************/

/*
 * Copyright (C) 2000 - 2017, Intel Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 */

#include <acpi/acpi.h>
#include "accommon.h"
#include "acevents.h"
#include "acnamesp.h"

#define _COMPONENT          ACPI_EVENTS
ACPI_MODULE_NAME("evgpe")
#if (!ACPI_REDUCED_HARDWARE)	/* Entire module */
/* Local prototypes */
static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context);

static void ACPI_SYSTEM_XFACE acpi_ev_asynch_enable_gpe(void *context);
/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_update_gpe_enable_mask
 *
 * PARAMETERS:  gpe_event_info          - GPE to update
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Updates GPE register enable mask based upon whether there are
 *              runtime references to this GPE
 *
 ******************************************************************************/

acpi_status
acpi_ev_update_gpe_enable_mask(struct acpi_gpe_event_info *gpe_event_info)
{
	struct acpi_gpe_register_info *gpe_register_info;
	u32 register_bit;

	ACPI_FUNCTION_TRACE(ev_update_gpe_enable_mask);

	gpe_register_info = gpe_event_info->register_info;
	if (!gpe_register_info) {
		return_ACPI_STATUS(AE_NOT_EXIST);
	}

	register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info);

	/* Clear the run bit up front */

	ACPI_CLEAR_BIT(gpe_register_info->enable_for_run, register_bit);

	/* Set the mask bit only if there are references to this GPE */

	if (gpe_event_info->runtime_count) {
		ACPI_SET_BIT(gpe_register_info->enable_for_run,
			     (u8)register_bit);
	}

	gpe_register_info->enable_mask = gpe_register_info->enable_for_run;
	return_ACPI_STATUS(AE_OK);
}

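/*
 * Editor's sketch (not part of this file): how the register_bit used above
 * is expected to be derived. In ACPICA, acpi_hw_get_gpe_register_bit()
 * returns a one-hot mask locating this GPE within its 8-bit register; a
 * minimal equivalent, assuming a valid register_info pointer, is sketched
 * below. Guarded with #if 0 so it is never built.
 */
#if 0
static u32 example_gpe_register_bit(struct acpi_gpe_event_info *gpe_event_info)
{
	/* Bit position = GPE number relative to this register's base GPE */

	return ((u32)1 << (gpe_event_info->gpe_number -
			   gpe_event_info->register_info->base_gpe_number));
}
#endif
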
/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_enable_gpe
 *
 * PARAMETERS:  gpe_event_info          - GPE to enable
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Clear a GPE of stale events and enable it.
 *
 ******************************************************************************/

acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
{
	acpi_status status;

	ACPI_FUNCTION_TRACE(ev_enable_gpe);

	/* Clear the GPE (of stale events) */

	status = acpi_hw_clear_gpe(gpe_event_info);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/* Enable the requested GPE */

	status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_ENABLE);
	return_ACPI_STATUS(status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_mask_gpe
 *
 * PARAMETERS:  gpe_event_info          - GPE to be blocked/unblocked
 *              is_masked               - TRUE to mask the GPE, FALSE to
 *                                        unmask it
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Unconditionally mask/unmask a GPE during runtime.
 *
 ******************************************************************************/

acpi_status
acpi_ev_mask_gpe(struct acpi_gpe_event_info *gpe_event_info, u8 is_masked)
{
	struct acpi_gpe_register_info *gpe_register_info;
	u32 register_bit;

	ACPI_FUNCTION_TRACE(ev_mask_gpe);

	gpe_register_info = gpe_event_info->register_info;
	if (!gpe_register_info) {
		return_ACPI_STATUS(AE_NOT_EXIST);
	}

	register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info);

	/* Perform the action */

	if (is_masked) {
		if (register_bit & gpe_register_info->mask_for_run) {
			return_ACPI_STATUS(AE_BAD_PARAMETER);
		}

		(void)acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE);
		ACPI_SET_BIT(gpe_register_info->mask_for_run, (u8)register_bit);
	} else {
		if (!(register_bit & gpe_register_info->mask_for_run)) {
			return_ACPI_STATUS(AE_BAD_PARAMETER);
		}

		ACPI_CLEAR_BIT(gpe_register_info->mask_for_run,
			       (u8)register_bit);
		if (gpe_event_info->runtime_count
		    && !gpe_event_info->disable_for_dispatch) {
			(void)acpi_hw_low_set_gpe(gpe_event_info,
						  ACPI_GPE_ENABLE);
		}
	}

	return_ACPI_STATUS(AE_OK);
}

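/*
 * Editor's usage sketch (hypothetical caller, not ACPICA code): masking is
 * strictly paired. Masking an already-masked GPE, or unmasking one that is
 * not masked, fails with AE_BAD_PARAMETER, and an unmask only re-enables
 * the hardware when runtime references exist and the GPE is not disabled
 * for dispatch. Guarded with #if 0 so it is never built.
 */
#if 0
static void example_mask_pairing(struct acpi_gpe_event_info *gpe_event_info)
{
	(void)acpi_ev_mask_gpe(gpe_event_info, TRUE);	/* AE_OK */
	(void)acpi_ev_mask_gpe(gpe_event_info, TRUE);	/* AE_BAD_PARAMETER */
	(void)acpi_ev_mask_gpe(gpe_event_info, FALSE);	/* AE_OK */
	(void)acpi_ev_mask_gpe(gpe_event_info, FALSE);	/* AE_BAD_PARAMETER */
}
#endif
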
/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_add_gpe_reference
 *
 * PARAMETERS:  gpe_event_info          - Add a reference to this GPE
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Add a reference to a GPE. On the first reference, the GPE is
 *              hardware-enabled.
 *
 ******************************************************************************/

acpi_status
acpi_ev_add_gpe_reference(struct acpi_gpe_event_info *gpe_event_info)
{
	acpi_status status = AE_OK;

	ACPI_FUNCTION_TRACE(ev_add_gpe_reference);

	if (gpe_event_info->runtime_count == ACPI_UINT8_MAX) {
		return_ACPI_STATUS(AE_LIMIT);
	}

	gpe_event_info->runtime_count++;
	if (gpe_event_info->runtime_count == 1) {

		/* Enable on first reference */

		status = acpi_ev_update_gpe_enable_mask(gpe_event_info);
		if (ACPI_SUCCESS(status)) {
			status = acpi_ev_enable_gpe(gpe_event_info);
		}

		if (ACPI_FAILURE(status)) {
			gpe_event_info->runtime_count--;
		}
	}

	return_ACPI_STATUS(status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_remove_gpe_reference
 *
 * PARAMETERS:  gpe_event_info          - Remove a reference to this GPE
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Remove a reference to a GPE. When the last reference is
 *              removed, the GPE is hardware-disabled.
 *
 ******************************************************************************/

acpi_status
acpi_ev_remove_gpe_reference(struct acpi_gpe_event_info *gpe_event_info)
{
	acpi_status status = AE_OK;

	ACPI_FUNCTION_TRACE(ev_remove_gpe_reference);

	if (!gpe_event_info->runtime_count) {
		return_ACPI_STATUS(AE_LIMIT);
	}

	gpe_event_info->runtime_count--;
	if (!gpe_event_info->runtime_count) {

		/* Disable on last reference */

		status = acpi_ev_update_gpe_enable_mask(gpe_event_info);
		if (ACPI_SUCCESS(status)) {
			status =
			    acpi_hw_low_set_gpe(gpe_event_info,
						ACPI_GPE_DISABLE);
		}

		if (ACPI_FAILURE(status)) {
			gpe_event_info->runtime_count++;
		}
	}

	return_ACPI_STATUS(status);
}

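/*
 * Editor's usage sketch (hypothetical caller, not ACPICA code): the two
 * functions above form a reference-counted enable/disable pair. Only the
 * 0 -> 1 transition touches the hardware enable, and only the 1 -> 0
 * transition disables it again. Guarded with #if 0 so it is never built.
 */
#if 0
static void example_gpe_refcount(struct acpi_gpe_event_info *gpe_event_info)
{
	/* First reference: updates enable mask and hardware-enables the GPE */

	if (ACPI_FAILURE(acpi_ev_add_gpe_reference(gpe_event_info))) {
		return;
	}

	/* ... the GPE may now fire and be dispatched ... */

	/* Last reference: hardware-disables the GPE again */

	(void)acpi_ev_remove_gpe_reference(gpe_event_info);
}
#endif
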
/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_low_get_gpe_info
 *
 * PARAMETERS:  gpe_number          - Raw GPE number
 *              gpe_block           - A GPE info block
 *
 * RETURN:      A GPE event_info struct. NULL if not a valid GPE (The gpe_number
 *              is not within the specified GPE block)
 *
 * DESCRIPTION: Returns the event_info struct associated with this GPE. This is
 *              the low-level implementation of ev_get_gpe_event_info.
 *
 ******************************************************************************/

struct acpi_gpe_event_info *acpi_ev_low_get_gpe_info(u32 gpe_number,
						     struct acpi_gpe_block_info
						     *gpe_block)
{
	u32 gpe_index;

	/*
	 * Validate that the gpe_number is within the specified gpe_block.
	 * (Two steps)
	 */
	if (!gpe_block || (gpe_number < gpe_block->block_base_number)) {
		return (NULL);
	}

	gpe_index = gpe_number - gpe_block->block_base_number;
	if (gpe_index >= gpe_block->gpe_count) {
		return (NULL);
	}

	return (&gpe_block->event_info[gpe_index]);
}

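/*
 * Editor's sketch (hypothetical values, not ACPICA code): the two-step
 * range check above with concrete numbers, assuming a block with
 * block_base_number == 0x40 and gpe_count == 0x10. Guarded with #if 0
 * so it is never built.
 */
#if 0
static void example_low_get_gpe_info(struct acpi_gpe_block_info *gpe_block)
{
	struct acpi_gpe_event_info *info;

	info = acpi_ev_low_get_gpe_info(0x42, gpe_block); /* &event_info[2] */
	info = acpi_ev_low_get_gpe_info(0x3F, gpe_block); /* NULL: below base */
	info = acpi_ev_low_get_gpe_info(0x50, gpe_block); /* NULL: past count */
}
#endif
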
/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_get_gpe_event_info
 *
 * PARAMETERS:  gpe_device          - Device node. NULL for GPE0/GPE1
 *              gpe_number          - Raw GPE number
 *
 * RETURN:      A GPE event_info struct. NULL if not a valid GPE
 *
 * DESCRIPTION: Returns the event_info struct associated with this GPE.
 *              Validates the gpe_block and the gpe_number
 *
 *              Should be called only when the GPE lists are semaphore locked
 *              and not subject to change.
 *
 ******************************************************************************/

struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device,
						       u32 gpe_number)
{
	union acpi_operand_object *obj_desc;
	struct acpi_gpe_event_info *gpe_info;
	u32 i;

	ACPI_FUNCTION_ENTRY();

	/* A NULL gpe_device means use the FADT-defined GPE block(s) */

	if (!gpe_device) {

		/* Examine GPE Block 0 and 1 (These blocks are permanent) */

		for (i = 0; i < ACPI_MAX_GPE_BLOCKS; i++) {
			gpe_info = acpi_ev_low_get_gpe_info(gpe_number,
							    acpi_gbl_gpe_fadt_blocks
							    [i]);
			if (gpe_info) {
				return (gpe_info);
			}
		}

		/* The gpe_number was not in the range of either FADT GPE block */

		return (NULL);
	}

	/* A Non-NULL gpe_device means this is a GPE Block Device */

	obj_desc =
	    acpi_ns_get_attached_object((struct acpi_namespace_node *)
					gpe_device);
	if (!obj_desc || !obj_desc->device.gpe_block) {
		return (NULL);
	}

	return (acpi_ev_low_get_gpe_info
		(gpe_number, obj_desc->device.gpe_block));
}

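/*
 * Editor's note: two lookup paths exist above. A NULL gpe_device walks the
 * permanent FADT GPE0/GPE1 blocks; a non-NULL gpe_device resolves a GPE
 * Block Device by fetching the gpe_block attached to its namespace node.
 * For example (hypothetical number), acpi_ev_get_gpe_event_info(NULL, 0x42)
 * searches only the FADT blocks.
 */
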
/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_gpe_detect
 *
 * PARAMETERS:  gpe_xrupt_list      - Interrupt block for this interrupt.
 *                                    Can have multiple GPE blocks attached.
 *
 * RETURN:      INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED
 *
 * DESCRIPTION: Detect if any GP events have occurred. This function is
 *              executed at interrupt level.
 *
 ******************************************************************************/

u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info *gpe_xrupt_list)
{
	acpi_status status;
	struct acpi_gpe_block_info *gpe_block;
	struct acpi_namespace_node *gpe_device;
	struct acpi_gpe_register_info *gpe_register_info;
	struct acpi_gpe_event_info *gpe_event_info;
	u32 gpe_number;
	struct acpi_gpe_handler_info *gpe_handler_info;
	u32 int_status = ACPI_INTERRUPT_NOT_HANDLED;
	u8 enabled_status_byte;
	u64 status_reg;
	u64 enable_reg;
	acpi_cpu_flags flags;
	u32 i;
	u32 j;

	ACPI_FUNCTION_NAME(ev_gpe_detect);

	/* Check for the case where there are no GPEs */

	if (!gpe_xrupt_list) {
		return (int_status);
	}

	/*
	 * We need to obtain the GPE lock for both the data structs and registers.
	 * Note: Not necessary to obtain the hardware lock, since the GPE
	 * registers are owned by the gpe_lock.
	 */
	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);

	/* Examine all GPE blocks attached to this interrupt level */

	gpe_block = gpe_xrupt_list->gpe_block_list_head;
	while (gpe_block) {
		gpe_device = gpe_block->node;

		/*
		 * Read all of the 8-bit GPE status and enable registers in this GPE
		 * block, saving all of them. Find all currently active GP events.
		 */
		for (i = 0; i < gpe_block->register_count; i++) {

			/* Get the next status/enable pair */

			gpe_register_info = &gpe_block->register_info[i];

			/*
			 * Optimization: If there are no GPEs enabled within this
			 * register, we can safely ignore the entire register.
			 */
			if (!(gpe_register_info->enable_for_run |
			      gpe_register_info->enable_for_wake)) {
				ACPI_DEBUG_PRINT((ACPI_DB_INTERRUPTS,
						  "Ignore disabled registers for GPE %02X-%02X: "
						  "RunEnable=%02X, WakeEnable=%02X\n",
						  gpe_register_info->
						  base_gpe_number,
						  gpe_register_info->
						  base_gpe_number +
						  (ACPI_GPE_REGISTER_WIDTH - 1),
						  gpe_register_info->
						  enable_for_run,
						  gpe_register_info->
						  enable_for_wake));
				continue;
			}

			/* Read the Status Register */

			status =
			    acpi_hw_read(&status_reg,
					 &gpe_register_info->status_address);
			if (ACPI_FAILURE(status)) {
				goto unlock_and_exit;
			}

			/* Read the Enable Register */

			status =
			    acpi_hw_read(&enable_reg,
					 &gpe_register_info->enable_address);
			if (ACPI_FAILURE(status)) {
				goto unlock_and_exit;
			}

			ACPI_DEBUG_PRINT((ACPI_DB_INTERRUPTS,
					  "Read registers for GPE %02X-%02X: Status=%02X, Enable=%02X, "
					  "RunEnable=%02X, WakeEnable=%02X\n",
					  gpe_register_info->base_gpe_number,
					  gpe_register_info->base_gpe_number +
					  (ACPI_GPE_REGISTER_WIDTH - 1),
					  (u32)status_reg, (u32)enable_reg,
					  gpe_register_info->enable_for_run,
					  gpe_register_info->enable_for_wake));

			/* Check if there is anything active at all in this register */

			enabled_status_byte = (u8)(status_reg & enable_reg);
			if (!enabled_status_byte) {

				/* No active GPEs in this register, move on */

				continue;
			}

			/* Now look at the individual GPEs in this byte register */

			for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {

				/* Examine one GPE bit */

				gpe_event_info =
				    &gpe_block->
				    event_info[((acpi_size)i *
						ACPI_GPE_REGISTER_WIDTH) + j];
				gpe_number =
				    j + gpe_register_info->base_gpe_number;

				if (enabled_status_byte & (1 << j)) {

					/* Invoke global event handler if present */

					acpi_gpe_count++;
					if (acpi_gbl_global_event_handler) {
						acpi_gbl_global_event_handler
						    (ACPI_EVENT_TYPE_GPE,
						     gpe_device, gpe_number,
						     acpi_gbl_global_event_handler_context);
					}

					/* Found an active GPE */

					if (ACPI_GPE_DISPATCH_TYPE
					    (gpe_event_info->flags) ==
					    ACPI_GPE_DISPATCH_RAW_HANDLER) {

						/* Dispatch the event to a raw handler */

						gpe_handler_info =
						    gpe_event_info->dispatch.
						    handler;

						/*
						 * There is no protection around the namespace node
						 * and the GPE handler to ensure a safe destruction
						 * because:
						 * 1. The namespace node is expected to always
						 *    exist after loading a table.
						 * 2. The GPE handler is expected to be flushed by
						 *    acpi_os_wait_events_complete() before the
						 *    destruction.
						 */
						acpi_os_release_lock
						    (acpi_gbl_gpe_lock, flags);
						int_status |=
						    gpe_handler_info->
						    address(gpe_device,
							    gpe_number,
							    gpe_handler_info->
							    context);
						flags =
						    acpi_os_acquire_lock
						    (acpi_gbl_gpe_lock);
					} else {
						/*
						 * Dispatch the event to a standard handler or
						 * method.
						 */
						int_status |=
						    acpi_ev_gpe_dispatch
						    (gpe_device, gpe_event_info,
						     gpe_number);
					}
				}
			}
		}

		gpe_block = gpe_block->next;
	}

unlock_and_exit:

	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
	return (int_status);
}

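/*
 * Editor's distilled sketch (not ACPICA code) of the detection core above:
 * a GPE is only considered active when its bit is set in both the status
 * and enable registers, and each set bit is then dispatched individually.
 * Assumes one status/enable register pair has already been read. Guarded
 * with #if 0 so it is never built.
 */
#if 0
static void
example_detect_core(u8 status_reg, u8 enable_reg,
		    struct acpi_gpe_register_info *gpe_register_info)
{
	u8 enabled_status_byte = (u8)(status_reg & enable_reg);
	u32 j;

	if (!enabled_status_byte) {
		return;		/* Nothing active in this register */
	}

	for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
		if (enabled_status_byte & (1 << j)) {

			/*
			 * GPE (gpe_register_info->base_gpe_number + j) is
			 * active; dispatch it to its handler or method
			 */
		}
	}
}
#endif
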
/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_asynch_execute_gpe_method
 *
 * PARAMETERS:  Context (gpe_event_info) - Info for this GPE
 *
 * RETURN:      None
 *
 * DESCRIPTION: Perform the actual execution of a GPE control method. This
 *              function is called from an invocation of acpi_os_execute and
 *              therefore does NOT execute at interrupt level - so that
 *              the control method itself is not executed in the context of
 *              an interrupt handler.
 *
 ******************************************************************************/

static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
{
	struct acpi_gpe_event_info *gpe_event_info = context;
	acpi_status status = AE_OK;
	struct acpi_evaluate_info *info;
	struct acpi_gpe_notify_info *notify;

	ACPI_FUNCTION_TRACE(ev_asynch_execute_gpe_method);

	/* Do the correct dispatch - normal method or implicit notify */

	switch (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags)) {
	case ACPI_GPE_DISPATCH_NOTIFY:
		/*
		 * Implicit notify.
		 * Dispatch a DEVICE_WAKE notify to the appropriate handler.
		 * NOTE: the request is queued for execution after this method
		 * completes. The notify handlers are NOT invoked synchronously
		 * from this thread -- because handlers may in turn run other
		 * control methods.
		 *
		 * June 2012: Expand implicit notify mechanism to support
		 * notifies on multiple device objects.
		 */
		notify = gpe_event_info->dispatch.notify_list;
		while (ACPI_SUCCESS(status) && notify) {
			status =
			    acpi_ev_queue_notify_request(notify->device_node,
							 ACPI_NOTIFY_DEVICE_WAKE);

			notify = notify->next;
		}

		break;

	case ACPI_GPE_DISPATCH_METHOD:

		/* Allocate the evaluation information block */

		info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
		if (!info) {
			status = AE_NO_MEMORY;
		} else {
			/*
			 * Invoke the GPE Method (_Lxx, _Exx) i.e., evaluate the
			 * _Lxx/_Exx control method that corresponds to this GPE
			 */
			info->prefix_node =
			    gpe_event_info->dispatch.method_node;
			info->flags = ACPI_IGNORE_RETURN_VALUE;

			status = acpi_ns_evaluate(info);
			ACPI_FREE(info);
		}

		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"while evaluating GPE method [%4.4s]",
					acpi_ut_get_node_name(gpe_event_info->
							      dispatch.
							      method_node)));
		}
		break;

	default:

		goto error_exit;	/* Should never happen */
	}

	/* Defer enabling of GPE until all notify handlers are done */

	status = acpi_os_execute(OSL_NOTIFY_HANDLER,
				 acpi_ev_asynch_enable_gpe, gpe_event_info);
	if (ACPI_SUCCESS(status)) {
		return_VOID;
	}

error_exit:
	acpi_ev_asynch_enable_gpe(gpe_event_info);
	return_VOID;
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_asynch_enable_gpe
 *
 * PARAMETERS:  Context (gpe_event_info) - Info for this GPE
 *              Callback from acpi_os_execute
 *
 * RETURN:      None
 *
 * DESCRIPTION: Asynchronous clear/enable for GPE. This allows the GPE to
 *              complete (i.e., finish execution of Notify)
 *
 ******************************************************************************/

static void ACPI_SYSTEM_XFACE acpi_ev_asynch_enable_gpe(void *context)
{
	struct acpi_gpe_event_info *gpe_event_info = context;
	acpi_cpu_flags flags;

	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
	(void)acpi_ev_finish_gpe(gpe_event_info);
	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);

	return;
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_finish_gpe
 *
 * PARAMETERS:  gpe_event_info      - Info for this GPE
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Clear/Enable a GPE. Common code that is used after execution
 *              of a GPE method or a synchronous or asynchronous GPE handler.
 *
 ******************************************************************************/

acpi_status acpi_ev_finish_gpe(struct acpi_gpe_event_info *gpe_event_info)
{
	acpi_status status;

	if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
	    ACPI_GPE_LEVEL_TRIGGERED) {
		/*
		 * GPE is level-triggered, we clear the GPE status bit after
		 * handling the event.
		 */
		status = acpi_hw_clear_gpe(gpe_event_info);
		if (ACPI_FAILURE(status)) {
			return (status);
		}
	}

	/*
	 * Enable this GPE, conditionally. This means that the GPE will
	 * only be physically enabled if the enable_mask bit is set
	 * in the event_info.
	 */
	(void)acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_CONDITIONAL_ENABLE);
	gpe_event_info->disable_for_dispatch = FALSE;
	return (AE_OK);
}

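/*
 * Editor's note on status-bit clearing (inferred from the comments in this
 * file): edge-triggered GPEs are cleared up front in acpi_ev_gpe_dispatch()
 * below, before any handler runs, while level-triggered GPEs are cleared
 * here in acpi_ev_finish_gpe(), only after the event has been serviced,
 * since a still-asserted level condition would otherwise immediately
 * re-latch the status bit.
 */
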
/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_gpe_dispatch
 *
 * PARAMETERS:  gpe_device          - Device node. NULL for GPE0/GPE1
 *              gpe_event_info      - Info for this GPE
 *              gpe_number          - Number relative to the parent GPE block
 *
 * RETURN:      INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED
 *
 * DESCRIPTION: Dispatch a General Purpose Event to either a function (e.g. EC)
 *              or method (e.g. _Lxx/_Exx) handler.
 *
 *              This function executes at interrupt level.
 *
 ******************************************************************************/

u32
acpi_ev_gpe_dispatch(struct acpi_namespace_node *gpe_device,
		     struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
{
	acpi_status status;
	u32 return_value;

	ACPI_FUNCTION_TRACE(ev_gpe_dispatch);

	/*
	 * Always disable the GPE so that it does not keep firing before
	 * any asynchronous activity completes (either from the execution
	 * of a GPE method or an asynchronous GPE handler.)
	 *
	 * If there is no handler or method to run, just disable the
	 * GPE and leave it disabled permanently to prevent further such
	 * pointless events from firing.
	 */
	status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE);
	if (ACPI_FAILURE(status)) {
		ACPI_EXCEPTION((AE_INFO, status,
				"Unable to disable GPE %02X", gpe_number));
		return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
	}

	/*
	 * If edge-triggered, clear the GPE status bit now. Note that
	 * level-triggered events are cleared after the GPE is serviced.
	 */
	if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
	    ACPI_GPE_EDGE_TRIGGERED) {
		status = acpi_hw_clear_gpe(gpe_event_info);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"Unable to clear GPE %02X",
					gpe_number));
			(void)acpi_hw_low_set_gpe(gpe_event_info,
						  ACPI_GPE_CONDITIONAL_ENABLE);
			return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
		}
	}

	gpe_event_info->disable_for_dispatch = TRUE;

	/*
	 * Dispatch the GPE to either an installed handler or the control
	 * method associated with this GPE (_Lxx or _Exx). If a handler
	 * exists, we invoke it and do not attempt to run the method.
	 * If there is neither a handler nor a method, leave the GPE
	 * disabled.
	 */
	switch (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags)) {
	case ACPI_GPE_DISPATCH_HANDLER:

		/* Invoke the installed handler (at interrupt level) */

		return_value =
		    gpe_event_info->dispatch.handler->address(gpe_device,
							      gpe_number,
							      gpe_event_info->
							      dispatch.handler->
							      context);

		/* If requested, clear (if level-triggered) and reenable the GPE */

		if (return_value & ACPI_REENABLE_GPE) {
			(void)acpi_ev_finish_gpe(gpe_event_info);
		}
		break;

	case ACPI_GPE_DISPATCH_METHOD:
	case ACPI_GPE_DISPATCH_NOTIFY:
		/*
		 * Execute the method associated with the GPE
		 * NOTE: Level-triggered GPEs are cleared after the method completes.
		 */
		status = acpi_os_execute(OSL_GPE_HANDLER,
					 acpi_ev_asynch_execute_gpe_method,
					 gpe_event_info);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"Unable to queue handler for GPE %02X - event disabled",
					gpe_number));
		}
		break;

	default:
		/*
		 * No handler or method to run!
		 * 03/2010: This case should no longer be possible. We will not allow
		 * a GPE to be enabled if it has no handler or method.
		 */
		ACPI_ERROR((AE_INFO,
			    "No handler or method for GPE %02X, disabling event",
			    gpe_number));

		break;
	}

	return_UINT32(ACPI_INTERRUPT_HANDLED);
}

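/*
 * Editor's sketch (hypothetical handler, not ACPICA code): a GPE handler
 * installed via acpi_install_gpe_handler() can return ACPI_REENABLE_GPE to
 * ask the dispatcher above to clear (if level-triggered) and re-enable the
 * GPE on its behalf via acpi_ev_finish_gpe(). Guarded with #if 0 so it is
 * never built.
 */
#if 0
static u32
example_gpe_handler(acpi_handle gpe_device, u32 gpe_number, void *context)
{
	/* ... service the event at interrupt level ... */

	return (ACPI_REENABLE_GPE);
}
#endif
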
#endif				/* !ACPI_REDUCED_HARDWARE */