// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/******************************************************************************
 *
 * Module Name: evgpeblk - GPE block creation and initialization.
 *
 * Copyright (C) 2000 - 2023, Intel Corp.
 *
 *****************************************************************************/

#include <acpi/acpi.h>
#include "accommon.h"
#include "acevents.h"
#include "acnamesp.h"

#define _COMPONENT          ACPI_EVENTS
ACPI_MODULE_NAME("evgpeblk")
#if (!ACPI_REDUCED_HARDWARE)	/* Entire module */
/* Local prototypes */
static acpi_status
acpi_ev_install_gpe_block(struct acpi_gpe_block_info *gpe_block,
			  u32 interrupt_number);

static acpi_status
acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block);

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_install_gpe_block
 *
 * PARAMETERS:  gpe_block               - New GPE block
 *              interrupt_number        - Xrupt to be associated with this
 *                                        GPE block
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Install new GPE block with mutex support
 *
 ******************************************************************************/

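/*
 * Illustrative sketch (an editorial addition, not part of the original
 * source): after installation the per-interrupt structure looks like
 *
 *   gpe_xrupt_block->gpe_block_list_head -> block0 <-> block1 <-> new block
 *
 * The block list is walked and modified only while holding acpi_gbl_gpe_lock;
 * ACPI_MTX_EVENTS additionally serializes block creation and deletion.
 */
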
static acpi_status
acpi_ev_install_gpe_block(struct acpi_gpe_block_info *gpe_block,
			  u32 interrupt_number)
{
	struct acpi_gpe_block_info *next_gpe_block;
	struct acpi_gpe_xrupt_info *gpe_xrupt_block;
	acpi_status status;
	acpi_cpu_flags flags;

	ACPI_FUNCTION_TRACE(ev_install_gpe_block);

	status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	status =
	    acpi_ev_get_gpe_xrupt_block(interrupt_number, &gpe_xrupt_block);
	if (ACPI_FAILURE(status)) {
		goto unlock_and_exit;
	}

	/* Install the new block at the end of the list with lock */

	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
	if (gpe_xrupt_block->gpe_block_list_head) {
		next_gpe_block = gpe_xrupt_block->gpe_block_list_head;
		while (next_gpe_block->next) {
			next_gpe_block = next_gpe_block->next;
		}

		next_gpe_block->next = gpe_block;
		gpe_block->previous = next_gpe_block;
	} else {
		gpe_xrupt_block->gpe_block_list_head = gpe_block;
	}

	gpe_block->xrupt_block = gpe_xrupt_block;
	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);

unlock_and_exit:
	(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
	return_ACPI_STATUS(status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_delete_gpe_block
 *
 * PARAMETERS:  gpe_block           - Existing GPE block
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Remove a GPE block
 *
 ******************************************************************************/

acpi_status acpi_ev_delete_gpe_block(struct acpi_gpe_block_info *gpe_block)
{
	acpi_status status;
	acpi_cpu_flags flags;

	ACPI_FUNCTION_TRACE(ev_delete_gpe_block);

	status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/* Disable all GPEs in this block */

	status =
	    acpi_hw_disable_gpe_block(gpe_block->xrupt_block, gpe_block, NULL);
	if (ACPI_FAILURE(status)) {
		goto unlock_and_exit;
	}

	if (!gpe_block->previous && !gpe_block->next) {

		/* This is the last gpe_block on this interrupt */

		status = acpi_ev_delete_gpe_xrupt(gpe_block->xrupt_block);
		if (ACPI_FAILURE(status)) {
			goto unlock_and_exit;
		}
	} else {
		/* Remove the block on this interrupt with lock */

		flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
		if (gpe_block->previous) {
			gpe_block->previous->next = gpe_block->next;
		} else {
			gpe_block->xrupt_block->gpe_block_list_head =
			    gpe_block->next;
		}

		if (gpe_block->next) {
			gpe_block->next->previous = gpe_block->previous;
		}

		acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
	}

	acpi_current_gpe_count -= gpe_block->gpe_count;

	/* Free the gpe_block */

	ACPI_FREE(gpe_block->register_info);
	ACPI_FREE(gpe_block->event_info);
	ACPI_FREE(gpe_block);

unlock_and_exit:
	(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
	return_ACPI_STATUS(status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_create_gpe_info_blocks
 *
 * PARAMETERS:  gpe_block   - New GPE block
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Create the register_info and event_info blocks for this GPE block
 *
 ******************************************************************************/

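/*
 * Illustrative sizing example (an editorial addition, not part of the
 * original source): a GPE block with register_count == 2 describes
 * 2 * ACPI_GPE_REGISTER_WIDTH == 16 GPEs, so register_info gets two
 * entries and event_info gets sixteen, all zero-initialized below.
 */
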
static acpi_status
acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block)
{
	struct acpi_gpe_register_info *gpe_register_info = NULL;
	struct acpi_gpe_event_info *gpe_event_info = NULL;
	struct acpi_gpe_event_info *this_event;
	struct acpi_gpe_register_info *this_register;
	u32 i;
	u32 j;
	acpi_status status;

	ACPI_FUNCTION_TRACE(ev_create_gpe_info_blocks);

	/* Allocate the GPE register information block */

	gpe_register_info = ACPI_ALLOCATE_ZEROED((acpi_size)gpe_block->
						 register_count *
						 sizeof(struct
							acpi_gpe_register_info));
	if (!gpe_register_info) {
		ACPI_ERROR((AE_INFO,
			    "Could not allocate the GpeRegisterInfo table"));
		return_ACPI_STATUS(AE_NO_MEMORY);
	}

	/*
	 * Allocate the GPE event_info block. There are eight distinct GPEs
	 * per register. Initialization to zeros is sufficient.
	 */
	gpe_event_info = ACPI_ALLOCATE_ZEROED((acpi_size)gpe_block->gpe_count *
					      sizeof(struct
						     acpi_gpe_event_info));
	if (!gpe_event_info) {
		ACPI_ERROR((AE_INFO,
			    "Could not allocate the GpeEventInfo table"));
		status = AE_NO_MEMORY;
		goto error_exit;
	}

	/* Save the new Info arrays in the GPE block */

	gpe_block->register_info = gpe_register_info;
	gpe_block->event_info = gpe_event_info;

	/*
	 * Initialize the GPE Register and Event structures. A goal of these
	 * tables is to hide the fact that there are two separate GPE register
	 * sets in a given GPE hardware block: the status registers occupy the
	 * first half and the enable registers occupy the second half.
	 */
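	/*
	 * Illustrative example (an editorial addition, not part of the original
	 * source): for a block at address 0x1000 with register_count == 2, the
	 * status registers are mapped at 0x1000 and 0x1001 and the enable
	 * registers at 0x1002 and 0x1003, matching the arithmetic below.
	 */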
	this_register = gpe_register_info;
	this_event = gpe_event_info;

	for (i = 0; i < gpe_block->register_count; i++) {

		/* Init the register_info for this GPE register (8 GPEs) */

		this_register->base_gpe_number = (u16)
		    (gpe_block->block_base_number +
		     (i * ACPI_GPE_REGISTER_WIDTH));

		this_register->status_address.address = gpe_block->address + i;

		this_register->enable_address.address =
		    gpe_block->address + i + gpe_block->register_count;

		this_register->status_address.space_id = gpe_block->space_id;
		this_register->enable_address.space_id = gpe_block->space_id;

		/* Init the event_info for each GPE within this register */

		for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
			this_event->gpe_number =
			    (u8) (this_register->base_gpe_number + j);
			this_event->register_info = this_register;
			this_event++;
		}

		/* Disable all GPEs within this register */

		status = acpi_hw_gpe_write(0x00, &this_register->enable_address);
		if (ACPI_FAILURE(status)) {
			goto error_exit;
		}

		/* Clear any pending GPE events within this register */

		status = acpi_hw_gpe_write(0xFF, &this_register->status_address);
		if (ACPI_FAILURE(status)) {
			goto error_exit;
		}

		this_register++;
	}

	return_ACPI_STATUS(AE_OK);

error_exit:
	if (gpe_register_info) {
		ACPI_FREE(gpe_register_info);
	}
	if (gpe_event_info) {
		ACPI_FREE(gpe_event_info);
	}

	return_ACPI_STATUS(status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_create_gpe_block
 *
 * PARAMETERS:  gpe_device          - Handle to the parent GPE block
 *              address             - Address of the GPE register block
 *              space_id            - Address space (system memory or I/O)
 *              register_count      - Number of GPE register pairs in the block
 *              gpe_block_base_number - Starting GPE number for the block
 *              interrupt_number    - H/W interrupt for the block
 *              return_gpe_block    - Where the new block descriptor is returned
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Create and install a block of GPE registers. All GPEs within
 *              the block are disabled at exit.
 *              Note: Assumes namespace is locked.
 *
 ******************************************************************************/

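/*
 * Illustrative call (an editorial sketch, not part of the original source):
 * the FADT GPE0 block is created along these lines by acpi_ev_gpe_initialize()
 * in evgpeinit.c, where register_count0 is derived from the FADT GPE0 block
 * length (half of the registers are status, half are enable):
 *
 *   status = acpi_ev_create_gpe_block(acpi_gbl_fadt_gpe_device,
 *                                     acpi_gbl_FADT.xgpe0_block.address,
 *                                     acpi_gbl_FADT.xgpe0_block.space_id,
 *                                     register_count0, 0,
 *                                     acpi_gbl_FADT.sci_interrupt,
 *                                     &acpi_gbl_gpe_fadt_blocks[0]);
 */
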
acpi_status
acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
			 u64 address,
			 u8 space_id,
			 u32 register_count,
			 u16 gpe_block_base_number,
			 u32 interrupt_number,
			 struct acpi_gpe_block_info **return_gpe_block)
{
	acpi_status status;
	struct acpi_gpe_block_info *gpe_block;
	struct acpi_gpe_walk_info walk_info;

	ACPI_FUNCTION_TRACE(ev_create_gpe_block);

	if (!register_count) {
		return_ACPI_STATUS(AE_OK);
	}

	/* Validate the space_ID */

	if ((space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) &&
	    (space_id != ACPI_ADR_SPACE_SYSTEM_IO)) {
		ACPI_ERROR((AE_INFO,
			    "Unsupported address space: 0x%X", space_id));
		return_ACPI_STATUS(AE_SUPPORT);
	}

	if (space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
		status = acpi_hw_validate_io_block(address,
						   ACPI_GPE_REGISTER_WIDTH,
						   register_count);
		if (ACPI_FAILURE(status))
			return_ACPI_STATUS(status);
	}

	/* Allocate a new GPE block */

	gpe_block = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_block_info));
	if (!gpe_block) {
		return_ACPI_STATUS(AE_NO_MEMORY);
	}

	/* Initialize the new GPE block */

	gpe_block->address = address;
	gpe_block->space_id = space_id;
	gpe_block->node = gpe_device;
	gpe_block->gpe_count = (u16)(register_count * ACPI_GPE_REGISTER_WIDTH);
	gpe_block->initialized = FALSE;
	gpe_block->register_count = register_count;
	gpe_block->block_base_number = gpe_block_base_number;

	/*
	 * Create the register_info and event_info sub-structures
	 * Note: disables and clears all GPEs in the block
	 */
	status = acpi_ev_create_gpe_info_blocks(gpe_block);
	if (ACPI_FAILURE(status)) {
		ACPI_FREE(gpe_block);
		return_ACPI_STATUS(status);
	}

	/* Install the new block in the global lists */

	status = acpi_ev_install_gpe_block(gpe_block, interrupt_number);
	if (ACPI_FAILURE(status)) {
		ACPI_FREE(gpe_block->register_info);
		ACPI_FREE(gpe_block->event_info);
		ACPI_FREE(gpe_block);
		return_ACPI_STATUS(status);
	}

	acpi_gbl_all_gpes_initialized = FALSE;

	/* Find all GPE methods (_Lxx or _Exx) for this block */

	walk_info.gpe_block = gpe_block;
	walk_info.gpe_device = gpe_device;
	walk_info.execute_by_owner_id = FALSE;

	(void)acpi_ns_walk_namespace(ACPI_TYPE_METHOD, gpe_device,
				     ACPI_UINT32_MAX, ACPI_NS_WALK_NO_UNLOCK,
				     acpi_ev_match_gpe_method, NULL, &walk_info,
				     NULL);

	/* Return the new block */

	if (return_gpe_block) {
		(*return_gpe_block) = gpe_block;
	}

	ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
			      "    Initialized GPE %02X to %02X [%4.4s] %u regs on interrupt 0x%X%s\n",
			      (u32)gpe_block->block_base_number,
			      (u32)(gpe_block->block_base_number +
				    (gpe_block->gpe_count - 1)),
			      gpe_device->name.ascii, gpe_block->register_count,
			      interrupt_number,
			      interrupt_number ==
			      acpi_gbl_FADT.sci_interrupt ? " (SCI)" : ""));

	/* Update global count of currently available GPEs */

	acpi_current_gpe_count += gpe_block->gpe_count;
	return_ACPI_STATUS(AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_initialize_gpe_block
 *
 * PARAMETERS:  gpe_xrupt_info      - GPE interrupt block for this GPE block
 *              gpe_block           - GPE block to initialize
 *              context             - Pointer to a "polling needed" flag
 *                                    (acpi_gpe_callback prototype)
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Initialize and enable a GPE block. Enable GPEs that have
 *              associated methods.
 *              Note: Assumes namespace is locked.
 *
 ******************************************************************************/

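/*
 * Illustrative note (an editorial addition, not part of the original source):
 * this function matches the acpi_gpe_callback prototype and is normally run
 * for every installed GPE block via acpi_ev_walk_gpe_list(), e.g. from
 * acpi_update_all_gpes():
 *
 *   (void)acpi_ev_walk_gpe_list(acpi_ev_initialize_gpe_block,
 *                               &is_polling_needed);
 */
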
acpi_status
acpi_ev_initialize_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
			     struct acpi_gpe_block_info *gpe_block,
			     void *context)
{
	acpi_status status;
	struct acpi_gpe_event_info *gpe_event_info;
	u32 gpe_enabled_count;
	u32 gpe_index;
	u32 i;
	u32 j;
	u8 *is_polling_needed = context;
	ACPI_ERROR_ONLY(u32 gpe_number);

	ACPI_FUNCTION_TRACE(ev_initialize_gpe_block);

	/*
	 * Ignore a null GPE block (e.g., if no GPE block 1 exists), and
	 * any GPE blocks that have been initialized already.
	 */
	if (!gpe_block || gpe_block->initialized) {
		return_ACPI_STATUS(AE_OK);
	}

	/*
	 * Enable all GPEs that have a corresponding method and have the
	 * ACPI_GPE_CAN_WAKE flag unset. Any other GPEs within this block
	 * must be enabled via the acpi_enable_gpe() interface.
	 */
	gpe_enabled_count = 0;

	for (i = 0; i < gpe_block->register_count; i++) {
		for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {

			/* Get the info block for this particular GPE */

			gpe_index = (i * ACPI_GPE_REGISTER_WIDTH) + j;
			gpe_event_info = &gpe_block->event_info[gpe_index];
			ACPI_ERROR_ONLY(gpe_number =
					gpe_block->block_base_number +
					gpe_index);
			gpe_event_info->flags |= ACPI_GPE_INITIALIZED;

			/*
			 * Ignore GPEs that have no corresponding _Lxx/_Exx method
			 * and GPEs that are used for wakeup
			 */
			if ((ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) !=
			     ACPI_GPE_DISPATCH_METHOD)
			    || (gpe_event_info->flags & ACPI_GPE_CAN_WAKE)) {
				continue;
			}

			status = acpi_ev_add_gpe_reference(gpe_event_info, FALSE);
			if (ACPI_FAILURE(status)) {
				ACPI_EXCEPTION((AE_INFO, status,
					"Could not enable GPE 0x%02X",
					gpe_number));
				continue;
			}

			gpe_event_info->flags |= ACPI_GPE_AUTO_ENABLED;

			if (is_polling_needed &&
			    ACPI_GPE_IS_POLLING_NEEDED(gpe_event_info)) {
				*is_polling_needed = TRUE;
			}

			gpe_enabled_count++;
		}
	}

	if (gpe_enabled_count) {
		ACPI_INFO(("Enabled %u GPEs in block %02X to %02X",
			   gpe_enabled_count, (u32)gpe_block->block_base_number,
			   (u32)(gpe_block->block_base_number +
				 (gpe_block->gpe_count - 1))));
	}

	gpe_block->initialized = TRUE;

	return_ACPI_STATUS(AE_OK);
}

#endif				/* !ACPI_REDUCED_HARDWARE */