/******************************************************************************
 *
 * Module Name: evgpeblk - GPE block creation and initialization.
 *
 *****************************************************************************/

/*
 * Copyright (C) 2000 - 2010, Intel Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 */

#include <acpi/acpi.h>
#include "accommon.h"
#include "acevents.h"
#include "acnamesp.h"

#define _COMPONENT          ACPI_EVENTS
ACPI_MODULE_NAME("evgpeblk")

/* Local prototypes */
static acpi_status
acpi_ev_install_gpe_block(struct acpi_gpe_block_info *gpe_block,
			  u32 interrupt_number);

static acpi_status
acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block);

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_install_gpe_block
 *
 * PARAMETERS:  gpe_block           - New GPE block
 *              interrupt_number    - Xrupt to be associated with this
 *                                    GPE block
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Install new GPE block with mutex support
 *
 ******************************************************************************/

static acpi_status
acpi_ev_install_gpe_block(struct acpi_gpe_block_info *gpe_block,
			  u32 interrupt_number)
{
	struct acpi_gpe_block_info *next_gpe_block;
	struct acpi_gpe_xrupt_info *gpe_xrupt_block;
	acpi_status status;
	acpi_cpu_flags flags;

	ACPI_FUNCTION_TRACE(ev_install_gpe_block);

	status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	gpe_xrupt_block = acpi_ev_get_gpe_xrupt_block(interrupt_number);
	if (!gpe_xrupt_block) {
		status = AE_NO_MEMORY;
		goto unlock_and_exit;
	}

	/* Install the new block at the end of the list with lock */

	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
	if (gpe_xrupt_block->gpe_block_list_head) {
		next_gpe_block = gpe_xrupt_block->gpe_block_list_head;
		while (next_gpe_block->next) {
			next_gpe_block = next_gpe_block->next;
		}

		next_gpe_block->next = gpe_block;
		gpe_block->previous = next_gpe_block;
	} else {
		gpe_xrupt_block->gpe_block_list_head = gpe_block;
	}

	gpe_block->xrupt_block = gpe_xrupt_block;
	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);

unlock_and_exit:
	status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
	return_ACPI_STATUS(status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_delete_gpe_block
 *
 * PARAMETERS:  gpe_block           - Existing GPE block
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Remove a GPE block
 *
 ******************************************************************************/

acpi_status acpi_ev_delete_gpe_block(struct acpi_gpe_block_info *gpe_block)
{
	acpi_status status;
	acpi_cpu_flags flags;

	ACPI_FUNCTION_TRACE(ev_delete_gpe_block);

	status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/* Disable all GPEs in this block */

	status =
	    acpi_hw_disable_gpe_block(gpe_block->xrupt_block, gpe_block, NULL);

	if (!gpe_block->previous && !gpe_block->next) {

		/* This is the last gpe_block on this interrupt */

		status = acpi_ev_delete_gpe_xrupt(gpe_block->xrupt_block);
		if (ACPI_FAILURE(status)) {
			goto unlock_and_exit;
		}
	} else {
		/* Remove the block on this interrupt with lock */

		flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
		if (gpe_block->previous) {
			gpe_block->previous->next = gpe_block->next;
		} else {
			gpe_block->xrupt_block->gpe_block_list_head =
			    gpe_block->next;
		}

		if (gpe_block->next) {
			gpe_block->next->previous = gpe_block->previous;
		}
		acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
	}

	acpi_current_gpe_count -= gpe_block->gpe_count;

	/* Free the gpe_block */

	ACPI_FREE(gpe_block->register_info);
	ACPI_FREE(gpe_block->event_info);
	ACPI_FREE(gpe_block);

unlock_and_exit:
	status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
	return_ACPI_STATUS(status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_create_gpe_info_blocks
 *
 * PARAMETERS:  gpe_block           - New GPE block
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Create the register_info and event_info blocks for this GPE block
 *
 ******************************************************************************/

static acpi_status
acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block)
{
	struct acpi_gpe_register_info *gpe_register_info = NULL;
	struct acpi_gpe_event_info *gpe_event_info = NULL;
	struct acpi_gpe_event_info *this_event;
	struct acpi_gpe_register_info *this_register;
	u32 i;
	u32 j;
	acpi_status status;

	ACPI_FUNCTION_TRACE(ev_create_gpe_info_blocks);

	/* Allocate the GPE register information block */

	gpe_register_info =
	    ACPI_ALLOCATE_ZEROED((acpi_size) gpe_block->register_count *
				 sizeof(struct acpi_gpe_register_info));
	if (!gpe_register_info) {
		ACPI_ERROR((AE_INFO,
			    "Could not allocate the GpeRegisterInfo table"));
		return_ACPI_STATUS(AE_NO_MEMORY);
	}

	/*
	 * Allocate the GPE event_info block. There are eight distinct GPEs
	 * per register. Initialization to zeros is sufficient.
	 */
	gpe_event_info =
	    ACPI_ALLOCATE_ZEROED((acpi_size) gpe_block->gpe_count *
				 sizeof(struct acpi_gpe_event_info));
	if (!gpe_event_info) {
		ACPI_ERROR((AE_INFO,
			    "Could not allocate the GpeEventInfo table"));
		status = AE_NO_MEMORY;
		goto error_exit;
	}

	/* Save the new Info arrays in the GPE block */

	gpe_block->register_info = gpe_register_info;
	gpe_block->event_info = gpe_event_info;

	/*
	 * Initialize the GPE Register and Event structures. A goal of these
	 * tables is to hide the fact that there are two separate GPE register
	 * sets in a given GPE hardware block: the status registers occupy the
	 * first half of the block, and the enable registers occupy the second
	 * half.
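	 *
	 * Worked example (illustrative numbers only, derived from the address
	 * math below): with block_address = 0x1000, register_count = 2, and
	 * block_base_number = 0x00, register 0 gets its status byte at 0x1000
	 * and its enable byte at 0x1002 (GPEs 0x00-0x07), while register 1
	 * gets its status byte at 0x1001 and its enable byte at 0x1003
	 * (GPEs 0x08-0x0F).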
	 */
	this_register = gpe_register_info;
	this_event = gpe_event_info;

	for (i = 0; i < gpe_block->register_count; i++) {

		/* Init the register_info for this GPE register (8 GPEs) */

		this_register->base_gpe_number =
		    (u8) (gpe_block->block_base_number +
			  (i * ACPI_GPE_REGISTER_WIDTH));

		this_register->status_address.address =
		    gpe_block->block_address.address + i;

		this_register->enable_address.address =
		    gpe_block->block_address.address + i +
		    gpe_block->register_count;

		this_register->status_address.space_id =
		    gpe_block->block_address.space_id;
		this_register->enable_address.space_id =
		    gpe_block->block_address.space_id;
		this_register->status_address.bit_width =
		    ACPI_GPE_REGISTER_WIDTH;
		this_register->enable_address.bit_width =
		    ACPI_GPE_REGISTER_WIDTH;
		this_register->status_address.bit_offset = 0;
		this_register->enable_address.bit_offset = 0;

		/* Init the event_info for each GPE within this register */

		for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
			this_event->gpe_number =
			    (u8) (this_register->base_gpe_number + j);
			this_event->register_info = this_register;
			this_event++;
		}

		/* Disable all GPEs within this register */

		status = acpi_hw_write(0x00, &this_register->enable_address);
		if (ACPI_FAILURE(status)) {
			goto error_exit;
		}

		/* Clear any pending GPE events within this register */

		status = acpi_hw_write(0xFF, &this_register->status_address);
		if (ACPI_FAILURE(status)) {
			goto error_exit;
		}

		this_register++;
	}

	return_ACPI_STATUS(AE_OK);

error_exit:
	if (gpe_register_info) {
		ACPI_FREE(gpe_register_info);
	}
	if (gpe_event_info) {
		ACPI_FREE(gpe_event_info);
	}

	return_ACPI_STATUS(status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_create_gpe_block
 *
 * PARAMETERS:  gpe_device          - Handle to the parent GPE block
 *              gpe_block_address   - Address and space_id
 *              register_count      - Number of GPE register pairs in the block
 *              gpe_block_base_number - Starting GPE number for the block
 *              interrupt_number    - H/W interrupt for the block
 *              return_gpe_block    - Where the new block descriptor is returned
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Create and Install a block of GPE registers. All GPEs within
 *              the block are disabled at exit.
 *              Note: Assumes namespace is locked.
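 *
 *              Example (illustrative only): the FADT GPE0 block is typically
 *              created with a call of this form, where register_count0 is a
 *              caller-computed value derived from the FADT GPE0 block length:
 *
 *                  acpi_ev_create_gpe_block(acpi_gbl_fadt_gpe_device,
 *                                           &acpi_gbl_FADT.xgpe0_block,
 *                                           register_count0, 0,
 *                                           acpi_gbl_FADT.sci_interrupt,
 *                                           &acpi_gbl_gpe_fadt_blocks[0]);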
 *
 ******************************************************************************/

acpi_status
acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
			 struct acpi_generic_address *gpe_block_address,
			 u32 register_count,
			 u8 gpe_block_base_number,
			 u32 interrupt_number,
			 struct acpi_gpe_block_info **return_gpe_block)
{
	acpi_status status;
	struct acpi_gpe_block_info *gpe_block;
	struct acpi_gpe_walk_info walk_info;

	ACPI_FUNCTION_TRACE(ev_create_gpe_block);

	if (!register_count) {
		return_ACPI_STATUS(AE_OK);
	}

	/* Allocate a new GPE block */

	gpe_block = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_block_info));
	if (!gpe_block) {
		return_ACPI_STATUS(AE_NO_MEMORY);
	}

	/* Initialize the new GPE block */

	gpe_block->node = gpe_device;
	gpe_block->gpe_count = (u16)(register_count * ACPI_GPE_REGISTER_WIDTH);
	gpe_block->register_count = register_count;
	gpe_block->block_base_number = gpe_block_base_number;

	ACPI_MEMCPY(&gpe_block->block_address, gpe_block_address,
		    sizeof(struct acpi_generic_address));

	/*
	 * Create the register_info and event_info sub-structures.
	 * Note: disables and clears all GPEs in the block.
	 */
	status = acpi_ev_create_gpe_info_blocks(gpe_block);
	if (ACPI_FAILURE(status)) {
		ACPI_FREE(gpe_block);
		return_ACPI_STATUS(status);
	}

	/* Install the new block in the global lists */

	status = acpi_ev_install_gpe_block(gpe_block, interrupt_number);
	if (ACPI_FAILURE(status)) {
		ACPI_FREE(gpe_block);
		return_ACPI_STATUS(status);
	}

	/* Find all GPE methods (_Lxx or _Exx) for this block */

	walk_info.gpe_block = gpe_block;
	walk_info.gpe_device = gpe_device;
	walk_info.enable_this_gpe = FALSE;
	walk_info.execute_by_owner_id = FALSE;

	status = acpi_ns_walk_namespace(ACPI_TYPE_METHOD, gpe_device,
					ACPI_UINT32_MAX, ACPI_NS_WALK_NO_UNLOCK,
					acpi_ev_match_gpe_method, NULL,
					&walk_info, NULL);

	/* Return the new block */

	if (return_gpe_block) {
		(*return_gpe_block) = gpe_block;
	}

	ACPI_DEBUG_PRINT((ACPI_DB_INIT,
			  "GPE %02X to %02X [%4.4s] %u regs on int 0x%X\n",
			  (u32) gpe_block->block_base_number,
			  (u32) (gpe_block->block_base_number +
				 (gpe_block->gpe_count - 1)),
			  gpe_device->name.ascii, gpe_block->register_count,
			  interrupt_number));

	/* Update global count of currently available GPEs */

	acpi_current_gpe_count += gpe_block->gpe_count;
	return_ACPI_STATUS(AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_initialize_gpe_block
 *
 * PARAMETERS:  gpe_device          - Handle to the parent GPE block
 *              gpe_block           - Gpe Block info
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Initialize and enable a GPE block. Enables only those GPEs
 *              that have associated _Lxx or _Exx control methods; all other
 *              GPEs within the block must be enabled via the acpi_enable_gpe
 *              interface.
 *              Note: Assumes namespace is locked.
 *
 ******************************************************************************/

acpi_status
acpi_ev_initialize_gpe_block(struct acpi_namespace_node *gpe_device,
			     struct acpi_gpe_block_info *gpe_block)
{
	acpi_status status;
	struct acpi_gpe_event_info *gpe_event_info;
	u32 gpe_enabled_count;
	u32 gpe_index;
	u32 gpe_number;
	u32 i;
	u32 j;

	ACPI_FUNCTION_TRACE(ev_initialize_gpe_block);

	/* Ignore a null GPE block (e.g., if no GPE block 1 exists) */

	if (!gpe_block) {
		return_ACPI_STATUS(AE_OK);
	}

	/*
	 * Enable all GPEs that have a corresponding method. Any other GPEs
	 * within this block must be enabled via the acpi_enable_gpe interface.
	 */
	gpe_enabled_count = 0;

	if (gpe_device == acpi_gbl_fadt_gpe_device) {
		gpe_device = NULL;
	}

	for (i = 0; i < gpe_block->register_count; i++) {
		for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {

			/* Get the info block for this particular GPE */

			gpe_index = (i * ACPI_GPE_REGISTER_WIDTH) + j;
			gpe_event_info = &gpe_block->event_info[gpe_index];
			gpe_number = gpe_index + gpe_block->block_base_number;

			/* Ignore GPEs that have no corresponding _Lxx/_Exx method */

			if (!(gpe_event_info->flags & ACPI_GPE_DISPATCH_METHOD)) {
				continue;
			}

			/*
			 * If the GPE has already been enabled for runtime
			 * signaling, make sure it remains enabled, but do not
			 * increment its reference counter.
			 */
			status = gpe_event_info->runtime_count ?
			    acpi_ev_enable_gpe(gpe_event_info) :
			    acpi_enable_gpe(gpe_device, gpe_number);

			if (ACPI_FAILURE(status)) {
				ACPI_EXCEPTION((AE_INFO, status,
						"Could not enable GPE 0x%02X",
						gpe_number));
				continue;
			}

			gpe_enabled_count++;
		}
	}

	if (gpe_enabled_count) {
		ACPI_DEBUG_PRINT((ACPI_DB_INIT,
				  "Enabled %u GPEs in this block\n",
				  gpe_enabled_count));
	}

	return_ACPI_STATUS(AE_OK);
}
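
/*
 * Usage note (illustrative sketch; the local names below are the caller's
 * own): GPE blocks other than the two FADT-defined blocks are normally
 * created through the public acpi_install_gpe_block() interface, which
 * validates its arguments, locks the namespace, and then calls
 * acpi_ev_create_gpe_block() above. A host driver would typically do
 * something like:
 *
 *     struct acpi_generic_address block_addr;   // filled in by the caller
 *     acpi_handle gpe_dev;                      // handle to the GPE block device
 *
 *     status = acpi_install_gpe_block(gpe_dev, &block_addr,
 *                                     register_count, interrupt_number);
 */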