1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 2 /****************************************************************************** 3 * 4 * Module Name: dsmethod - Parser/Interpreter interface - control method parsing 5 * 6 * Copyright (C) 2000 - 2025, Intel Corp. 7 * 8 *****************************************************************************/ 9 10 #include <acpi/acpi.h> 11 #include "accommon.h" 12 #include "acdispat.h" 13 #include "acinterp.h" 14 #include "acnamesp.h" 15 #include "acparser.h" 16 #include "amlcode.h" 17 #include "acdebug.h" 18 19 #define _COMPONENT ACPI_DISPATCHER 20 ACPI_MODULE_NAME("dsmethod") 21 22 /* Local prototypes */ 23 static acpi_status 24 acpi_ds_detect_named_opcodes(struct acpi_walk_state *walk_state, 25 union acpi_parse_object **out_op); 26 27 static acpi_status 28 acpi_ds_create_method_mutex(union acpi_operand_object *method_desc); 29 30 /******************************************************************************* 31 * 32 * FUNCTION: acpi_ds_auto_serialize_method 33 * 34 * PARAMETERS: node - Namespace Node of the method 35 * obj_desc - Method object attached to node 36 * 37 * RETURN: Status 38 * 39 * DESCRIPTION: Parse a control method AML to scan for control methods that 40 * need serialization due to the creation of named objects. 41 * 42 * NOTE: It is a bit of overkill to mark all such methods serialized, since 43 * there is only a problem if the method actually blocks during execution. 44 * A blocking operation is, for example, a Sleep() operation, or any access 45 * to an operation region. However, it is probably not possible to easily 46 * detect whether a method will block or not, so we simply mark all suspicious 47 * methods as serialized. 48 * 49 * NOTE2: This code is essentially a generic routine for parsing a single 50 * control method. 
 *
 ******************************************************************************/

acpi_status
acpi_ds_auto_serialize_method(struct acpi_namespace_node *node,
			      union acpi_operand_object *obj_desc)
{
	acpi_status status;
	union acpi_parse_object *op = NULL;
	struct acpi_walk_state *walk_state;

	ACPI_FUNCTION_TRACE_PTR(ds_auto_serialize_method, node);

	ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
			  "Method auto-serialization parse [%4.4s] %p\n",
			  acpi_ut_get_node_name(node), node));

	/* Create/Init a root op for the method parse tree */

	op = acpi_ps_alloc_op(AML_METHOD_OP, obj_desc->method.aml_start);
	if (!op) {
		return_ACPI_STATUS(AE_NO_MEMORY);
	}

	acpi_ps_set_name(op, node->name.integer);
	op->common.node = node;

	/* Create and initialize a new walk state */

	walk_state =
	    acpi_ds_create_walk_state(node->owner_id, NULL, NULL, NULL);
	if (!walk_state) {
		acpi_ps_free_op(op);
		return_ACPI_STATUS(AE_NO_MEMORY);
	}

	status = acpi_ds_init_aml_walk(walk_state, op, node,
				       obj_desc->method.aml_start,
				       obj_desc->method.aml_length, NULL, 0);
	if (ACPI_FAILURE(status)) {
		acpi_ds_delete_walk_state(walk_state);
		acpi_ps_free_op(op);
		return_ACPI_STATUS(status);
	}

	/*
	 * Install the detection callback; it aborts the walk (with
	 * AE_CTRL_TERMINATE) as soon as the first named-object opcode
	 * is encountered, so the scan normally does not parse the
	 * entire method.
	 */
	walk_state->descending_callback = acpi_ds_detect_named_opcodes;

	/* Parse the method, scan for creation of named objects */

	status = acpi_ps_parse_aml(walk_state);

	/* The scratch parse tree is always deleted, success or failure */

	acpi_ps_delete_parse_tree(op);
	return_ACPI_STATUS(status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ds_detect_named_opcodes
 *
 * PARAMETERS:  walk_state      - Current state of the parse tree walk
 *              out_op          - Unused, required for parser interface
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Descending callback used during the loading of ACPI tables.
 *              Currently used to detect methods that must be marked serialized
 *              in order to avoid problems with the creation of named objects.
 *
 ******************************************************************************/

static acpi_status
acpi_ds_detect_named_opcodes(struct acpi_walk_state *walk_state,
			     union acpi_parse_object **out_op)
{

	ACPI_FUNCTION_NAME(acpi_ds_detect_named_opcodes);

	/*
	 * We are only interested in opcodes that create a new name.
	 * (out_op is intentionally unused; it exists only to satisfy the
	 * parser's descending-callback signature.)
	 */
	if (!
	    (walk_state->op_info->
	     flags & (AML_NAMED | AML_CREATE | AML_FIELD))) {
		return (AE_OK);
	}

	/*
	 * At this point, we know we have a Named object opcode.
	 * Mark the method as serialized. Later code will create a mutex for
	 * this method to enforce serialization.
	 *
	 * Note, ACPI_METHOD_IGNORE_SYNC_LEVEL flag means that we will ignore the
	 * Sync Level mechanism for this method, even though it is now serialized.
	 * Otherwise, there can be conflicts with existing ASL code that actually
	 * uses sync levels.
	 */
	walk_state->method_desc->method.sync_level = 0;
	walk_state->method_desc->method.info_flags |=
	    (ACPI_METHOD_SERIALIZED | ACPI_METHOD_IGNORE_SYNC_LEVEL);

	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			  "Method serialized [%4.4s] %p - [%s] (%4.4X)\n",
			  walk_state->method_node->name.ascii,
			  walk_state->method_node, walk_state->op_info->name,
			  walk_state->opcode));

	/* Abort the parse, no need to examine this method any further */

	return (AE_CTRL_TERMINATE);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ds_method_error
 *
 * PARAMETERS:  status          - Execution status
 *              walk_state      - Current state
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Called on method error.
 *              Invoke the global exception handler if
 *              present, dump the method data if the debugger is configured
 *
 *              Note: Allows the exception handler to change the status code
 *
 ******************************************************************************/

acpi_status
acpi_ds_method_error(acpi_status status, struct acpi_walk_state *walk_state)
{
	u32 aml_offset;
	acpi_name name = 0;

	ACPI_FUNCTION_ENTRY();

	/* Ignore AE_OK and control exception codes */

	if (ACPI_SUCCESS(status) || (status & AE_CODE_CONTROL)) {
		return (status);
	}

	/* Invoke the global exception handler */

	if (acpi_gbl_exception_handler) {

		/* Exit the interpreter, allow handler to execute methods */

		acpi_ex_exit_interpreter();

		/*
		 * Handler can map the exception code to anything it wants, including
		 * AE_OK, in which case the executing method will not be aborted.
		 */
		aml_offset = (u32)ACPI_PTR_DIFF(walk_state->aml,
						walk_state->parser_state.
						aml_start);

		/*
		 * Report the name of the executing method if available,
		 * otherwise fall back to a deferred node (e.g. a deferred
		 * op-region/field fixup); name stays 0 if neither exists.
		 */
		if (walk_state->method_node) {
			name = walk_state->method_node->name.integer;
		} else if (walk_state->deferred_node) {
			name = walk_state->deferred_node->name.integer;
		}

		status = acpi_gbl_exception_handler(status, name,
						    walk_state->opcode,
						    aml_offset, NULL);
		acpi_ex_enter_interpreter();
	}

	acpi_ds_clear_implicit_return(walk_state);

	if (ACPI_FAILURE(status)) {
		acpi_ds_dump_method_stack(status, walk_state, walk_state->op);

		/* Display method locals/args if debugger is present */

#ifdef ACPI_DEBUGGER
		acpi_db_dump_method_info(status, walk_state);
#endif
	}

	return (status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ds_create_method_mutex
 *
 * PARAMETERS:  method_desc         - The method object
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Create a mutex object for a serialized control method
 *
 ******************************************************************************/

static acpi_status
acpi_ds_create_method_mutex(union acpi_operand_object *method_desc)
{
	union acpi_operand_object *mutex_desc;
	acpi_status status;

	ACPI_FUNCTION_TRACE(ds_create_method_mutex);

	/* Create the new mutex object */

	mutex_desc = acpi_ut_create_internal_object(ACPI_TYPE_MUTEX);
	if (!mutex_desc) {
		return_ACPI_STATUS(AE_NO_MEMORY);
	}

	/* Create the actual OS Mutex */

	status = acpi_os_create_mutex(&mutex_desc->mutex.os_mutex);
	if (ACPI_FAILURE(status)) {
		acpi_ut_delete_object_desc(mutex_desc);
		return_ACPI_STATUS(status);
	}

	/* The mutex inherits the sync level declared by the method */

	mutex_desc->mutex.sync_level = method_desc->method.sync_level;
	method_desc->method.mutex = mutex_desc;
	return_ACPI_STATUS(AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:
 *              acpi_ds_begin_method_execution
 *
 * PARAMETERS:  method_node     - Node of the method
 *              obj_desc        - The method object
 *              walk_state      - current state, NULL if not yet executing
 *                                a method.
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Prepare a method for execution. Parses the method if necessary,
 *              increments the thread count, and waits at the method semaphore
 *              for clearance to execute.
 *
 ******************************************************************************/

acpi_status
acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node,
			       union acpi_operand_object *obj_desc,
			       struct acpi_walk_state *walk_state)
{
	acpi_status status = AE_OK;

	ACPI_FUNCTION_TRACE_PTR(ds_begin_method_execution, method_node);

	if (!method_node) {
		return_ACPI_STATUS(AE_NULL_ENTRY);
	}

	acpi_ex_start_trace_method(method_node, obj_desc, walk_state);

	/* Prevent wraparound of thread count (thread_count is a u8) */

	if (obj_desc->method.thread_count == ACPI_UINT8_MAX) {
		ACPI_ERROR((AE_INFO,
			    "Method reached maximum reentrancy limit (255)"));
		return_ACPI_STATUS(AE_AML_METHOD_LIMIT);
	}

	/*
	 * If this method is serialized, we need to acquire the method mutex.
	 */
	if (obj_desc->method.info_flags & ACPI_METHOD_SERIALIZED) {
		/*
		 * Create a mutex for the method if it is defined to be Serialized
		 * and a mutex has not already been created. We defer the mutex creation
		 * until a method is actually executed, to minimize the object count
		 */
		if (!obj_desc->method.mutex) {
			status = acpi_ds_create_method_mutex(obj_desc);
			if (ACPI_FAILURE(status)) {
				return_ACPI_STATUS(status);
			}
		}

		/*
		 * The current_sync_level (per-thread) must be less than or equal to
		 * the sync level of the method. This mechanism provides some
		 * deadlock prevention.
		 *
		 * If the method was auto-serialized, we just ignore the sync level
		 * mechanism, because auto-serialization of methods can interfere
		 * with ASL code that actually uses sync levels.
		 *
		 * Top-level method invocation has no walk state at this point
		 */
		if (walk_state &&
		    (!(obj_desc->method.
		       info_flags & ACPI_METHOD_IGNORE_SYNC_LEVEL))
		    && (walk_state->thread->current_sync_level >
			obj_desc->method.mutex->mutex.sync_level)) {
			ACPI_ERROR((AE_INFO,
				    "Cannot acquire Mutex for method [%4.4s]"
				    ", current SyncLevel is too large (%u)",
				    acpi_ut_get_node_name(method_node),
				    walk_state->thread->current_sync_level));

			return_ACPI_STATUS(AE_AML_MUTEX_ORDER);
		}

		/*
		 * Obtain the method mutex if necessary. Do not acquire mutex for a
		 * recursive call (same thread already owns it).
		 */
		if (!walk_state ||
		    !obj_desc->method.mutex->mutex.thread_id ||
		    (walk_state->thread->thread_id !=
		     obj_desc->method.mutex->mutex.thread_id)) {
			/*
			 * Acquire the method mutex. This releases the interpreter if we
			 * block (and reacquires it before it returns)
			 */
			status =
			    acpi_ex_system_wait_mutex(obj_desc->method.mutex->
						      mutex.os_mutex,
						      ACPI_WAIT_FOREVER);
			if (ACPI_FAILURE(status)) {
				return_ACPI_STATUS(status);
			}

			/* Update the mutex and walk info and save the original sync_level */

			if (walk_state) {
				obj_desc->method.mutex->mutex.
				    original_sync_level =
				    walk_state->thread->current_sync_level;

				obj_desc->method.mutex->mutex.thread_id =
				    walk_state->thread->thread_id;

				/*
				 * Update the current sync_level only if this is not an auto-
				 * serialized method. In the auto case, we have to ignore
				 * the sync level for the method mutex (created for the
				 * auto-serialization) because we have no idea of what the
				 * sync level should be. Therefore, just ignore it.
				 */
				if (!(obj_desc->method.info_flags &
				      ACPI_METHOD_IGNORE_SYNC_LEVEL)) {
					walk_state->thread->current_sync_level =
					    obj_desc->method.sync_level;
				}
			} else {
				/* No walk state (top-level invocation) */

				obj_desc->method.mutex->mutex.
				    original_sync_level =
				    obj_desc->method.mutex->mutex.sync_level;

				obj_desc->method.mutex->mutex.thread_id =
				    acpi_os_get_thread_id();
			}
		}

		/* Always increase acquisition depth (handles recursion) */

		obj_desc->method.mutex->mutex.acquisition_depth++;
	}

	/*
	 * Allocate an Owner ID for this method, only if this is the first thread
	 * to begin concurrent execution. We only need one owner_id, even if the
	 * method is invoked recursively.
	 */
	if (!obj_desc->method.owner_id) {
		status = acpi_ut_allocate_owner_id(&obj_desc->method.owner_id);
		if (ACPI_FAILURE(status)) {
			goto cleanup;
		}
	}

	/*
	 * Increment the method parse tree thread count since it has been
	 * reentered one more time (even if it is the same thread)
	 */
	obj_desc->method.thread_count++;
	acpi_method_count++;
	return_ACPI_STATUS(status);

cleanup:
	/* On error, must release the method mutex (if present) */

	if (obj_desc->method.mutex) {
		acpi_os_release_mutex(obj_desc->method.mutex->mutex.os_mutex);
	}
	return_ACPI_STATUS(status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ds_call_control_method
 *
 * PARAMETERS:  thread              - Info for this thread
 *              this_walk_state     - Current walk state
 *              op                  - Current Op to be walked
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Transfer execution to a called control method
 *
 ******************************************************************************/

acpi_status
acpi_ds_call_control_method(struct acpi_thread_state *thread,
			    struct acpi_walk_state *this_walk_state,
			    union acpi_parse_object *op)
{
	acpi_status status;
	struct acpi_namespace_node *method_node;
	struct acpi_walk_state *next_walk_state = NULL;
	union acpi_operand_object *obj_desc;
	struct acpi_evaluate_info *info;
	u32 i;

	ACPI_FUNCTION_TRACE_PTR(ds_call_control_method, this_walk_state);

	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
			  "Calling method %p, currentstate=%p\n",
			  this_walk_state->prev_op, this_walk_state));

	/*
	 * Get the namespace entry for the control method we are about to call
	 */
	method_node = this_walk_state->method_call_node;
	if (!method_node) {
		return_ACPI_STATUS(AE_NULL_ENTRY);
	}

	obj_desc = acpi_ns_get_attached_object(method_node);
	if (!obj_desc) {
		return_ACPI_STATUS(AE_NULL_OBJECT);
	}

	/* The caller must have pushed at least param_count operands */

	if (this_walk_state->num_operands < obj_desc->method.param_count) {
		ACPI_ERROR((AE_INFO, "Missing argument for method [%4.4s]",
			    acpi_ut_get_node_name(method_node)));

		return_ACPI_STATUS(AE_AML_UNINITIALIZED_ARG);
	}

	/* Init for new method, possibly wait on method mutex */

	status =
	    acpi_ds_begin_method_execution(method_node, obj_desc,
					   this_walk_state);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/* Begin method parse/execution. Create a new walk state */

	next_walk_state =
	    acpi_ds_create_walk_state(obj_desc->method.owner_id, NULL, obj_desc,
				      thread);
	if (!next_walk_state) {
		status = AE_NO_MEMORY;
		goto cleanup;
	}

	/*
	 * The resolved arguments were put on the previous walk state's operand
	 * stack. Operands on the previous walk state stack always
	 * start at index 0. Also, null terminate the list of arguments
	 */
	this_walk_state->operands[this_walk_state->num_operands] = NULL;

	/*
	 * Allocate and initialize the evaluation information block
	 * TBD: this is somewhat inefficient, should change interface to
	 * ds_init_aml_walk. For now, keeps this struct off the CPU stack
	 */
	info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
	if (!info) {
		status = AE_NO_MEMORY;
		goto pop_walk_state;
	}

	info->parameters = &this_walk_state->operands[0];

	status = acpi_ds_init_aml_walk(next_walk_state, NULL, method_node,
				       obj_desc->method.aml_start,
				       obj_desc->method.aml_length, info,
				       ACPI_IMODE_EXECUTE);

	/* Info block is only needed during walk init; free it immediately */

	ACPI_FREE(info);
	if (ACPI_FAILURE(status)) {
		goto pop_walk_state;
	}

	next_walk_state->method_nesting_depth =
	    this_walk_state->method_nesting_depth + 1;

	/*
	 * Delete the operands on the previous walkstate operand stack
	 * (they were copied to new objects)
	 */
	for (i = 0; i < obj_desc->method.param_count; i++) {
		acpi_ut_remove_reference(this_walk_state->operands[i]);
		this_walk_state->operands[i] = NULL;
	}

	/* Clear the operand stack */

	this_walk_state->num_operands = 0;

	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
			  "**** Begin nested execution of [%4.4s] **** WalkState=%p\n",
			  method_node->name.ascii, next_walk_state));

	this_walk_state->method_pathname =
	    acpi_ns_get_normalized_pathname(method_node, TRUE);
	this_walk_state->method_is_nested = TRUE;

	/* Optional object evaluation log */

	ACPI_DEBUG_PRINT_RAW((ACPI_DB_EVALUATION,
			      "%-26s:  %*s%s\n", "   Nested method call",
			      next_walk_state->method_nesting_depth * 3, " ",
			      &this_walk_state->method_pathname[1]));

	/* Invoke an internal method if necessary */

	if (obj_desc->method.info_flags & ACPI_METHOD_INTERNAL_ONLY) {
		status =
		    obj_desc->method.dispatch.implementation(next_walk_state);
		if (status == AE_OK) {
			status = AE_CTRL_TERMINATE;
		}
	}

	return_ACPI_STATUS(status);

pop_walk_state:

	/* On error, pop the walk state to be deleted from thread */

	acpi_ds_pop_walk_state(thread);

cleanup:

	/* On error, we must terminate the method properly */

	acpi_ds_terminate_control_method(obj_desc, next_walk_state);
	acpi_ds_delete_walk_state(next_walk_state);

	return_ACPI_STATUS(status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ds_restart_control_method
 *
 * PARAMETERS:  walk_state          - State for preempted method (caller)
 *              return_desc         - Return value from the called method
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Restart a method that was preempted by another (nested) method
 *              invocation. Handle the return value (if any) from the callee.
 *
 ******************************************************************************/

acpi_status
acpi_ds_restart_control_method(struct acpi_walk_state *walk_state,
			       union acpi_operand_object *return_desc)
{
	acpi_status status;
	int same_as_implicit_return;

	ACPI_FUNCTION_TRACE_PTR(ds_restart_control_method, walk_state);

	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
			  "****Restart [%4.4s] Op %p ReturnValueFromCallee %p\n",
			  acpi_ut_get_node_name(walk_state->method_node),
			  walk_state->method_call_op, return_desc));

	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
			  "    ReturnFromThisMethodUsed?=%X ResStack %p Walk %p\n",
			  walk_state->return_used,
			  walk_state->results, walk_state));

	/* Did the called method return a value? */

	if (return_desc) {

		/* Is the implicit return object the same as the return desc? */

		same_as_implicit_return =
		    (walk_state->implicit_return_obj == return_desc);

		/* Are we actually going to use the return value?
		 */

		if (walk_state->return_used) {

			/* Save the return value from the previous method */

			status = acpi_ds_result_push(return_desc, walk_state);
			if (ACPI_FAILURE(status)) {
				acpi_ut_remove_reference(return_desc);
				return_ACPI_STATUS(status);
			}

			/*
			 * Save as THIS method's return value in case it is returned
			 * immediately to yet another method
			 */
			walk_state->return_desc = return_desc;
		}

		/*
		 * The following code is the optional support for the so-called
		 * "implicit return". Some AML code assumes that the last value of the
		 * method is "implicitly" returned to the caller, in the absence of an
		 * explicit return value.
		 *
		 * Just save the last result of the method as the return value.
		 *
		 * NOTE: this is optional because the ASL language does not actually
		 * support this behavior.
		 */
		else if (!acpi_ds_do_implicit_return
			 (return_desc, walk_state, FALSE)
			 || same_as_implicit_return) {
			/*
			 * Delete the return value if it will not be used by the
			 * calling method or remove one reference if the explicit return
			 * is the same as the implicit return value.
			 */
			acpi_ut_remove_reference(return_desc);
		}
	}

	return_ACPI_STATUS(AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ds_terminate_control_method
 *
 * PARAMETERS:  method_desc         - Method object
 *              walk_state          - State associated with the method
 *
 * RETURN:      None
 *
 * DESCRIPTION: Terminate a control method. Delete everything that the method
 *              created, delete all locals and arguments, and delete the parse
 *              tree if requested.
 *
 * MUTEX:       Interpreter is locked
 *
 ******************************************************************************/

void
acpi_ds_terminate_control_method(union acpi_operand_object *method_desc,
				 struct acpi_walk_state *walk_state)
{

	ACPI_FUNCTION_TRACE_PTR(ds_terminate_control_method, walk_state);

	/* method_desc is required, walk_state is optional */

	if (!method_desc) {
		return_VOID;
	}

	if (walk_state) {

		/* Delete all arguments and locals */

		acpi_ds_method_data_delete_all(walk_state);

		/*
		 * Delete any namespace objects created anywhere within the
		 * namespace by the execution of this method. Unless:
		 * 1) This method is a module-level executable code method, in which
		 *    case we want make the objects permanent.
		 * 2) There are other threads executing the method, in which case we
		 *    will wait until the last thread has completed.
		 */
		if (!(method_desc->method.info_flags & ACPI_METHOD_MODULE_LEVEL)
		    && (method_desc->method.thread_count == 1)) {

			/*
			 * Delete any direct children of (created by) this method.
			 * The interpreter lock is dropped around the namespace
			 * deletion, then reacquired.
			 */
			(void)acpi_ex_exit_interpreter();
			acpi_ns_delete_namespace_subtree(walk_state->
							 method_node);
			(void)acpi_ex_enter_interpreter();

			/*
			 * Delete any objects that were created by this method
			 * elsewhere in the namespace (if any were created).
			 * Use of the ACPI_METHOD_MODIFIED_NAMESPACE optimizes the
			 * deletion such that we don't have to perform an entire
			 * namespace walk for every control method execution.
			 */
			if (method_desc->method.
			    info_flags & ACPI_METHOD_MODIFIED_NAMESPACE) {
				(void)acpi_ex_exit_interpreter();
				acpi_ns_delete_namespace_by_owner(method_desc->
								  method.
								  owner_id);
				(void)acpi_ex_enter_interpreter();
				method_desc->method.info_flags &=
				    ~ACPI_METHOD_MODIFIED_NAMESPACE;
			}
		}

		/*
		 * If method is serialized, release the mutex and restore the
		 * current sync level for this thread
		 */
		if (method_desc->method.mutex) {

			/* Acquisition Depth handles recursive calls */

			method_desc->method.mutex->mutex.acquisition_depth--;
			if (!method_desc->method.mutex->mutex.acquisition_depth) {
				walk_state->thread->current_sync_level =
				    method_desc->method.mutex->mutex.
				    original_sync_level;

				acpi_os_release_mutex(method_desc->method.
						      mutex->mutex.os_mutex);

				/* Clear ownership so a new thread may acquire it */

				method_desc->method.mutex->mutex.thread_id = 0;
			}
		}
	}

	/* Decrement the thread count on the method */

	if (method_desc->method.thread_count) {
		method_desc->method.thread_count--;
	} else {
		ACPI_ERROR((AE_INFO, "Invalid zero thread count in method"));
	}

	/* Are there any other threads currently executing this method? */

	if (method_desc->method.thread_count) {
		/*
		 * Additional threads. Do not release the owner_id in this case,
		 * we immediately reuse it for the next thread executing this method
		 */
		ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
				  "*** Completed execution of one thread, %u threads remaining\n",
				  method_desc->method.thread_count));
	} else {
		/* This is the only executing thread for this method */

		/*
		 * Support to dynamically change a method from not_serialized to
		 * Serialized if it appears that the method is incorrectly written and
		 * does not support multiple thread execution. The best example of this
		 * is if such a method creates namespace objects and blocks. A second
		 * thread will fail with an AE_ALREADY_EXISTS exception.
		 *
		 * This code is here because we must wait until the last thread exits
		 * before marking the method as serialized.
		 */
		if (method_desc->method.
		    info_flags & ACPI_METHOD_SERIALIZED_PENDING) {
			if (walk_state) {
				ACPI_INFO(("Marking method %4.4s as Serialized "
					   "because of AE_ALREADY_EXISTS error",
					   walk_state->method_node->name.
					   ascii));
			}

			/*
			 * Method tried to create an object twice and was marked as
			 * "pending serialized". The probable cause is that the method
			 * cannot handle reentrancy.
			 *
			 * The method was created as not_serialized, but it tried to create
			 * a named object and then blocked, causing the second thread
			 * entrance to begin and then fail. Workaround this problem by
			 * marking the method permanently as Serialized when the last
			 * thread exits here.
			 */
			method_desc->method.info_flags &=
			    ~ACPI_METHOD_SERIALIZED_PENDING;

			method_desc->method.info_flags |=
			    (ACPI_METHOD_SERIALIZED |
			     ACPI_METHOD_IGNORE_SYNC_LEVEL);
			method_desc->method.sync_level = 0;
		}

		/* No more threads, we can free the owner_id */

		if (!
		    (method_desc->method.
		     info_flags & ACPI_METHOD_MODULE_LEVEL)) {
			acpi_ut_release_owner_id(&method_desc->method.owner_id);
		}
	}

	acpi_ex_stop_trace_method((struct acpi_namespace_node *)method_desc->
				  method.node, method_desc, walk_state);

	return_VOID;
}