1 /* 2 * acpi_osl.c - OS-dependent functions ($Revision: 83 $) 3 * 4 * Copyright (C) 2000 Andrew Henroid 5 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com> 6 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> 7 * 8 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 9 * 10 * This program is free software; you can redistribute it and/or modify 11 * it under the terms of the GNU General Public License as published by 12 * the Free Software Foundation; either version 2 of the License, or 13 * (at your option) any later version. 14 * 15 * This program is distributed in the hope that it will be useful, 16 * but WITHOUT ANY WARRANTY; without even the implied warranty of 17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 18 * GNU General Public License for more details. 19 * 20 * You should have received a copy of the GNU General Public License 21 * along with this program; if not, write to the Free Software 22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 23 * 24 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 25 * 26 */ 27 28 #include <linux/module.h> 29 #include <linux/kernel.h> 30 #include <linux/slab.h> 31 #include <linux/mm.h> 32 #include <linux/pci.h> 33 #include <linux/smp_lock.h> 34 #include <linux/interrupt.h> 35 #include <linux/kmod.h> 36 #include <linux/delay.h> 37 #include <linux/workqueue.h> 38 #include <linux/nmi.h> 39 #include <linux/acpi.h> 40 #include <acpi/acpi.h> 41 #include <asm/io.h> 42 #include <acpi/acpi_bus.h> 43 #include <acpi/processor.h> 44 #include <asm/uaccess.h> 45 46 #include <linux/efi.h> 47 48 #define _COMPONENT ACPI_OS_SERVICES 49 ACPI_MODULE_NAME("osl"); 50 #define PREFIX "ACPI: " 51 struct acpi_os_dpc { 52 acpi_osd_exec_callback function; 53 void *context; 54 struct work_struct work; 55 }; 56 57 #ifdef CONFIG_ACPI_CUSTOM_DSDT 58 #include CONFIG_ACPI_CUSTOM_DSDT_FILE 59 #endif 60 61 #ifdef ENABLE_DEBUGGER 62 #include <linux/kdb.h> 63 64 /* stuff for debugger support */ 65 int acpi_in_debugger; 66 EXPORT_SYMBOL(acpi_in_debugger); 67 68 extern char line_buf[80]; 69 #endif /*ENABLE_DEBUGGER */ 70 71 static unsigned int acpi_irq_irq; 72 static acpi_osd_handler acpi_irq_handler; 73 static void *acpi_irq_context; 74 static struct workqueue_struct *kacpid_wq; 75 76 static void __init acpi_request_region (struct acpi_generic_address *addr, 77 unsigned int length, char *desc) 78 { 79 struct resource *res; 80 81 if (!addr->address || !length) 82 return; 83 84 if (addr->space_id == ACPI_ADR_SPACE_SYSTEM_IO) 85 res = request_region(addr->address, length, desc); 86 else if (addr->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) 87 res = request_mem_region(addr->address, length, desc); 88 } 89 90 static int __init acpi_reserve_resources(void) 91 { 92 acpi_request_region(&acpi_gbl_FADT.xpm1a_event_block, acpi_gbl_FADT.pm1_event_length, 93 "ACPI PM1a_EVT_BLK"); 94 95 acpi_request_region(&acpi_gbl_FADT.xpm1b_event_block, acpi_gbl_FADT.pm1_event_length, 96 "ACPI PM1b_EVT_BLK"); 97 98 acpi_request_region(&acpi_gbl_FADT.xpm1a_control_block, acpi_gbl_FADT.pm1_control_length, 99 "ACPI PM1a_CNT_BLK"); 100 101 acpi_request_region(&acpi_gbl_FADT.xpm1b_control_block, acpi_gbl_FADT.pm1_control_length, 102 "ACPI PM1b_CNT_BLK"); 103 104 if (acpi_gbl_FADT.pm_timer_length == 4) 105 acpi_request_region(&acpi_gbl_FADT.xpm_timer_block, 4, "ACPI PM_TMR"); 106 107 acpi_request_region(&acpi_gbl_FADT.xpm2_control_block, acpi_gbl_FADT.pm2_control_length, 108 "ACPI PM2_CNT_BLK"); 109 110 /* Length of GPE blocks must be a non-negative multiple of 2 */ 111 112 if (!(acpi_gbl_FADT.gpe0_block_length & 0x1)) 113 acpi_request_region(&acpi_gbl_FADT.xgpe0_block, 114 acpi_gbl_FADT.gpe0_block_length, "ACPI GPE0_BLK"); 115 116 if (!(acpi_gbl_FADT.gpe1_block_length & 0x1)) 117 acpi_request_region(&acpi_gbl_FADT.xgpe1_block, 118 acpi_gbl_FADT.gpe1_block_length, "ACPI GPE1_BLK"); 119 120 return 0; 121 } 122 device_initcall(acpi_reserve_resources); 123 124 acpi_status acpi_os_initialize(void) 125 { 126 return AE_OK; 127 } 128 129 acpi_status acpi_os_initialize1(void) 130 { 131 /* 132 * Initialize PCI configuration space access, as we'll need to access 133 * it while walking the namespace (bus 0 and root bridges w/ _BBNs). 134 */ 135 if (!raw_pci_ops) { 136 printk(KERN_ERR PREFIX 137 "Access to PCI configuration space unavailable\n"); 138 return AE_NULL_ENTRY; 139 } 140 kacpid_wq = create_singlethread_workqueue("kacpid"); 141 BUG_ON(!kacpid_wq); 142 143 return AE_OK; 144 } 145 146 acpi_status acpi_os_terminate(void) 147 { 148 if (acpi_irq_handler) { 149 acpi_os_remove_interrupt_handler(acpi_irq_irq, 150 acpi_irq_handler); 151 } 152 153 destroy_workqueue(kacpid_wq); 154 155 return AE_OK; 156 } 157 158 void acpi_os_printf(const char *fmt, ...) 159 { 160 va_list args; 161 va_start(args, fmt); 162 acpi_os_vprintf(fmt, args); 163 va_end(args); 164 } 165 166 EXPORT_SYMBOL(acpi_os_printf); 167 168 void acpi_os_vprintf(const char *fmt, va_list args) 169 { 170 static char buffer[512]; 171 172 vsprintf(buffer, fmt, args); 173 174 #ifdef ENABLE_DEBUGGER 175 if (acpi_in_debugger) { 176 kdb_printf("%s", buffer); 177 } else { 178 printk("%s", buffer); 179 } 180 #else 181 printk("%s", buffer); 182 #endif 183 } 184 185 acpi_physical_address __init acpi_os_get_root_pointer(void) 186 { 187 if (efi_enabled) { 188 if (efi.acpi20 != EFI_INVALID_TABLE_ADDR) 189 return efi.acpi20; 190 else if (efi.acpi != EFI_INVALID_TABLE_ADDR) 191 return efi.acpi; 192 else { 193 printk(KERN_ERR PREFIX 194 "System description tables not found\n"); 195 return 0; 196 } 197 } else 198 return acpi_find_rsdp(); 199 } 200 201 void __iomem *acpi_os_map_memory(acpi_physical_address phys, acpi_size size) 202 { 203 if (phys > ULONG_MAX) { 204 printk(KERN_ERR PREFIX "Cannot map memory that high\n"); 205 return NULL; 206 } 207 if (acpi_gbl_permanent_mmap) 208 /* 209 * ioremap checks to ensure this is in reserved space 210 */ 211 return ioremap((unsigned long)phys, size); 212 else 213 return __acpi_map_table((unsigned long)phys, size); 214 } 215 EXPORT_SYMBOL_GPL(acpi_os_map_memory); 216 217 void acpi_os_unmap_memory(void __iomem * virt, acpi_size size) 218 { 219 if (acpi_gbl_permanent_mmap) { 220 iounmap(virt); 221 } 222 } 223 EXPORT_SYMBOL_GPL(acpi_os_unmap_memory); 224 225 #ifdef ACPI_FUTURE_USAGE 226 acpi_status 227 acpi_os_get_physical_address(void *virt, acpi_physical_address * phys) 228 { 229 if (!phys || !virt) 230 return AE_BAD_PARAMETER; 231 232 *phys = virt_to_phys(virt); 233 234 return AE_OK; 235 } 236 #endif 237 238 #define ACPI_MAX_OVERRIDE_LEN 100 239 240 static char acpi_os_name[ACPI_MAX_OVERRIDE_LEN]; 241 242 acpi_status 243 acpi_os_predefined_override(const struct acpi_predefined_names *init_val, 244 acpi_string * new_val) 245 { 246 if (!init_val || !new_val) 247 return AE_BAD_PARAMETER; 248 249 *new_val = NULL; 250 if (!memcmp(init_val->name, "_OS_", 4) && strlen(acpi_os_name)) { 251 printk(KERN_INFO PREFIX "Overriding _OS definition to '%s'\n", 252 acpi_os_name); 253 *new_val = acpi_os_name; 254 } 255 256 return AE_OK; 257 } 258 259 acpi_status 260 acpi_os_table_override(struct acpi_table_header * existing_table, 261 struct acpi_table_header ** new_table) 262 { 263 if (!existing_table || !new_table) 264 return AE_BAD_PARAMETER; 265 266 #ifdef CONFIG_ACPI_CUSTOM_DSDT 267 if (strncmp(existing_table->signature, "DSDT", 4) == 0) 268 *new_table = (struct acpi_table_header *)AmlCode; 269 else 270 *new_table = NULL; 271 #else 272 *new_table = NULL; 273 #endif 274 return AE_OK; 275 } 276 277 static irqreturn_t acpi_irq(int irq, void *dev_id) 278 { 279 return (*acpi_irq_handler) (acpi_irq_context) ? IRQ_HANDLED : IRQ_NONE; 280 } 281 282 acpi_status 283 acpi_os_install_interrupt_handler(u32 gsi, acpi_osd_handler handler, 284 void *context) 285 { 286 unsigned int irq; 287 288 /* 289 * Ignore the GSI from the core, and use the value in our copy of the 290 * FADT. It may not be the same if an interrupt source override exists 291 * for the SCI. 292 */ 293 gsi = acpi_gbl_FADT.sci_interrupt; 294 if (acpi_gsi_to_irq(gsi, &irq) < 0) { 295 printk(KERN_ERR PREFIX "SCI (ACPI GSI %d) not registered\n", 296 gsi); 297 return AE_OK; 298 } 299 300 acpi_irq_handler = handler; 301 acpi_irq_context = context; 302 if (request_irq(irq, acpi_irq, IRQF_SHARED, "acpi", acpi_irq)) { 303 printk(KERN_ERR PREFIX "SCI (IRQ%d) allocation failed\n", irq); 304 return AE_NOT_ACQUIRED; 305 } 306 acpi_irq_irq = irq; 307 308 return AE_OK; 309 } 310 311 acpi_status acpi_os_remove_interrupt_handler(u32 irq, acpi_osd_handler handler) 312 { 313 if (irq) { 314 free_irq(irq, acpi_irq); 315 acpi_irq_handler = NULL; 316 acpi_irq_irq = 0; 317 } 318 319 return AE_OK; 320 } 321 322 /* 323 * Running in interpreter thread context, safe to sleep 324 */ 325 326 void acpi_os_sleep(acpi_integer ms) 327 { 328 schedule_timeout_interruptible(msecs_to_jiffies(ms)); 329 } 330 331 EXPORT_SYMBOL(acpi_os_sleep); 332 333 void acpi_os_stall(u32 us) 334 { 335 while (us) { 336 u32 delay = 1000; 337 338 if (delay > us) 339 delay = us; 340 udelay(delay); 341 touch_nmi_watchdog(); 342 us -= delay; 343 } 344 } 345 346 EXPORT_SYMBOL(acpi_os_stall); 347 348 /* 349 * Support ACPI 3.0 AML Timer operand 350 * Returns 64-bit free-running, monotonically increasing timer 351 * with 100ns granularity 352 */ 353 u64 acpi_os_get_timer(void) 354 { 355 static u64 t; 356 357 #ifdef CONFIG_HPET 358 /* TBD: use HPET if available */ 359 #endif 360 361 #ifdef CONFIG_X86_PM_TIMER 362 /* TBD: default to PM timer if HPET was not available */ 363 #endif 364 if (!t) 365 printk(KERN_ERR PREFIX "acpi_os_get_timer() TBD\n"); 366 367 return ++t; 368 } 369 370 acpi_status acpi_os_read_port(acpi_io_address port, u32 * value, u32 width) 371 { 372 u32 dummy; 373 374 if (!value) 375 value = &dummy; 376 377 switch (width) { 378 case 8: 379 *(u8 *) value = inb(port); 380 break; 381 case 16: 382 *(u16 *) value = inw(port); 383 break; 384 case 32: 385 *(u32 *) value = inl(port); 386 break; 387 default: 388 BUG(); 389 } 390 391 return AE_OK; 392 } 393 394 EXPORT_SYMBOL(acpi_os_read_port); 395 396 acpi_status acpi_os_write_port(acpi_io_address port, u32 value, u32 width) 397 { 398 switch (width) { 399 case 8: 400 outb(value, port); 401 break; 402 case 16: 403 outw(value, port); 404 break; 405 case 32: 406 outl(value, port); 407 break; 408 default: 409 BUG(); 410 } 411 412 return AE_OK; 413 } 414 415 EXPORT_SYMBOL(acpi_os_write_port); 416 417 acpi_status 418 acpi_os_read_memory(acpi_physical_address phys_addr, u32 * value, u32 width) 419 { 420 u32 dummy; 421 void __iomem *virt_addr; 422 423 virt_addr = ioremap(phys_addr, width); 424 if (!value) 425 value = &dummy; 426 427 switch (width) { 428 case 8: 429 *(u8 *) value = readb(virt_addr); 430 break; 431 case 16: 432 *(u16 *) value = readw(virt_addr); 433 break; 434 case 32: 435 *(u32 *) value = readl(virt_addr); 436 break; 437 default: 438 BUG(); 439 } 440 441 iounmap(virt_addr); 442 443 return AE_OK; 444 } 445 446 acpi_status 447 acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width) 448 { 449 void __iomem *virt_addr; 450 451 virt_addr = ioremap(phys_addr, width); 452 453 switch (width) { 454 case 8: 455 writeb(value, virt_addr); 456 break; 457 case 16: 458 writew(value, virt_addr); 459 break; 460 case 32: 461 writel(value, virt_addr); 462 break; 463 default: 464 BUG(); 465 } 466 467 iounmap(virt_addr); 468 469 return AE_OK; 470 } 471 472 acpi_status 473 acpi_os_read_pci_configuration(struct acpi_pci_id * pci_id, u32 reg, 474 void *value, u32 width) 475 { 476 int result, size; 477 478 if (!value) 479 return AE_BAD_PARAMETER; 480 481 switch (width) { 482 case 8: 483 size = 1; 484 break; 485 case 16: 486 size = 2; 487 break; 488 case 32: 489 size = 4; 490 break; 491 default: 492 return AE_ERROR; 493 } 494 495 BUG_ON(!raw_pci_ops); 496 497 result = raw_pci_ops->read(pci_id->segment, pci_id->bus, 498 PCI_DEVFN(pci_id->device, pci_id->function), 499 reg, size, value); 500 501 return (result ? AE_ERROR : AE_OK); 502 } 503 504 EXPORT_SYMBOL(acpi_os_read_pci_configuration); 505 506 acpi_status 507 acpi_os_write_pci_configuration(struct acpi_pci_id * pci_id, u32 reg, 508 acpi_integer value, u32 width) 509 { 510 int result, size; 511 512 switch (width) { 513 case 8: 514 size = 1; 515 break; 516 case 16: 517 size = 2; 518 break; 519 case 32: 520 size = 4; 521 break; 522 default: 523 return AE_ERROR; 524 } 525 526 BUG_ON(!raw_pci_ops); 527 528 result = raw_pci_ops->write(pci_id->segment, pci_id->bus, 529 PCI_DEVFN(pci_id->device, pci_id->function), 530 reg, size, value); 531 532 return (result ? AE_ERROR : AE_OK); 533 } 534 535 /* TODO: Change code to take advantage of driver model more */ 536 static void acpi_os_derive_pci_id_2(acpi_handle rhandle, /* upper bound */ 537 acpi_handle chandle, /* current node */ 538 struct acpi_pci_id **id, 539 int *is_bridge, u8 * bus_number) 540 { 541 acpi_handle handle; 542 struct acpi_pci_id *pci_id = *id; 543 acpi_status status; 544 unsigned long temp; 545 acpi_object_type type; 546 u8 tu8; 547 548 acpi_get_parent(chandle, &handle); 549 if (handle != rhandle) { 550 acpi_os_derive_pci_id_2(rhandle, handle, &pci_id, is_bridge, 551 bus_number); 552 553 status = acpi_get_type(handle, &type); 554 if ((ACPI_FAILURE(status)) || (type != ACPI_TYPE_DEVICE)) 555 return; 556 557 status = 558 acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL, 559 &temp); 560 if (ACPI_SUCCESS(status)) { 561 pci_id->device = ACPI_HIWORD(ACPI_LODWORD(temp)); 562 pci_id->function = ACPI_LOWORD(ACPI_LODWORD(temp)); 563 564 if (*is_bridge) 565 pci_id->bus = *bus_number; 566 567 /* any nicer way to get bus number of bridge ? */ 568 status = 569 acpi_os_read_pci_configuration(pci_id, 0x0e, &tu8, 570 8); 571 if (ACPI_SUCCESS(status) 572 && ((tu8 & 0x7f) == 1 || (tu8 & 0x7f) == 2)) { 573 status = 574 acpi_os_read_pci_configuration(pci_id, 0x18, 575 &tu8, 8); 576 if (!ACPI_SUCCESS(status)) { 577 /* Certainly broken... FIX ME */ 578 return; 579 } 580 *is_bridge = 1; 581 pci_id->bus = tu8; 582 status = 583 acpi_os_read_pci_configuration(pci_id, 0x19, 584 &tu8, 8); 585 if (ACPI_SUCCESS(status)) { 586 *bus_number = tu8; 587 } 588 } else 589 *is_bridge = 0; 590 } 591 } 592 } 593 594 void acpi_os_derive_pci_id(acpi_handle rhandle, /* upper bound */ 595 acpi_handle chandle, /* current node */ 596 struct acpi_pci_id **id) 597 { 598 int is_bridge = 1; 599 u8 bus_number = (*id)->bus; 600 601 acpi_os_derive_pci_id_2(rhandle, chandle, id, &is_bridge, &bus_number); 602 } 603 604 static void acpi_os_execute_deferred(struct work_struct *work) 605 { 606 struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work); 607 608 if (!dpc) { 609 printk(KERN_ERR PREFIX "Invalid (NULL) context\n"); 610 return; 611 } 612 613 dpc->function(dpc->context); 614 615 kfree(dpc); 616 617 return; 618 } 619 620 /******************************************************************************* 621 * 622 * FUNCTION: acpi_os_execute 623 * 624 * PARAMETERS: Type - Type of the callback 625 * Function - Function to be executed 626 * Context - Function parameters 627 * 628 * RETURN: Status 629 * 630 * DESCRIPTION: Depending on type, either queues function for deferred execution or 631 * immediately executes function on a separate thread. 632 * 633 ******************************************************************************/ 634 635 acpi_status acpi_os_execute(acpi_execute_type type, 636 acpi_osd_exec_callback function, void *context) 637 { 638 acpi_status status = AE_OK; 639 struct acpi_os_dpc *dpc; 640 641 ACPI_FUNCTION_TRACE("os_queue_for_execution"); 642 643 ACPI_DEBUG_PRINT((ACPI_DB_EXEC, 644 "Scheduling function [%p(%p)] for deferred execution.\n", 645 function, context)); 646 647 if (!function) 648 return_ACPI_STATUS(AE_BAD_PARAMETER); 649 650 /* 651 * Allocate/initialize DPC structure. Note that this memory will be 652 * freed by the callee. The kernel handles the work_struct list in a 653 * way that allows us to also free its memory inside the callee. 654 * Because we may want to schedule several tasks with different 655 * parameters we can't use the approach some kernel code uses of 656 * having a static work_struct. 657 */ 658 659 dpc = kmalloc(sizeof(struct acpi_os_dpc), GFP_ATOMIC); 660 if (!dpc) 661 return_ACPI_STATUS(AE_NO_MEMORY); 662 663 dpc->function = function; 664 dpc->context = context; 665 666 INIT_WORK(&dpc->work, acpi_os_execute_deferred); 667 if (!queue_work(kacpid_wq, &dpc->work)) { 668 ACPI_DEBUG_PRINT((ACPI_DB_ERROR, 669 "Call to queue_work() failed.\n")); 670 kfree(dpc); 671 status = AE_ERROR; 672 } 673 674 return_ACPI_STATUS(status); 675 } 676 677 EXPORT_SYMBOL(acpi_os_execute); 678 679 void acpi_os_wait_events_complete(void *context) 680 { 681 flush_workqueue(kacpid_wq); 682 } 683 684 EXPORT_SYMBOL(acpi_os_wait_events_complete); 685 686 /* 687 * Allocate the memory for a spinlock and initialize it. 688 */ 689 acpi_status acpi_os_create_lock(acpi_spinlock * handle) 690 { 691 spin_lock_init(*handle); 692 693 return AE_OK; 694 } 695 696 /* 697 * Deallocate the memory for a spinlock. 698 */ 699 void acpi_os_delete_lock(acpi_spinlock handle) 700 { 701 return; 702 } 703 704 acpi_status 705 acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle * handle) 706 { 707 struct semaphore *sem = NULL; 708 709 710 sem = acpi_os_allocate(sizeof(struct semaphore)); 711 if (!sem) 712 return AE_NO_MEMORY; 713 memset(sem, 0, sizeof(struct semaphore)); 714 715 sema_init(sem, initial_units); 716 717 *handle = (acpi_handle *) sem; 718 719 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Creating semaphore[%p|%d].\n", 720 *handle, initial_units)); 721 722 return AE_OK; 723 } 724 725 EXPORT_SYMBOL(acpi_os_create_semaphore); 726 727 /* 728 * TODO: A better way to delete semaphores? Linux doesn't have a 729 * 'delete_semaphore()' function -- may result in an invalid 730 * pointer dereference for non-synchronized consumers. Should 731 * we at least check for blocked threads and signal/cancel them? 732 */ 733 734 acpi_status acpi_os_delete_semaphore(acpi_handle handle) 735 { 736 struct semaphore *sem = (struct semaphore *)handle; 737 738 739 if (!sem) 740 return AE_BAD_PARAMETER; 741 742 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Deleting semaphore[%p].\n", handle)); 743 744 kfree(sem); 745 sem = NULL; 746 747 return AE_OK; 748 } 749 750 EXPORT_SYMBOL(acpi_os_delete_semaphore); 751 752 /* 753 * TODO: The kernel doesn't have a 'down_timeout' function -- had to 754 * improvise. The process is to sleep for one scheduler quantum 755 * until the semaphore becomes available. Downside is that this 756 * may result in starvation for timeout-based waits when there's 757 * lots of semaphore activity. 758 * 759 * TODO: Support for units > 1? 760 */ 761 acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout) 762 { 763 acpi_status status = AE_OK; 764 struct semaphore *sem = (struct semaphore *)handle; 765 int ret = 0; 766 767 768 if (!sem || (units < 1)) 769 return AE_BAD_PARAMETER; 770 771 if (units > 1) 772 return AE_SUPPORT; 773 774 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Waiting for semaphore[%p|%d|%d]\n", 775 handle, units, timeout)); 776 777 /* 778 * This can be called during resume with interrupts off. 779 * Like boot-time, we should be single threaded and will 780 * always get the lock if we try -- timeout or not. 781 * If this doesn't succeed, then we will oops courtesy of 782 * might_sleep() in down(). 783 */ 784 if (!down_trylock(sem)) 785 return AE_OK; 786 787 switch (timeout) { 788 /* 789 * No Wait: 790 * -------- 791 * A zero timeout value indicates that we shouldn't wait - just 792 * acquire the semaphore if available otherwise return AE_TIME 793 * (a.k.a. 'would block'). 794 */ 795 case 0: 796 if (down_trylock(sem)) 797 status = AE_TIME; 798 break; 799 800 /* 801 * Wait Indefinitely: 802 * ------------------ 803 */ 804 case ACPI_WAIT_FOREVER: 805 down(sem); 806 break; 807 808 /* 809 * Wait w/ Timeout: 810 * ---------------- 811 */ 812 default: 813 // TODO: A better timeout algorithm? 814 { 815 int i = 0; 816 static const int quantum_ms = 1000 / HZ; 817 818 ret = down_trylock(sem); 819 for (i = timeout; (i > 0 && ret != 0); i -= quantum_ms) { 820 schedule_timeout_interruptible(1); 821 ret = down_trylock(sem); 822 } 823 824 if (ret != 0) 825 status = AE_TIME; 826 } 827 break; 828 } 829 830 if (ACPI_FAILURE(status)) { 831 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, 832 "Failed to acquire semaphore[%p|%d|%d], %s", 833 handle, units, timeout, 834 acpi_format_exception(status))); 835 } else { 836 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, 837 "Acquired semaphore[%p|%d|%d]", handle, 838 units, timeout)); 839 } 840 841 return status; 842 } 843 844 EXPORT_SYMBOL(acpi_os_wait_semaphore); 845 846 /* 847 * TODO: Support for units > 1? 848 */ 849 acpi_status acpi_os_signal_semaphore(acpi_handle handle, u32 units) 850 { 851 struct semaphore *sem = (struct semaphore *)handle; 852 853 854 if (!sem || (units < 1)) 855 return AE_BAD_PARAMETER; 856 857 if (units > 1) 858 return AE_SUPPORT; 859 860 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Signaling semaphore[%p|%d]\n", handle, 861 units)); 862 863 up(sem); 864 865 return AE_OK; 866 } 867 868 EXPORT_SYMBOL(acpi_os_signal_semaphore); 869 870 #ifdef ACPI_FUTURE_USAGE 871 u32 acpi_os_get_line(char *buffer) 872 { 873 874 #ifdef ENABLE_DEBUGGER 875 if (acpi_in_debugger) { 876 u32 chars; 877 878 kdb_read(buffer, sizeof(line_buf)); 879 880 /* remove the CR kdb includes */ 881 chars = strlen(buffer) - 1; 882 buffer[chars] = '\0'; 883 } 884 #endif 885 886 return 0; 887 } 888 #endif /* ACPI_FUTURE_USAGE */ 889 890 acpi_status acpi_os_signal(u32 function, void *info) 891 { 892 switch (function) { 893 case ACPI_SIGNAL_FATAL: 894 printk(KERN_ERR PREFIX "Fatal opcode executed\n"); 895 break; 896 case ACPI_SIGNAL_BREAKPOINT: 897 /* 898 * AML Breakpoint 899 * ACPI spec. says to treat it as a NOP unless 900 * you are debugging. So if/when we integrate 901 * AML debugger into the kernel debugger its 902 * hook will go here. But until then it is 903 * not useful to print anything on breakpoints. 904 */ 905 break; 906 default: 907 break; 908 } 909 910 return AE_OK; 911 } 912 913 EXPORT_SYMBOL(acpi_os_signal); 914 915 static int __init acpi_os_name_setup(char *str) 916 { 917 char *p = acpi_os_name; 918 int count = ACPI_MAX_OVERRIDE_LEN - 1; 919 920 if (!str || !*str) 921 return 0; 922 923 for (; count-- && str && *str; str++) { 924 if (isalnum(*str) || *str == ' ' || *str == ':') 925 *p++ = *str; 926 else if (*str == '\'' || *str == '"') 927 continue; 928 else 929 break; 930 } 931 *p = 0; 932 933 return 1; 934 935 } 936 937 __setup("acpi_os_name=", acpi_os_name_setup); 938 939 /* 940 * _OSI control 941 * empty string disables _OSI 942 * TBD additional string adds to _OSI 943 */ 944 static int __init acpi_osi_setup(char *str) 945 { 946 if (str == NULL || *str == '\0') { 947 printk(KERN_INFO PREFIX "_OSI method disabled\n"); 948 acpi_gbl_create_osi_method = FALSE; 949 } else { 950 /* TBD */ 951 printk(KERN_ERR PREFIX "_OSI additional string ignored -- %s\n", 952 str); 953 } 954 955 return 1; 956 } 957 958 __setup("acpi_osi=", acpi_osi_setup); 959 960 /* enable serialization to combat AE_ALREADY_EXISTS errors */ 961 static int __init acpi_serialize_setup(char *str) 962 { 963 printk(KERN_INFO PREFIX "serialize enabled\n"); 964 965 acpi_gbl_all_methods_serialized = TRUE; 966 967 return 1; 968 } 969 970 __setup("acpi_serialize", acpi_serialize_setup); 971 972 /* 973 * Wake and Run-Time GPES are expected to be separate. 974 * We disable wake-GPEs at run-time to prevent spurious 975 * interrupts. 976 * 977 * However, if a system exists that shares Wake and 978 * Run-time events on the same GPE this flag is available 979 * to tell Linux to keep the wake-time GPEs enabled at run-time. 980 */ 981 static int __init acpi_wake_gpes_always_on_setup(char *str) 982 { 983 printk(KERN_INFO PREFIX "wake GPEs not disabled\n"); 984 985 acpi_gbl_leave_wake_gpes_disabled = FALSE; 986 987 return 1; 988 } 989 990 __setup("acpi_wake_gpes_always_on", acpi_wake_gpes_always_on_setup); 991 992 /* 993 * max_cstate is defined in the base kernel so modules can 994 * change it w/o depending on the state of the processor module. 995 */ 996 unsigned int max_cstate = ACPI_PROCESSOR_MAX_POWER; 997 998 EXPORT_SYMBOL(max_cstate); 999 1000 /* 1001 * Acquire a spinlock. 1002 * 1003 * handle is a pointer to the spinlock_t. 1004 */ 1005 1006 acpi_cpu_flags acpi_os_acquire_lock(acpi_spinlock lockp) 1007 { 1008 acpi_cpu_flags flags; 1009 spin_lock_irqsave(lockp, flags); 1010 return flags; 1011 } 1012 1013 /* 1014 * Release a spinlock. See above. 1015 */ 1016 1017 void acpi_os_release_lock(acpi_spinlock lockp, acpi_cpu_flags flags) 1018 { 1019 spin_unlock_irqrestore(lockp, flags); 1020 } 1021 1022 #ifndef ACPI_USE_LOCAL_CACHE 1023 1024 /******************************************************************************* 1025 * 1026 * FUNCTION: acpi_os_create_cache 1027 * 1028 * PARAMETERS: name - Ascii name for the cache 1029 * size - Size of each cached object 1030 * depth - Maximum depth of the cache (in objects) <ignored> 1031 * cache - Where the new cache object is returned 1032 * 1033 * RETURN: status 1034 * 1035 * DESCRIPTION: Create a cache object 1036 * 1037 ******************************************************************************/ 1038 1039 acpi_status 1040 acpi_os_create_cache(char *name, u16 size, u16 depth, acpi_cache_t ** cache) 1041 { 1042 *cache = kmem_cache_create(name, size, 0, 0, NULL, NULL); 1043 if (*cache == NULL) 1044 return AE_ERROR; 1045 else 1046 return AE_OK; 1047 } 1048 1049 /******************************************************************************* 1050 * 1051 * FUNCTION: acpi_os_purge_cache 1052 * 1053 * PARAMETERS: Cache - Handle to cache object 1054 * 1055 * RETURN: Status 1056 * 1057 * DESCRIPTION: Free all objects within the requested cache. 1058 * 1059 ******************************************************************************/ 1060 1061 acpi_status acpi_os_purge_cache(acpi_cache_t * cache) 1062 { 1063 kmem_cache_shrink(cache); 1064 return (AE_OK); 1065 } 1066 1067 /******************************************************************************* 1068 * 1069 * FUNCTION: acpi_os_delete_cache 1070 * 1071 * PARAMETERS: Cache - Handle to cache object 1072 * 1073 * RETURN: Status 1074 * 1075 * DESCRIPTION: Free all objects within the requested cache and delete the 1076 * cache object. 1077 * 1078 ******************************************************************************/ 1079 1080 acpi_status acpi_os_delete_cache(acpi_cache_t * cache) 1081 { 1082 kmem_cache_destroy(cache); 1083 return (AE_OK); 1084 } 1085 1086 /******************************************************************************* 1087 * 1088 * FUNCTION: acpi_os_release_object 1089 * 1090 * PARAMETERS: Cache - Handle to cache object 1091 * Object - The object to be released 1092 * 1093 * RETURN: None 1094 * 1095 * DESCRIPTION: Release an object to the specified cache. If cache is full, 1096 * the object is deleted. 1097 * 1098 ******************************************************************************/ 1099 1100 acpi_status acpi_os_release_object(acpi_cache_t * cache, void *object) 1101 { 1102 kmem_cache_free(cache, object); 1103 return (AE_OK); 1104 } 1105 1106 /****************************************************************************** 1107 * 1108 * FUNCTION: acpi_os_validate_interface 1109 * 1110 * PARAMETERS: interface - Requested interface to be validated 1111 * 1112 * RETURN: AE_OK if interface is supported, AE_SUPPORT otherwise 1113 * 1114 * DESCRIPTION: Match an interface string to the interfaces supported by the 1115 * host. Strings originate from an AML call to the _OSI method. 1116 * 1117 *****************************************************************************/ 1118 1119 acpi_status 1120 acpi_os_validate_interface (char *interface) 1121 { 1122 1123 return AE_SUPPORT; 1124 } 1125 1126 1127 /****************************************************************************** 1128 * 1129 * FUNCTION: acpi_os_validate_address 1130 * 1131 * PARAMETERS: space_id - ACPI space ID 1132 * address - Physical address 1133 * length - Address length 1134 * 1135 * RETURN: AE_OK if address/length is valid for the space_id. Otherwise, 1136 * should return AE_AML_ILLEGAL_ADDRESS. 1137 * 1138 * DESCRIPTION: Validate a system address via the host OS. Used to validate 1139 * the addresses accessed by AML operation regions. 1140 * 1141 *****************************************************************************/ 1142 1143 acpi_status 1144 acpi_os_validate_address ( 1145 u8 space_id, 1146 acpi_physical_address address, 1147 acpi_size length) 1148 { 1149 1150 return AE_OK; 1151 } 1152 1153 1154 #endif 1155