1 /* 2 * acpi_osl.c - OS-dependent functions ($Revision: 83 $) 3 * 4 * Copyright (C) 2000 Andrew Henroid 5 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com> 6 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> 7 * 8 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 9 * 10 * This program is free software; you can redistribute it and/or modify 11 * it under the terms of the GNU General Public License as published by 12 * the Free Software Foundation; either version 2 of the License, or 13 * (at your option) any later version. 14 * 15 * This program is distributed in the hope that it will be useful, 16 * but WITHOUT ANY WARRANTY; without even the implied warranty of 17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 18 * GNU General Public License for more details. 19 * 20 * You should have received a copy of the GNU General Public License 21 * along with this program; if not, write to the Free Software 22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 23 * 24 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 25 * 26 */ 27 28 #include <linux/config.h> 29 #include <linux/module.h> 30 #include <linux/kernel.h> 31 #include <linux/slab.h> 32 #include <linux/mm.h> 33 #include <linux/pci.h> 34 #include <linux/smp_lock.h> 35 #include <linux/interrupt.h> 36 #include <linux/kmod.h> 37 #include <linux/delay.h> 38 #include <linux/workqueue.h> 39 #include <linux/nmi.h> 40 #include <acpi/acpi.h> 41 #include <asm/io.h> 42 #include <acpi/acpi_bus.h> 43 #include <acpi/processor.h> 44 #include <asm/uaccess.h> 45 46 #include <linux/efi.h> 47 48 49 #define _COMPONENT ACPI_OS_SERVICES 50 ACPI_MODULE_NAME ("osl") 51 52 #define PREFIX "ACPI: " 53 54 struct acpi_os_dpc 55 { 56 acpi_osd_exec_callback function; 57 void *context; 58 }; 59 60 #ifdef CONFIG_ACPI_CUSTOM_DSDT 61 #include CONFIG_ACPI_CUSTOM_DSDT_FILE 62 #endif 63 64 #ifdef ENABLE_DEBUGGER 65 #include <linux/kdb.h> 66 67 /* stuff for debugger support */ 68 int acpi_in_debugger; 69 EXPORT_SYMBOL(acpi_in_debugger); 70 71 extern char line_buf[80]; 72 #endif /*ENABLE_DEBUGGER*/ 73 74 int acpi_specific_hotkey_enabled = TRUE; 75 EXPORT_SYMBOL(acpi_specific_hotkey_enabled); 76 77 static unsigned int acpi_irq_irq; 78 static acpi_osd_handler acpi_irq_handler; 79 static void *acpi_irq_context; 80 static struct workqueue_struct *kacpid_wq; 81 82 acpi_status 83 acpi_os_initialize(void) 84 { 85 return AE_OK; 86 } 87 88 acpi_status 89 acpi_os_initialize1(void) 90 { 91 /* 92 * Initialize PCI configuration space access, as we'll need to access 93 * it while walking the namespace (bus 0 and root bridges w/ _BBNs). 94 */ 95 #ifdef CONFIG_ACPI_PCI 96 if (!raw_pci_ops) { 97 printk(KERN_ERR PREFIX "Access to PCI configuration space unavailable\n"); 98 return AE_NULL_ENTRY; 99 } 100 #endif 101 kacpid_wq = create_singlethread_workqueue("kacpid"); 102 BUG_ON(!kacpid_wq); 103 104 return AE_OK; 105 } 106 107 acpi_status 108 acpi_os_terminate(void) 109 { 110 if (acpi_irq_handler) { 111 acpi_os_remove_interrupt_handler(acpi_irq_irq, 112 acpi_irq_handler); 113 } 114 115 destroy_workqueue(kacpid_wq); 116 117 return AE_OK; 118 } 119 120 void 121 acpi_os_printf(const char *fmt,...) 122 { 123 va_list args; 124 va_start(args, fmt); 125 acpi_os_vprintf(fmt, args); 126 va_end(args); 127 } 128 EXPORT_SYMBOL(acpi_os_printf); 129 130 void 131 acpi_os_vprintf(const char *fmt, va_list args) 132 { 133 static char buffer[512]; 134 135 vsprintf(buffer, fmt, args); 136 137 #ifdef ENABLE_DEBUGGER 138 if (acpi_in_debugger) { 139 kdb_printf("%s", buffer); 140 } else { 141 printk("%s", buffer); 142 } 143 #else 144 printk("%s", buffer); 145 #endif 146 } 147 148 extern int acpi_in_resume; 149 void * 150 acpi_os_allocate(acpi_size size) 151 { 152 if (acpi_in_resume) 153 return kmalloc(size, GFP_ATOMIC); 154 else 155 return kmalloc(size, GFP_KERNEL); 156 } 157 158 void 159 acpi_os_free(void *ptr) 160 { 161 kfree(ptr); 162 } 163 EXPORT_SYMBOL(acpi_os_free); 164 165 acpi_status 166 acpi_os_get_root_pointer(u32 flags, struct acpi_pointer *addr) 167 { 168 if (efi_enabled) { 169 addr->pointer_type = ACPI_PHYSICAL_POINTER; 170 if (efi.acpi20) 171 addr->pointer.physical = 172 (acpi_physical_address) virt_to_phys(efi.acpi20); 173 else if (efi.acpi) 174 addr->pointer.physical = 175 (acpi_physical_address) virt_to_phys(efi.acpi); 176 else { 177 printk(KERN_ERR PREFIX "System description tables not found\n"); 178 return AE_NOT_FOUND; 179 } 180 } else { 181 if (ACPI_FAILURE(acpi_find_root_pointer(flags, addr))) { 182 printk(KERN_ERR PREFIX "System description tables not found\n"); 183 return AE_NOT_FOUND; 184 } 185 } 186 187 return AE_OK; 188 } 189 190 acpi_status 191 acpi_os_map_memory(acpi_physical_address phys, acpi_size size, void __iomem **virt) 192 { 193 if (efi_enabled) { 194 if (EFI_MEMORY_WB & efi_mem_attributes(phys)) { 195 *virt = (void __iomem *) phys_to_virt(phys); 196 } else { 197 *virt = ioremap(phys, size); 198 } 199 } else { 200 if (phys > ULONG_MAX) { 201 printk(KERN_ERR PREFIX "Cannot map memory that high\n"); 202 return AE_BAD_PARAMETER; 203 } 204 /* 205 * ioremap checks to ensure this is in reserved space 206 */ 207 *virt = ioremap((unsigned long) phys, size); 208 } 209 210 if (!*virt) 211 return AE_NO_MEMORY; 212 213 return AE_OK; 214 } 215 216 void 217 acpi_os_unmap_memory(void __iomem *virt, acpi_size size) 218 { 219 iounmap(virt); 220 } 221 222 #ifdef ACPI_FUTURE_USAGE 223 acpi_status 224 acpi_os_get_physical_address(void *virt, acpi_physical_address *phys) 225 { 226 if(!phys || !virt) 227 return AE_BAD_PARAMETER; 228 229 *phys = virt_to_phys(virt); 230 231 return AE_OK; 232 } 233 #endif 234 235 #define ACPI_MAX_OVERRIDE_LEN 100 236 237 static char acpi_os_name[ACPI_MAX_OVERRIDE_LEN]; 238 239 acpi_status 240 acpi_os_predefined_override (const struct acpi_predefined_names *init_val, 241 acpi_string *new_val) 242 { 243 if (!init_val || !new_val) 244 return AE_BAD_PARAMETER; 245 246 *new_val = NULL; 247 if (!memcmp (init_val->name, "_OS_", 4) && strlen(acpi_os_name)) { 248 printk(KERN_INFO PREFIX "Overriding _OS definition to '%s'\n", 249 acpi_os_name); 250 *new_val = acpi_os_name; 251 } 252 253 return AE_OK; 254 } 255 256 acpi_status 257 acpi_os_table_override (struct acpi_table_header *existing_table, 258 struct acpi_table_header **new_table) 259 { 260 if (!existing_table || !new_table) 261 return AE_BAD_PARAMETER; 262 263 #ifdef CONFIG_ACPI_CUSTOM_DSDT 264 if (strncmp(existing_table->signature, "DSDT", 4) == 0) 265 *new_table = (struct acpi_table_header*)AmlCode; 266 else 267 *new_table = NULL; 268 #else 269 *new_table = NULL; 270 #endif 271 return AE_OK; 272 } 273 274 static irqreturn_t 275 acpi_irq(int irq, void *dev_id, struct pt_regs *regs) 276 { 277 return (*acpi_irq_handler)(acpi_irq_context) ? IRQ_HANDLED : IRQ_NONE; 278 } 279 280 acpi_status 281 acpi_os_install_interrupt_handler(u32 gsi, acpi_osd_handler handler, void *context) 282 { 283 unsigned int irq; 284 285 /* 286 * Ignore the GSI from the core, and use the value in our copy of the 287 * FADT. It may not be the same if an interrupt source override exists 288 * for the SCI. 289 */ 290 gsi = acpi_fadt.sci_int; 291 if (acpi_gsi_to_irq(gsi, &irq) < 0) { 292 printk(KERN_ERR PREFIX "SCI (ACPI GSI %d) not registered\n", 293 gsi); 294 return AE_OK; 295 } 296 297 acpi_irq_handler = handler; 298 acpi_irq_context = context; 299 if (request_irq(irq, acpi_irq, SA_SHIRQ, "acpi", acpi_irq)) { 300 printk(KERN_ERR PREFIX "SCI (IRQ%d) allocation failed\n", irq); 301 return AE_NOT_ACQUIRED; 302 } 303 acpi_irq_irq = irq; 304 305 return AE_OK; 306 } 307 308 acpi_status 309 acpi_os_remove_interrupt_handler(u32 irq, acpi_osd_handler handler) 310 { 311 if (irq) { 312 free_irq(irq, acpi_irq); 313 acpi_irq_handler = NULL; 314 acpi_irq_irq = 0; 315 } 316 317 return AE_OK; 318 } 319 320 /* 321 * Running in interpreter thread context, safe to sleep 322 */ 323 324 void 325 acpi_os_sleep(acpi_integer ms) 326 { 327 current->state = TASK_INTERRUPTIBLE; 328 schedule_timeout(((signed long) ms * HZ) / 1000); 329 } 330 EXPORT_SYMBOL(acpi_os_sleep); 331 332 void 333 acpi_os_stall(u32 us) 334 { 335 while (us) { 336 u32 delay = 1000; 337 338 if (delay > us) 339 delay = us; 340 udelay(delay); 341 touch_nmi_watchdog(); 342 us -= delay; 343 } 344 } 345 EXPORT_SYMBOL(acpi_os_stall); 346 347 /* 348 * Support ACPI 3.0 AML Timer operand 349 * Returns 64-bit free-running, monotonically increasing timer 350 * with 100ns granularity 351 */ 352 u64 353 acpi_os_get_timer (void) 354 { 355 static u64 t; 356 357 #ifdef CONFIG_HPET 358 /* TBD: use HPET if available */ 359 #endif 360 361 #ifdef CONFIG_X86_PM_TIMER 362 /* TBD: default to PM timer if HPET was not available */ 363 #endif 364 if (!t) 365 printk(KERN_ERR PREFIX "acpi_os_get_timer() TBD\n"); 366 367 return ++t; 368 } 369 370 acpi_status 371 acpi_os_read_port( 372 acpi_io_address port, 373 u32 *value, 374 u32 width) 375 { 376 u32 dummy; 377 378 if (!value) 379 value = &dummy; 380 381 switch (width) 382 { 383 case 8: 384 *(u8*) value = inb(port); 385 break; 386 case 16: 387 *(u16*) value = inw(port); 388 break; 389 case 32: 390 *(u32*) value = inl(port); 391 break; 392 default: 393 BUG(); 394 } 395 396 return AE_OK; 397 } 398 EXPORT_SYMBOL(acpi_os_read_port); 399 400 acpi_status 401 acpi_os_write_port( 402 acpi_io_address port, 403 u32 value, 404 u32 width) 405 { 406 switch (width) 407 { 408 case 8: 409 outb(value, port); 410 break; 411 case 16: 412 outw(value, port); 413 break; 414 case 32: 415 outl(value, port); 416 break; 417 default: 418 BUG(); 419 } 420 421 return AE_OK; 422 } 423 EXPORT_SYMBOL(acpi_os_write_port); 424 425 acpi_status 426 acpi_os_read_memory( 427 acpi_physical_address phys_addr, 428 u32 *value, 429 u32 width) 430 { 431 u32 dummy; 432 void __iomem *virt_addr; 433 int iomem = 0; 434 435 if (efi_enabled) { 436 if (EFI_MEMORY_WB & efi_mem_attributes(phys_addr)) { 437 /* HACK ALERT! We can use readb/w/l on real memory too.. */ 438 virt_addr = (void __iomem *) phys_to_virt(phys_addr); 439 } else { 440 iomem = 1; 441 virt_addr = ioremap(phys_addr, width); 442 } 443 } else 444 virt_addr = (void __iomem *) phys_to_virt(phys_addr); 445 if (!value) 446 value = &dummy; 447 448 switch (width) { 449 case 8: 450 *(u8*) value = readb(virt_addr); 451 break; 452 case 16: 453 *(u16*) value = readw(virt_addr); 454 break; 455 case 32: 456 *(u32*) value = readl(virt_addr); 457 break; 458 default: 459 BUG(); 460 } 461 462 if (efi_enabled) { 463 if (iomem) 464 iounmap(virt_addr); 465 } 466 467 return AE_OK; 468 } 469 470 acpi_status 471 acpi_os_write_memory( 472 acpi_physical_address phys_addr, 473 u32 value, 474 u32 width) 475 { 476 void __iomem *virt_addr; 477 int iomem = 0; 478 479 if (efi_enabled) { 480 if (EFI_MEMORY_WB & efi_mem_attributes(phys_addr)) { 481 /* HACK ALERT! We can use writeb/w/l on real memory too */ 482 virt_addr = (void __iomem *) phys_to_virt(phys_addr); 483 } else { 484 iomem = 1; 485 virt_addr = ioremap(phys_addr, width); 486 } 487 } else 488 virt_addr = (void __iomem *) phys_to_virt(phys_addr); 489 490 switch (width) { 491 case 8: 492 writeb(value, virt_addr); 493 break; 494 case 16: 495 writew(value, virt_addr); 496 break; 497 case 32: 498 writel(value, virt_addr); 499 break; 500 default: 501 BUG(); 502 } 503 504 if (iomem) 505 iounmap(virt_addr); 506 507 return AE_OK; 508 } 509 510 #ifdef CONFIG_ACPI_PCI 511 512 acpi_status 513 acpi_os_read_pci_configuration (struct acpi_pci_id *pci_id, u32 reg, void *value, u32 width) 514 { 515 int result, size; 516 517 if (!value) 518 return AE_BAD_PARAMETER; 519 520 switch (width) { 521 case 8: 522 size = 1; 523 break; 524 case 16: 525 size = 2; 526 break; 527 case 32: 528 size = 4; 529 break; 530 default: 531 return AE_ERROR; 532 } 533 534 BUG_ON(!raw_pci_ops); 535 536 result = raw_pci_ops->read(pci_id->segment, pci_id->bus, 537 PCI_DEVFN(pci_id->device, pci_id->function), 538 reg, size, value); 539 540 return (result ? AE_ERROR : AE_OK); 541 } 542 EXPORT_SYMBOL(acpi_os_read_pci_configuration); 543 544 acpi_status 545 acpi_os_write_pci_configuration (struct acpi_pci_id *pci_id, u32 reg, acpi_integer value, u32 width) 546 { 547 int result, size; 548 549 switch (width) { 550 case 8: 551 size = 1; 552 break; 553 case 16: 554 size = 2; 555 break; 556 case 32: 557 size = 4; 558 break; 559 default: 560 return AE_ERROR; 561 } 562 563 BUG_ON(!raw_pci_ops); 564 565 result = raw_pci_ops->write(pci_id->segment, pci_id->bus, 566 PCI_DEVFN(pci_id->device, pci_id->function), 567 reg, size, value); 568 569 return (result ? AE_ERROR : AE_OK); 570 } 571 572 /* TODO: Change code to take advantage of driver model more */ 573 static void 574 acpi_os_derive_pci_id_2 ( 575 acpi_handle rhandle, /* upper bound */ 576 acpi_handle chandle, /* current node */ 577 struct acpi_pci_id **id, 578 int *is_bridge, 579 u8 *bus_number) 580 { 581 acpi_handle handle; 582 struct acpi_pci_id *pci_id = *id; 583 acpi_status status; 584 unsigned long temp; 585 acpi_object_type type; 586 u8 tu8; 587 588 acpi_get_parent(chandle, &handle); 589 if (handle != rhandle) { 590 acpi_os_derive_pci_id_2(rhandle, handle, &pci_id, is_bridge, bus_number); 591 592 status = acpi_get_type(handle, &type); 593 if ( (ACPI_FAILURE(status)) || (type != ACPI_TYPE_DEVICE) ) 594 return; 595 596 status = acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL, &temp); 597 if (ACPI_SUCCESS(status)) { 598 pci_id->device = ACPI_HIWORD (ACPI_LODWORD (temp)); 599 pci_id->function = ACPI_LOWORD (ACPI_LODWORD (temp)); 600 601 if (*is_bridge) 602 pci_id->bus = *bus_number; 603 604 /* any nicer way to get bus number of bridge ? */ 605 status = acpi_os_read_pci_configuration(pci_id, 0x0e, &tu8, 8); 606 if (ACPI_SUCCESS(status) && 607 ((tu8 & 0x7f) == 1 || (tu8 & 0x7f) == 2)) { 608 status = acpi_os_read_pci_configuration(pci_id, 0x18, &tu8, 8); 609 if (!ACPI_SUCCESS(status)) { 610 /* Certainly broken... FIX ME */ 611 return; 612 } 613 *is_bridge = 1; 614 pci_id->bus = tu8; 615 status = acpi_os_read_pci_configuration(pci_id, 0x19, &tu8, 8); 616 if (ACPI_SUCCESS(status)) { 617 *bus_number = tu8; 618 } 619 } else 620 *is_bridge = 0; 621 } 622 } 623 } 624 625 void 626 acpi_os_derive_pci_id ( 627 acpi_handle rhandle, /* upper bound */ 628 acpi_handle chandle, /* current node */ 629 struct acpi_pci_id **id) 630 { 631 int is_bridge = 1; 632 u8 bus_number = (*id)->bus; 633 634 acpi_os_derive_pci_id_2(rhandle, chandle, id, &is_bridge, &bus_number); 635 } 636 637 #else /*!CONFIG_ACPI_PCI*/ 638 639 acpi_status 640 acpi_os_write_pci_configuration ( 641 struct acpi_pci_id *pci_id, 642 u32 reg, 643 acpi_integer value, 644 u32 width) 645 { 646 return AE_SUPPORT; 647 } 648 649 acpi_status 650 acpi_os_read_pci_configuration ( 651 struct acpi_pci_id *pci_id, 652 u32 reg, 653 void *value, 654 u32 width) 655 { 656 return AE_SUPPORT; 657 } 658 659 void 660 acpi_os_derive_pci_id ( 661 acpi_handle rhandle, /* upper bound */ 662 acpi_handle chandle, /* current node */ 663 struct acpi_pci_id **id) 664 { 665 } 666 667 #endif /*CONFIG_ACPI_PCI*/ 668 669 static void 670 acpi_os_execute_deferred ( 671 void *context) 672 { 673 struct acpi_os_dpc *dpc = NULL; 674 675 ACPI_FUNCTION_TRACE ("os_execute_deferred"); 676 677 dpc = (struct acpi_os_dpc *) context; 678 if (!dpc) { 679 ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Invalid (NULL) context.\n")); 680 return_VOID; 681 } 682 683 dpc->function(dpc->context); 684 685 kfree(dpc); 686 687 return_VOID; 688 } 689 690 acpi_status 691 acpi_os_queue_for_execution( 692 u32 priority, 693 acpi_osd_exec_callback function, 694 void *context) 695 { 696 acpi_status status = AE_OK; 697 struct acpi_os_dpc *dpc; 698 struct work_struct *task; 699 700 ACPI_FUNCTION_TRACE ("os_queue_for_execution"); 701 702 ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "Scheduling function [%p(%p)] for deferred execution.\n", function, context)); 703 704 if (!function) 705 return_ACPI_STATUS (AE_BAD_PARAMETER); 706 707 /* 708 * Allocate/initialize DPC structure. Note that this memory will be 709 * freed by the callee. The kernel handles the tq_struct list in a 710 * way that allows us to also free its memory inside the callee. 711 * Because we may want to schedule several tasks with different 712 * parameters we can't use the approach some kernel code uses of 713 * having a static tq_struct. 714 * We can save time and code by allocating the DPC and tq_structs 715 * from the same memory. 716 */ 717 718 dpc = kmalloc(sizeof(struct acpi_os_dpc)+sizeof(struct work_struct), GFP_ATOMIC); 719 if (!dpc) 720 return_ACPI_STATUS (AE_NO_MEMORY); 721 722 dpc->function = function; 723 dpc->context = context; 724 725 task = (void *)(dpc+1); 726 INIT_WORK(task, acpi_os_execute_deferred, (void*)dpc); 727 728 if (!queue_work(kacpid_wq, task)) { 729 ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Call to queue_work() failed.\n")); 730 kfree(dpc); 731 status = AE_ERROR; 732 } 733 734 return_ACPI_STATUS (status); 735 } 736 EXPORT_SYMBOL(acpi_os_queue_for_execution); 737 738 void 739 acpi_os_wait_events_complete( 740 void *context) 741 { 742 flush_workqueue(kacpid_wq); 743 } 744 EXPORT_SYMBOL(acpi_os_wait_events_complete); 745 746 /* 747 * Allocate the memory for a spinlock and initialize it. 748 */ 749 acpi_status 750 acpi_os_create_lock ( 751 acpi_handle *out_handle) 752 { 753 spinlock_t *lock_ptr; 754 755 ACPI_FUNCTION_TRACE ("os_create_lock"); 756 757 lock_ptr = acpi_os_allocate(sizeof(spinlock_t)); 758 759 spin_lock_init(lock_ptr); 760 761 ACPI_DEBUG_PRINT ((ACPI_DB_MUTEX, "Creating spinlock[%p].\n", lock_ptr)); 762 763 *out_handle = lock_ptr; 764 765 return_ACPI_STATUS (AE_OK); 766 } 767 768 769 /* 770 * Deallocate the memory for a spinlock. 771 */ 772 void 773 acpi_os_delete_lock ( 774 acpi_handle handle) 775 { 776 ACPI_FUNCTION_TRACE ("os_create_lock"); 777 778 ACPI_DEBUG_PRINT ((ACPI_DB_MUTEX, "Deleting spinlock[%p].\n", handle)); 779 780 acpi_os_free(handle); 781 782 return_VOID; 783 } 784 785 /* 786 * Acquire a spinlock. 787 * 788 * handle is a pointer to the spinlock_t. 789 * flags is *not* the result of save_flags - it is an ACPI-specific flag variable 790 * that indicates whether we are at interrupt level. 791 */ 792 void 793 acpi_os_acquire_lock ( 794 acpi_handle handle, 795 u32 flags) 796 { 797 ACPI_FUNCTION_TRACE ("os_acquire_lock"); 798 799 ACPI_DEBUG_PRINT ((ACPI_DB_MUTEX, "Acquiring spinlock[%p] from %s level\n", handle, 800 ((flags & ACPI_NOT_ISR) ? "non-interrupt" : "interrupt"))); 801 802 if (flags & ACPI_NOT_ISR) 803 ACPI_DISABLE_IRQS(); 804 805 spin_lock((spinlock_t *)handle); 806 807 return_VOID; 808 } 809 810 811 /* 812 * Release a spinlock. See above. 813 */ 814 void 815 acpi_os_release_lock ( 816 acpi_handle handle, 817 u32 flags) 818 { 819 ACPI_FUNCTION_TRACE ("os_release_lock"); 820 821 ACPI_DEBUG_PRINT ((ACPI_DB_MUTEX, "Releasing spinlock[%p] from %s level\n", handle, 822 ((flags & ACPI_NOT_ISR) ? "non-interrupt" : "interrupt"))); 823 824 spin_unlock((spinlock_t *)handle); 825 826 if (flags & ACPI_NOT_ISR) 827 ACPI_ENABLE_IRQS(); 828 829 return_VOID; 830 } 831 832 833 acpi_status 834 acpi_os_create_semaphore( 835 u32 max_units, 836 u32 initial_units, 837 acpi_handle *handle) 838 { 839 struct semaphore *sem = NULL; 840 841 ACPI_FUNCTION_TRACE ("os_create_semaphore"); 842 843 sem = acpi_os_allocate(sizeof(struct semaphore)); 844 if (!sem) 845 return_ACPI_STATUS (AE_NO_MEMORY); 846 memset(sem, 0, sizeof(struct semaphore)); 847 848 sema_init(sem, initial_units); 849 850 *handle = (acpi_handle*)sem; 851 852 ACPI_DEBUG_PRINT ((ACPI_DB_MUTEX, "Creating semaphore[%p|%d].\n", *handle, initial_units)); 853 854 return_ACPI_STATUS (AE_OK); 855 } 856 EXPORT_SYMBOL(acpi_os_create_semaphore); 857 858 859 /* 860 * TODO: A better way to delete semaphores? Linux doesn't have a 861 * 'delete_semaphore()' function -- may result in an invalid 862 * pointer dereference for non-synchronized consumers. Should 863 * we at least check for blocked threads and signal/cancel them? 864 */ 865 866 acpi_status 867 acpi_os_delete_semaphore( 868 acpi_handle handle) 869 { 870 struct semaphore *sem = (struct semaphore*) handle; 871 872 ACPI_FUNCTION_TRACE ("os_delete_semaphore"); 873 874 if (!sem) 875 return_ACPI_STATUS (AE_BAD_PARAMETER); 876 877 ACPI_DEBUG_PRINT ((ACPI_DB_MUTEX, "Deleting semaphore[%p].\n", handle)); 878 879 acpi_os_free(sem); sem = NULL; 880 881 return_ACPI_STATUS (AE_OK); 882 } 883 EXPORT_SYMBOL(acpi_os_delete_semaphore); 884 885 886 /* 887 * TODO: The kernel doesn't have a 'down_timeout' function -- had to 888 * improvise. The process is to sleep for one scheduler quantum 889 * until the semaphore becomes available. Downside is that this 890 * may result in starvation for timeout-based waits when there's 891 * lots of semaphore activity. 892 * 893 * TODO: Support for units > 1? 894 */ 895 acpi_status 896 acpi_os_wait_semaphore( 897 acpi_handle handle, 898 u32 units, 899 u16 timeout) 900 { 901 acpi_status status = AE_OK; 902 struct semaphore *sem = (struct semaphore*)handle; 903 int ret = 0; 904 905 ACPI_FUNCTION_TRACE ("os_wait_semaphore"); 906 907 if (!sem || (units < 1)) 908 return_ACPI_STATUS (AE_BAD_PARAMETER); 909 910 if (units > 1) 911 return_ACPI_STATUS (AE_SUPPORT); 912 913 ACPI_DEBUG_PRINT ((ACPI_DB_MUTEX, "Waiting for semaphore[%p|%d|%d]\n", handle, units, timeout)); 914 915 if (in_atomic()) 916 timeout = 0; 917 918 switch (timeout) 919 { 920 /* 921 * No Wait: 922 * -------- 923 * A zero timeout value indicates that we shouldn't wait - just 924 * acquire the semaphore if available otherwise return AE_TIME 925 * (a.k.a. 'would block'). 926 */ 927 case 0: 928 if(down_trylock(sem)) 929 status = AE_TIME; 930 break; 931 932 /* 933 * Wait Indefinitely: 934 * ------------------ 935 */ 936 case ACPI_WAIT_FOREVER: 937 down(sem); 938 break; 939 940 /* 941 * Wait w/ Timeout: 942 * ---------------- 943 */ 944 default: 945 // TODO: A better timeout algorithm? 946 { 947 int i = 0; 948 static const int quantum_ms = 1000/HZ; 949 950 ret = down_trylock(sem); 951 for (i = timeout; (i > 0 && ret < 0); i -= quantum_ms) { 952 current->state = TASK_INTERRUPTIBLE; 953 schedule_timeout(1); 954 ret = down_trylock(sem); 955 } 956 957 if (ret != 0) 958 status = AE_TIME; 959 } 960 break; 961 } 962 963 if (ACPI_FAILURE(status)) { 964 ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Failed to acquire semaphore[%p|%d|%d], %s\n", 965 handle, units, timeout, acpi_format_exception(status))); 966 } 967 else { 968 ACPI_DEBUG_PRINT ((ACPI_DB_MUTEX, "Acquired semaphore[%p|%d|%d]\n", handle, units, timeout)); 969 } 970 971 return_ACPI_STATUS (status); 972 } 973 EXPORT_SYMBOL(acpi_os_wait_semaphore); 974 975 976 /* 977 * TODO: Support for units > 1? 978 */ 979 acpi_status 980 acpi_os_signal_semaphore( 981 acpi_handle handle, 982 u32 units) 983 { 984 struct semaphore *sem = (struct semaphore *) handle; 985 986 ACPI_FUNCTION_TRACE ("os_signal_semaphore"); 987 988 if (!sem || (units < 1)) 989 return_ACPI_STATUS (AE_BAD_PARAMETER); 990 991 if (units > 1) 992 return_ACPI_STATUS (AE_SUPPORT); 993 994 ACPI_DEBUG_PRINT ((ACPI_DB_MUTEX, "Signaling semaphore[%p|%d]\n", handle, units)); 995 996 up(sem); 997 998 return_ACPI_STATUS (AE_OK); 999 } 1000 EXPORT_SYMBOL(acpi_os_signal_semaphore); 1001 1002 #ifdef ACPI_FUTURE_USAGE 1003 u32 1004 acpi_os_get_line(char *buffer) 1005 { 1006 1007 #ifdef ENABLE_DEBUGGER 1008 if (acpi_in_debugger) { 1009 u32 chars; 1010 1011 kdb_read(buffer, sizeof(line_buf)); 1012 1013 /* remove the CR kdb includes */ 1014 chars = strlen(buffer) - 1; 1015 buffer[chars] = '\0'; 1016 } 1017 #endif 1018 1019 return 0; 1020 } 1021 #endif /* ACPI_FUTURE_USAGE */ 1022 1023 /* Assumes no unreadable holes inbetween */ 1024 u8 1025 acpi_os_readable(void *ptr, acpi_size len) 1026 { 1027 #if defined(__i386__) || defined(__x86_64__) 1028 char tmp; 1029 return !__get_user(tmp, (char __user *)ptr) && !__get_user(tmp, (char __user *)ptr + len - 1); 1030 #endif 1031 return 1; 1032 } 1033 1034 #ifdef ACPI_FUTURE_USAGE 1035 u8 1036 acpi_os_writable(void *ptr, acpi_size len) 1037 { 1038 /* could do dummy write (racy) or a kernel page table lookup. 1039 The later may be difficult at early boot when kmap doesn't work yet. */ 1040 return 1; 1041 } 1042 #endif 1043 1044 u32 1045 acpi_os_get_thread_id (void) 1046 { 1047 if (!in_atomic()) 1048 return current->pid; 1049 1050 return 0; 1051 } 1052 1053 acpi_status 1054 acpi_os_signal ( 1055 u32 function, 1056 void *info) 1057 { 1058 switch (function) 1059 { 1060 case ACPI_SIGNAL_FATAL: 1061 printk(KERN_ERR PREFIX "Fatal opcode executed\n"); 1062 break; 1063 case ACPI_SIGNAL_BREAKPOINT: 1064 /* 1065 * AML Breakpoint 1066 * ACPI spec. says to treat it as a NOP unless 1067 * you are debugging. So if/when we integrate 1068 * AML debugger into the kernel debugger its 1069 * hook will go here. But until then it is 1070 * not useful to print anything on breakpoints. 1071 */ 1072 break; 1073 default: 1074 break; 1075 } 1076 1077 return AE_OK; 1078 } 1079 EXPORT_SYMBOL(acpi_os_signal); 1080 1081 static int __init 1082 acpi_os_name_setup(char *str) 1083 { 1084 char *p = acpi_os_name; 1085 int count = ACPI_MAX_OVERRIDE_LEN-1; 1086 1087 if (!str || !*str) 1088 return 0; 1089 1090 for (; count-- && str && *str; str++) { 1091 if (isalnum(*str) || *str == ' ' || *str == ':') 1092 *p++ = *str; 1093 else if (*str == '\'' || *str == '"') 1094 continue; 1095 else 1096 break; 1097 } 1098 *p = 0; 1099 1100 return 1; 1101 1102 } 1103 1104 __setup("acpi_os_name=", acpi_os_name_setup); 1105 1106 /* 1107 * _OSI control 1108 * empty string disables _OSI 1109 * TBD additional string adds to _OSI 1110 */ 1111 static int __init 1112 acpi_osi_setup(char *str) 1113 { 1114 if (str == NULL || *str == '\0') { 1115 printk(KERN_INFO PREFIX "_OSI method disabled\n"); 1116 acpi_gbl_create_osi_method = FALSE; 1117 } else 1118 { 1119 /* TBD */ 1120 printk(KERN_ERR PREFIX "_OSI additional string ignored -- %s\n", str); 1121 } 1122 1123 return 1; 1124 } 1125 1126 __setup("acpi_osi=", acpi_osi_setup); 1127 1128 /* enable serialization to combat AE_ALREADY_EXISTS errors */ 1129 static int __init 1130 acpi_serialize_setup(char *str) 1131 { 1132 printk(KERN_INFO PREFIX "serialize enabled\n"); 1133 1134 acpi_gbl_all_methods_serialized = TRUE; 1135 1136 return 1; 1137 } 1138 1139 __setup("acpi_serialize", acpi_serialize_setup); 1140 1141 /* 1142 * Wake and Run-Time GPES are expected to be separate. 1143 * We disable wake-GPEs at run-time to prevent spurious 1144 * interrupts. 1145 * 1146 * However, if a system exists that shares Wake and 1147 * Run-time events on the same GPE this flag is available 1148 * to tell Linux to keep the wake-time GPEs enabled at run-time. 1149 */ 1150 static int __init 1151 acpi_wake_gpes_always_on_setup(char *str) 1152 { 1153 printk(KERN_INFO PREFIX "wake GPEs not disabled\n"); 1154 1155 acpi_gbl_leave_wake_gpes_disabled = FALSE; 1156 1157 return 1; 1158 } 1159 1160 __setup("acpi_wake_gpes_always_on", acpi_wake_gpes_always_on_setup); 1161 1162 int __init 1163 acpi_hotkey_setup(char *str) 1164 { 1165 acpi_specific_hotkey_enabled = FALSE; 1166 return 1; 1167 } 1168 1169 __setup("acpi_generic_hotkey", acpi_hotkey_setup); 1170 1171 /* 1172 * max_cstate is defined in the base kernel so modules can 1173 * change it w/o depending on the state of the processor module. 1174 */ 1175 unsigned int max_cstate = ACPI_PROCESSOR_MAX_POWER; 1176 1177 1178 EXPORT_SYMBOL(max_cstate); 1179